devices/virtio/vhost_user_backend/gpu/sys/linux.rs

// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use std::cell::RefCell;
6use std::collections::BTreeMap;
7use std::path::PathBuf;
8use std::rc::Rc;
9use std::sync::Arc;
10
11use anyhow::Context;
12use argh::FromArgs;
13use base::clone_descriptor;
14use base::error;
15use base::RawDescriptor;
16use base::SafeDescriptor;
17use base::Tube;
18use base::UnixSeqpacketListener;
19use base::UnlinkUnixSeqpacketListener;
20use cros_async::AsyncWrapper;
21use cros_async::Executor;
22use cros_async::IoSource;
23use hypervisor::ProtectionType;
24use sync::Mutex;
25
26use crate::virtio;
27use crate::virtio::gpu;
28use crate::virtio::gpu::ProcessDisplayResult;
29use crate::virtio::vhost_user_backend::gpu::GpuBackend;
30use crate::virtio::vhost_user_backend::wl::parse_wayland_sock;
31use crate::virtio::vhost_user_backend::BackendConnection;
32use crate::virtio::Gpu;
33use crate::virtio::GpuDisplayParameters;
34use crate::virtio::GpuParameters;
35use crate::virtio::Interrupt;
36
37async fn run_display(
38    display: IoSource<AsyncWrapper<SafeDescriptor>>,
39    state: Rc<RefCell<gpu::Frontend>>,
40) {
41    loop {
42        if let Err(e) = display.wait_readable().await {
43            error!(
44                "Failed to wait for display context to become readable: {}",
45                e
46            );
47            break;
48        }
49
50        match state.borrow_mut().process_display() {
51            ProcessDisplayResult::Error(e) => {
52                error!("Failed to process display events: {}", e);
53                break;
54            }
55            ProcessDisplayResult::CloseRequested => break,
56            ProcessDisplayResult::Success => {}
57        }
58    }
59}
60
61async fn run_resource_bridge(tube: IoSource<Tube>, state: Rc<RefCell<gpu::Frontend>>) {
62    loop {
63        if let Err(e) = tube.wait_readable().await {
64            error!(
65                "Failed to wait for resource bridge tube to become readable: {}",
66                e
67            );
68            break;
69        }
70
71        if let Err(e) = state.borrow_mut().process_resource_bridge(tube.as_source()) {
72            error!("Failed to process resource bridge: {:#}", e);
73            break;
74        }
75    }
76}
77
impl GpuBackend {
    /// Spawns the Linux-specific background tasks for the GPU backend: one
    /// task per queued resource bridge and one task that services display
    /// events. Each spawned task handle is sent over `platform_worker_tx`
    /// so the backend can track (and later drop/cancel) the workers.
    ///
    /// Returns an error if the frontend state has not been set yet, or if
    /// any async registration or task handoff fails.
    pub fn start_platform_workers(&mut self, _interrupt: Interrupt) -> anyhow::Result<()> {
        // The shared frontend state must exist before workers can run.
        let state = self
            .state
            .as_ref()
            .context("frontend state wasn't set")?
            .clone();

        // Start handling the resource bridges.
        // `drain(..)` empties the shared list, so each bridge is serviced by
        // exactly one spawned task.
        for bridge in self.resource_bridges.lock().drain(..) {
            let tube = self
                .ex
                .async_from(bridge)
                .context("failed to create async tube")?;
            let task = self
                .ex
                .spawn_local(run_resource_bridge(tube, state.clone()));
            self.platform_worker_tx
                .unbounded_send(task)
                .context("sending the run_resource_bridge task")?;
        }

        // Start handling the display.
        // Clone the display's wait descriptor so the async poller owns its
        // own descriptor; the RefCell borrows here are released before the
        // task is spawned.
        let display = clone_descriptor(&*state.borrow_mut().display().borrow())
            .map(AsyncWrapper::new)
            .context("failed to clone inner WaitContext for gpu display")
            .and_then(|ctx| {
                self.ex
                    .async_from(ctx)
                    .context("failed to create async WaitContext")
            })?;

        let task = self.ex.spawn_local(run_display(display, state));
        self.platform_worker_tx
            .unbounded_send(task)
            .context("sending the run_display task")?;

        Ok(())
    }
}
118fn gpu_parameters_from_str(input: &str) -> Result<GpuParameters, String> {
119    serde_json::from_str(input).map_err(|e| e.to_string())
120}
121
// NOTE: the `///` doc comments on this struct and its fields are consumed by
// argh and become the `--help` output, so they are user-visible text, not
// just documentation.
#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu")]
pub struct Options {
    // Deprecated alias for --socket-path; kept (hidden from help) for
    // backward compatibility.
    #[argh(option, arg_name = "PATH", hidden_help)]
    /// deprecated - please use --socket-path instead
    socket: Option<String>,
    // Exactly one of socket/socket_path/fd is expected; the exclusivity is
    // enforced later (see BackendConnection::from_opts), not by argh.
    #[argh(option, arg_name = "PATH")]
    /// path to the vhost-user socket to bind to.
    /// If this flag is set, --fd cannot be specified.
    socket_path: Option<String>,
    #[argh(option, arg_name = "FD")]
    /// file descriptor of a connected vhost-user socket.
    /// If this flag is set, --socket-path cannot be specified.
    fd: Option<RawDescriptor>,

    // Parsed via parse_wayland_sock into (name, path) pairs; the unnamed
    // socket uses the empty string as its name.
    #[argh(option, from_str_fn(parse_wayland_sock), arg_name = "PATH[,name=NAME]")]
    /// path to one or more Wayland sockets. The unnamed socket is
    /// used for displaying virtual screens while the named ones are used for IPC
    wayland_sock: Vec<(String, PathBuf)>,
    #[argh(option, arg_name = "PATH")]
    /// path to one or more bridge sockets for communicating with
    /// other graphics devices (wayland, video, etc)
    resource_bridge: Vec<String>,
    #[argh(option, arg_name = "DISPLAY")]
    /// X11 display name to use
    x_display: Option<String>,
    // JSON-encoded GpuParameters; defaults to GpuParameters::default() when
    // the flag is absent.
    #[argh(
        option,
        from_str_fn(gpu_parameters_from_str),
        default = "Default::default()",
        arg_name = "JSON"
    )]
    /// a JSON object of virtio-gpu parameters
    params: GpuParameters,
}
158
/// Entry point for the standalone vhost-user GPU device process.
///
/// Binds the resource-bridge listeners, selects display backends, builds the
/// virtio-gpu device, and runs the vhost-user backend on a single-threaded
/// executor until the connection finishes.
///
/// Returns an error if socket binding, executor creation, tube creation, or
/// the backend run itself fails.
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    let Options {
        x_display,
        params: mut gpu_parameters,
        resource_bridge,
        socket,
        socket_path,
        fd,
        wayland_sock,
    } = opts;

    // Map of wayland socket name -> path; the unnamed ("") entry, if any,
    // doubles as the wayland display socket below.
    let channels: BTreeMap<_, _> = wayland_sock.into_iter().collect();

    // Bind every requested resource-bridge socket up front; the wrapper
    // unlinks the socket file when dropped.
    let resource_bridge_listeners = resource_bridge
        .into_iter()
        .map(|p| {
            UnixSeqpacketListener::bind(&p)
                .map(UnlinkUnixSeqpacketListener)
                .with_context(|| format!("failed to bind socket at path {p}"))
        })
        .collect::<anyhow::Result<Vec<_>>>()?;

    // Ensure at least one display exists even if the user supplied none.
    if gpu_parameters.display_params.is_empty() {
        gpu_parameters
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let ex = Executor::new().context("failed to create executor")?;

    // We don't know the order in which other devices are going to connect to the resource bridges
    // so start listening for all of them on separate threads. Any devices that connect after the
    // gpu device starts its queues will not have its resource bridges processed. In practice this
    // should be fine since the devices that use the resource bridge always try to connect to the
    // gpu device before handling messages from the VM.
    let resource_bridges = Arc::new(Mutex::new(Vec::with_capacity(
        resource_bridge_listeners.len(),
    )));
    for listener in resource_bridge_listeners {
        let resource_bridges = Arc::clone(&resource_bridges);
        // Each accept() blocks on its own thread; an accepted stream is
        // converted to a Tube and stashed for start_platform_workers.
        // NOTE(review): the unwrap panics if Tube::try_from fails on an
        // accepted stream — presumably considered unrecoverable here; confirm.
        ex.spawn_blocking(move || match listener.accept() {
            Ok(stream) => resource_bridges
                .lock()
                .push(Tube::try_from(stream).unwrap()),
            Err(e) => {
                let path = listener
                    .path()
                    .unwrap_or_else(|_| PathBuf::from("{unknown}"));
                error!(
                    "Failed to accept resource bridge connection for socket {}: {}",
                    path.display(),
                    e
                );
            }
        })
        .detach();
    }

    // TODO(b/232344535): Read side of the tube is ignored currently.
    // Complete the implementation by polling `exit_evt_rdtube` and
    // kill the sibling VM.
    let (exit_evt_wrtube, _) =
        Tube::directional_pair().context("failed to create vm event tube")?;

    // The read side is likewise dropped; GPU control is not wired up here.
    let (gpu_control_tube, _) = Tube::pair().context("failed to create gpu control tube")?;

    // Backends are tried in order; wayland (if an unnamed socket was given)
    // takes priority over X11, with the stub display as last resort.
    let mut display_backends = vec![
        virtio::DisplayBackend::X(x_display),
        virtio::DisplayBackend::Stub,
    ];
    if let Some(p) = channels.get("") {
        display_backends.insert(0, virtio::DisplayBackend::Wayland(Some(p.to_owned())));
    }

    // These are only used when there is an input device.
    let event_devices = Vec::new();

    let base_features = virtio::base_features(ProtectionType::Unprotected);

    // Resolves the --socket/--socket-path/--fd exclusivity and connects.
    let conn = BackendConnection::from_opts(socket.as_deref(), socket_path.as_deref(), fd)?;

    let gpu = Rc::new(RefCell::new(Gpu::new(
        exit_evt_wrtube,
        gpu_control_tube,
        Vec::new(), // resource_bridges, handled separately by us
        display_backends,
        &gpu_parameters,
        /* rutabaga_server_descriptor */
        None,
        event_devices,
        base_features,
        &channels,
        /* gpu_cgroup_path */
        None,
    )));

    // Channel over which start_platform_workers hands off its task handles.
    let (platform_worker_tx, platform_worker_rx) = futures::channel::mpsc::unbounded();
    let backend = GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges,
        state: None,
        fence_state: Default::default(),
        queue_workers: Default::default(),
        platform_worker_rx,
        platform_worker_tx,
        shmem_mapper: Arc::new(Mutex::new(None)),
    };

    // Run until the backend is finished.
    let _ = ex.run_until(conn.run_backend(backend, &ex))?;

    // Process any tasks from the backend's destructor.
    Ok(ex.run_until(async {})?)
}