devices/virtio/queue.rs

// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! virtqueue interface

#![deny(missing_docs)]

use std::ops::Deref;
use std::ops::DerefMut;

pub mod packed_descriptor_chain;
mod packed_queue;
pub mod split_descriptor_chain;
mod split_queue;

use std::num::Wrapping;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::warn;
use base::Event;
use cros_async::AsyncError;
use cros_async::EventAsync;
use futures::channel::oneshot;
use futures::select_biased;
use futures::FutureExt;
use packed_queue::PackedQueue;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use split_queue::SplitQueue;
use virtio_sys::virtio_config::VIRTIO_F_RING_PACKED;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::virtio::DescriptorChain;
use crate::virtio::Interrupt;
use crate::virtio::VIRTIO_MSI_NO_VECTOR;

/// A virtio queue's parameters.
///
/// `QueueConfig` can be converted into a running `Queue` by calling [`QueueConfig::activate()`].
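///
/// # Example
///
/// A minimal sketch of the configuration lifecycle, roughly as a transport might drive it
/// (illustrative only; `features`, `mem`, `event`, and `interrupt` are assumed to be provided by
/// the caller):
///
/// ```ignore
/// let mut cfg = QueueConfig::new(256, features);
/// cfg.set_size(128);
/// cfg.set_desc_table(GuestAddress(0x1000));
/// cfg.set_avail_ring(GuestAddress(0x2000));
/// cfg.set_used_ring(GuestAddress(0x3000));
/// cfg.set_ready(true);
/// let queue = cfg.activate(&mem, event, interrupt)?;
/// ```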
pub struct QueueConfig {
    /// Whether this queue has already been activated.
    activated: bool,

    /// The maximal size in elements offered by the device
    max_size: u16,

    /// The queue size in elements the driver selected. This is always guaranteed to be a power of
    /// two less than or equal to `max_size`, as required for split virtqueues. These invariants
    /// are enforced by `set_size()`.
    size: u16,

    /// Indicates if the queue is finished with configuration
    ready: bool,

    /// MSI-X vector for the queue. Don't care for INTx
    vector: u16,

    /// Ring features (e.g. `VIRTIO_RING_F_EVENT_IDX`, `VIRTIO_F_RING_PACKED`) offered by the
    /// device
    features: u64,

    /// Device feature bits accepted by the driver
    acked_features: u64,

    /// Guest physical address of the descriptor table
    desc_table: GuestAddress,

    /// Guest physical address of the available ring (driver area)
    ///
    /// TODO(b/290657008): update field and accessor names to match the current virtio spec
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring (device area)
    used_ring: GuestAddress,

    /// Initial available ring index when the queue is activated.
    next_avail: Wrapping<u16>,

    /// Initial used ring index when the queue is activated.
    next_used: Wrapping<u16>,
}

#[derive(Serialize, Deserialize)]
struct QueueConfigSnapshot {
    activated: bool,
    max_size: u16,
    size: u16,
    ready: bool,
    vector: u16,
    features: u64,
    acked_features: u64,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
}

impl QueueConfig {
    /// Constructs a virtio queue configuration with the given `max_size`.
    pub fn new(max_size: u16, features: u64) -> Self {
        assert!(max_size > 0);
        assert!(max_size <= Queue::MAX_SIZE);
        QueueConfig {
            activated: false,
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            features,
            acked_features: 0,
            next_used: Wrapping(0),
            next_avail: Wrapping(0),
        }
    }

    /// Returns the maximum size of this queue.
    pub fn max_size(&self) -> u16 {
        self.max_size
    }

    /// Returns the currently configured size of the queue.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Sets the queue size.
    pub fn set_size(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to size on ready queue");
            return;
        }

        if val > self.max_size {
            warn!(
                "requested queue size {} is larger than max_size {}",
                val, self.max_size
            );
            return;
        }

        // Reject sizes that are not a power of two to uphold the invariant documented on `size`;
        // split virtqueues require a power-of-two ring size.
        if !val.is_power_of_two() {
            warn!("requested queue size {} is not a power of two", val);
            return;
        }

        self.size = val;
    }

    /// Returns the currently configured interrupt vector.
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Sets the interrupt vector for this queue.
    pub fn set_vector(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to vector on ready queue");
            return;
        }

        self.vector = val;
    }

    /// Getter for descriptor area
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Setter for descriptor area
    pub fn set_desc_table(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to desc_table on ready queue");
            return;
        }

        self.desc_table = val;
    }

    /// Getter for driver area
    pub fn avail_ring(&self) -> GuestAddress {
        self.avail_ring
    }

    /// Setter for driver area
    pub fn set_avail_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to avail_ring on ready queue");
            return;
        }

        self.avail_ring = val;
    }

    /// Getter for device area
    pub fn used_ring(&self) -> GuestAddress {
        self.used_ring
    }

    /// Setter for device area
    pub fn set_used_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to used_ring on ready queue");
            return;
        }

        self.used_ring = val;
    }

    /// Getter for next_avail index
    pub fn next_avail(&self) -> Wrapping<u16> {
        self.next_avail
    }

    /// Setter for next_avail index
    pub fn set_next_avail(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_avail on ready queue");
            return;
        }

        self.next_avail = val;
    }

    /// Getter for next_used index
    pub fn next_used(&self) -> Wrapping<u16> {
        self.next_used
    }

    /// Setter for next_used index
    pub fn set_next_used(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_used on ready queue");
            return;
        }

        self.next_used = val;
    }

    /// Returns the features that have been acknowledged by the driver.
    pub fn acked_features(&self) -> u64 {
        self.acked_features
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    pub fn ack_features(&mut self, features: u64) {
        self.acked_features |= features & self.features;
    }

    /// Return whether the driver has enabled this queue.
    pub fn ready(&self) -> bool {
        self.ready
    }

    /// Signal that the driver has completed queue configuration.
    pub fn set_ready(&mut self, enable: bool) {
        self.ready = enable;
    }

    /// Convert the queue configuration into an active queue.
    pub fn activate(
        &mut self,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> Result<Queue> {
        if !self.ready {
            bail!("attempted to activate a non-ready queue");
        }

        if self.activated {
            bail!("queue is already activated");
        }
        // If the VIRTIO_F_RING_PACKED feature bit has been negotiated, create a packed queue;
        // otherwise, create a split queue.
        let queue: Queue = if ((self.acked_features >> VIRTIO_F_RING_PACKED) & 1) != 0 {
            let pq = PackedQueue::new(self, mem, event, interrupt)
                .context("Failed to create a packed queue.")?;
            Queue::PackedVirtQueue(pq)
        } else {
            let sq = SplitQueue::new(self, mem, event, interrupt)
                .context("Failed to create a split queue.")?;
            Queue::SplitVirtQueue(sq)
        };

        self.activated = true;
        Ok(queue)
    }

    /// Reset queue to a clean state
    pub fn reset(&mut self) {
        self.activated = false;
        self.ready = false;
        self.size = self.max_size;
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.acked_features = 0;
    }

    /// Take snapshot of queue configuration
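    ///
    /// A rough sketch of the intended round trip (illustrative only; `features` stands in for the
    /// same feature bits the original config was created with):
    ///
    /// ```ignore
    /// let snap = queue_config.snapshot()?;
    /// // ... e.g. across a VM save/restore cycle ...
    /// let mut restored = QueueConfig::new(queue_config.max_size(), features);
    /// restored.restore(snap)?;
    /// ```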
    pub fn snapshot(&self) -> Result<AnySnapshot> {
        AnySnapshot::to_any(QueueConfigSnapshot {
            activated: self.activated,
            max_size: self.max_size,
            size: self.size,
            ready: self.ready,
            vector: self.vector,
            features: self.features,
            acked_features: self.acked_features,
            desc_table: self.desc_table,
            avail_ring: self.avail_ring,
            used_ring: self.used_ring,
            next_avail: self.next_avail,
            next_used: self.next_used,
        })
        .context("error serializing")
    }

    /// Restore queue configuration from snapshot
    pub fn restore(&mut self, data: AnySnapshot) -> Result<()> {
        let snap: QueueConfigSnapshot =
            AnySnapshot::from_any(data).context("error deserializing")?;
        self.activated = snap.activated;
        self.max_size = snap.max_size;
        self.size = snap.size;
        self.ready = snap.ready;
        self.vector = snap.vector;
        self.features = snap.features;
        self.acked_features = snap.acked_features;
        self.desc_table = snap.desc_table;
        self.avail_ring = snap.avail_ring;
        self.used_ring = snap.used_ring;
        self.next_avail = snap.next_avail;
        self.next_used = snap.next_used;
        Ok(())
    }
}

/// Usage: define_queue_method!(method_name, return_type[, mut][, arg1: arg1_type, arg2: arg2_type,
/// ...])
///
/// - `method_name`: The name of the method to be defined (as an identifier).
/// - `return_type`: The return type of the method.
/// - `mut` (optional): Include this keyword if the method requires a mutable reference to `self`
///   (`&mut self`).
/// - `arg1: arg1_type, arg2: arg2_type, ...` (optional): Include method parameters as a
///   comma-separated list of `name: type` pairs, if the method takes any arguments.
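///
/// For example, an invocation like `define_queue_method!(vector, u16,)` expands to roughly:
///
/// ```ignore
/// pub fn vector(&self) -> u16 {
///     match self {
///         Queue::SplitVirtQueue(sq) => sq.vector(),
///         Queue::PackedVirtQueue(pq) => pq.vector(),
///     }
/// }
/// ```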
macro_rules! define_queue_method {
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, mut, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&mut self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
}

/// Virtqueue interface representing different types of virtqueues.
/// The struct for each queue type is wrapped in the corresponding enum variant.
#[derive(Debug)]
pub enum Queue {
    /// Split virtqueue type in virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-350007>
    SplitVirtQueue(SplitQueue),
    /// Packed virtqueue type in virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-720008>
    PackedVirtQueue(PackedQueue),
}

impl Queue {
    /// Largest valid number of entries in a virtqueue.
    pub const MAX_SIZE: u16 = 32768;

    /// Asynchronously read the next descriptor chain from the queue.
    /// Returns a `DescriptorChain` when it is `await`ed.
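    ///
    /// A typical device worker loop might look roughly like this (illustrative only; the
    /// `queue_event` wrapping the queue's `Event` in an `EventAsync` and the request handling are
    /// assumed):
    ///
    /// ```ignore
    /// loop {
    ///     let mut chain = queue.next_async(&mut queue_event).await?;
    ///     // ... handle the request, writing any response through `chain.writer` ...
    ///     queue.add_used(chain);
    ///     queue.trigger_interrupt();
    /// }
    /// ```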
    pub async fn next_async(
        &mut self,
        eventfd: &mut EventAsync,
    ) -> std::result::Result<DescriptorChain, AsyncError> {
        loop {
            // Check if there are more descriptors available.
            if let Some(chain) = self.pop() {
                return Ok(chain);
            }
            eventfd.next_val().await?;
        }
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop()` on the returned [`PeekedDescriptorChain`] to remove it from the queue.
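    ///
    /// An illustrative sketch of inspecting a request before deciding whether to consume it;
    /// `device_can_handle` is a hypothetical device-specific check, not part of this API:
    ///
    /// ```ignore
    /// if let Some(peeked) = queue.peek() {
    ///     if device_can_handle(&peeked) {
    ///         let chain = peeked.pop();
    ///         // ... process `chain`, then return it with `add_used()` ...
    ///     }
    ///     // Otherwise drop `peeked`; the chain stays at the front of the queue.
    /// }
    /// ```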
    pub fn peek(&mut self) -> Option<PeekedDescriptorChain> {
        let desc_chain = match self {
            Queue::SplitVirtQueue(q) => q.peek(),
            Queue::PackedVirtQueue(q) => q.peek(),
        }?;

        Some(PeekedDescriptorChain::new(self, desc_chain))
    }

    /// If a new DescriptorChain is available, returns one and removes it from the queue.
    pub fn pop(&mut self) -> Option<DescriptorChain> {
        self.peek().map(PeekedDescriptorChain::pop)
    }

    /// Try to pop `DescriptorChain`s and collect them until the writable descriptors' total
    /// length is larger than `request_length`. Returns `None` if there are not enough
    /// descriptors.
    pub fn try_pop_length(&mut self, request_length: usize) -> Option<Vec<DescriptorChain>> {
        match self {
            Queue::SplitVirtQueue(q) => q.try_pop_length(request_length),
            Queue::PackedVirtQueue(_q) => {
                unimplemented!()
            }
        }
    }

    /// Returns `None` if `stop_rx` receives a value; otherwise returns the result
    /// of waiting for the next descriptor.
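    ///
    /// A rough sketch of a stoppable worker loop (illustrative only; the oneshot channel is
    /// assumed to be signalled by whatever asks the worker to stop):
    ///
    /// ```ignore
    /// let (stop_tx, mut stop_rx) = futures::channel::oneshot::channel::<()>();
    /// while let Some(chain) = queue
    ///     .next_async_interruptable(&mut queue_event, &mut stop_rx)
    ///     .await?
    /// {
    ///     queue.add_used(chain);
    ///     queue.trigger_interrupt();
    /// }
    /// // Elsewhere: `let _ = stop_tx.send(());` ends the loop.
    /// ```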
    pub async fn next_async_interruptable(
        &mut self,
        queue_event: &mut EventAsync,
        mut stop_rx: &mut oneshot::Receiver<()>,
    ) -> std::result::Result<Option<DescriptorChain>, AsyncError> {
        select_biased! {
            avail_desc_res = self.next_async(queue_event).fuse() => {
                Ok(Some(avail_desc_res?))
            }
            _ = stop_rx => Ok(None),
        }
    }

    /// Inject an interrupt into the guest for this queue.
    ///
    /// Returns `true` if an interrupt was injected into the guest for this queue, and `false` if
    /// no interrupt was injected.
    pub fn trigger_interrupt(&mut self) -> bool {
        match self {
            Queue::SplitVirtQueue(sq) => sq.trigger_interrupt(),
            Queue::PackedVirtQueue(pq) => pq.trigger_interrupt(),
        }
    }

    /// Restore queue from snapshot
    pub fn restore(
        queue_config: &QueueConfig,
        queue_value: AnySnapshot,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> anyhow::Result<Queue> {
        if queue_config.acked_features & 1 << VIRTIO_F_RING_PACKED != 0 {
            PackedQueue::restore(queue_value, mem, event, interrupt).map(Queue::PackedVirtQueue)
        } else {
            SplitQueue::restore(queue_value, mem, event, interrupt).map(Queue::SplitVirtQueue)
        }
    }

    /// "Reclaim" a queue that was given to a vhost-user backend and is now being taken back using
    /// VHOST_USER_GET_VRING_BASE.
    ///
    /// The `Queue` will have stale fields if the vhost-user backend fulfilled any virtqueue
    /// requests. This function updates the `Queue` to pick up where the backend left off.
    pub fn vhost_user_reclaim(&mut self, vring_base: u16) {
        match self {
            Queue::SplitVirtQueue(q) => q.vhost_user_reclaim(vring_base),
            Queue::PackedVirtQueue(q) => q.vhost_user_reclaim(vring_base),
        }
    }

    /// Getter for the next index of the available ring that the device will process.
    ///
    /// Not to be confused with the available ring's index field, which is the next index for the
    /// driver to fill.
    pub fn next_avail_to_process(&self) -> u16 {
        match self {
            Queue::SplitVirtQueue(q) => q.next_avail_to_process(),
            Queue::PackedVirtQueue(q) => q.next_avail_to_process(),
        }
    }

    define_queue_method!(
        /// Getter for vector field
        vector,
        u16,
    );

    define_queue_method!(
        /// Getter for descriptor area
        desc_table,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for driver area
        avail_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for device area
        used_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Return the actual size of the queue, as the driver may not set up a
        /// queue as big as the device allows.
        size,
        u16,
    );

    define_queue_method!(
        /// Get a reference to the queue's event.
        event,
        &Event,
    );

    define_queue_method!(
        /// Get a reference to the queue's interrupt.
        interrupt,
        &Interrupt,
    );

    /// Puts an available descriptor head into the used ring for use by the guest, using the
    /// number of bytes written to the `DescriptorChain`.
    pub fn add_used(&mut self, desc_chain: DescriptorChain) {
        let len: u32 = desc_chain.writer.bytes_written().try_into().unwrap();
        self.add_used_with_bytes_written(desc_chain, len);
    }

    /// Puts an available descriptor head into the used ring for use by the guest, explicitly
    /// specifying the number of bytes written.
    pub fn add_used_with_bytes_written(&mut self, desc_chain: DescriptorChain, len: u32) {
        let iter = std::iter::once((desc_chain, len));
        match self {
            Queue::SplitVirtQueue(q) => q.add_used_with_bytes_written_batch(iter),
            Queue::PackedVirtQueue(q) => q.add_used_with_bytes_written_batch(iter),
        }
    }

    /// Puts a batch of available descriptor heads into the used ring in a single operation, using
    /// `desc_chain.writer.bytes_written()` as the number of bytes written for each chain.
    pub fn add_used_batch(&mut self, desc_chains: impl IntoIterator<Item = DescriptorChain>) {
        let iter = desc_chains.into_iter().map(|desc_chain| {
            let len: u32 = desc_chain.writer.bytes_written().try_into().unwrap();
            (desc_chain, len)
        });
        match self {
            Queue::SplitVirtQueue(q) => q.add_used_with_bytes_written_batch(iter),
            Queue::PackedVirtQueue(q) => q.add_used_with_bytes_written_batch(iter),
        }
    }

    define_queue_method!(
        /// Take snapshot of queue's current status
        snapshot,
        Result<AnySnapshot>,
    );
}

/// A `DescriptorChain` that has been peeked from a `Queue` but not popped yet.
///
/// Call [`pop()`](Self::pop) to pop this descriptor chain from the `Queue` and receive the
/// contained `DescriptorChain` object.
///
/// This object holds a mutable reference to the `Queue` to ensure it is not possible to pop or
/// peek another descriptor while a peek is already active. Either call [`pop()`](Self::pop) or
/// drop this object before attempting to manipulate the `Queue` again.
pub struct PeekedDescriptorChain<'q> {
    queue: &'q mut Queue,
    desc_chain: DescriptorChain,
}

impl<'q> PeekedDescriptorChain<'q> {
    /// Create a `PeekedDescriptorChain` that holds a mutable reference to its `Queue`.
    /// Use [`Queue::peek()`] rather than calling this function.
    fn new(queue: &'q mut Queue, desc_chain: DescriptorChain) -> Self {
        PeekedDescriptorChain { queue, desc_chain }
    }

    /// Pop this descriptor chain from the queue.
    pub fn pop(self) -> DescriptorChain {
        match self.queue {
            Queue::SplitVirtQueue(q) => q.pop_peeked(&self.desc_chain),
            Queue::PackedVirtQueue(q) => q.pop_peeked(&self.desc_chain),
        }
        self.desc_chain
    }
}

impl Deref for PeekedDescriptorChain<'_> {
    type Target = DescriptorChain;

    fn deref(&self) -> &Self::Target {
        &self.desc_chain
    }
}

impl DerefMut for PeekedDescriptorChain<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.desc_chain
    }
}