// devices/usb/xhci/ring_buffer.rs

// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use std::fmt;
6use std::fmt::Display;
7use std::mem::size_of;
8
9use base::debug;
10use base::error;
11use remain::sorted;
12use thiserror::Error;
13use vm_memory::GuestAddress;
14use vm_memory::GuestMemory;
15use vm_memory::GuestMemoryError;
16
17use super::xhci_abi::AddressedTrb;
18use super::xhci_abi::Error as TrbError;
19use super::xhci_abi::LinkTrb;
20use super::xhci_abi::TransferDescriptor;
21use super::xhci_abi::Trb;
22use super::xhci_abi::TrbCast;
23use super::xhci_abi::TrbType;
24
/// Errors produced while reading or walking a TRB ring in guest memory.
///
/// Variants are kept alphabetically sorted (enforced by `#[sorted]`).
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    /// Advancing the dequeue pointer overflowed the guest address space.
    #[error("bad dequeue pointer: {0}")]
    BadDequeuePointer(GuestAddress),
    /// A TRB could not be reinterpreted as the expected concrete TRB type.
    #[error("cannot cast trb: {0}")]
    CastTrb(TrbError),
    /// Reading a TRB out of guest memory failed.
    #[error("cannot read guest memory: {0}")]
    ReadGuestMemory(GuestMemoryError),
    /// The chain bit could not be extracted from a TRB.
    #[error("cannot get trb chain bit: {0}")]
    TrbChain(TrbError),
}
37
/// Shorthand result type for ring-buffer operations in this module.
type Result<T> = std::result::Result<T, Error>;
39
/// Ring Buffer is segmented circular buffer in guest memory containing work items
/// called transfer descriptors, each of which consists of one or more TRBs.
/// Ring buffer logic is shared between transfer ring and command ring.
/// Transfer Ring management is defined in xHCI spec 4.9.2.
pub struct RingBuffer {
    // Human-readable name, used only in log/trace output and `Display`.
    name: String,
    // Guest memory backing the ring; TRBs are read from here.
    mem: GuestMemory,
    // Used to keep track where the emulated ring should get the next TRB.
    dequeue_pointer: GuestAddress,
    // Used to keep track where the hardware would get the next TRB.
    // Updated by `complete()` and copied into `dequeue_pointer` by
    // `synchronize_with_hardware()`.
    hw_dequeue_pointer: GuestAddress,
    // Used to check if the ring is empty. Toggled when looping back to the beginning
    // of the buffer (via a Link TRB with the toggle-cycle bit set).
    consumer_cycle_state: bool,
    // Used to keep track of the cycle state where the hardware is currently at.
    hw_consumer_cycle_state: bool,
}
57
58impl Display for RingBuffer {
59    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
60        write!(f, "RingBuffer `{}`", self.name)
61    }
62}
63
// Public interfaces for Ring buffer.
impl RingBuffer {
    /// Create a new RingBuffer.
    ///
    /// `name` is used only for logging; `mem` is the guest memory the ring
    /// lives in. Both dequeue pointers start at guest address 0 and both
    /// cycle states start as `false`; callers are expected to set real
    /// values via `set_dequeue_pointer` / `set_consumer_cycle_state`.
    pub fn new(name: String, mem: GuestMemory) -> Self {
        RingBuffer {
            name,
            mem,
            dequeue_pointer: GuestAddress(0),
            hw_dequeue_pointer: GuestAddress(0),
            consumer_cycle_state: false,
            hw_consumer_cycle_state: false,
        }
    }

    /// Dequeue next transfer descriptor from the transfer ring.
    ///
    /// Walks TRBs starting at the emulated dequeue pointer, following Link
    /// TRBs across segments, and collects TRBs until one with chain bit == 0
    /// terminates the transfer descriptor.
    ///
    /// Returns `Ok(None)` when no complete TD is available: either the ring
    /// is empty (cycle-bit mismatch on the first TRB) or the producer has only
    /// written a partial chain so far. NOTE(review): in the partial-chain case
    /// the emulated dequeue pointer has already advanced past the consumed
    /// TRBs; callers appear expected to use `synchronize_with_hardware()` to
    /// rewind to the hardware view — confirm against the controller code.
    pub fn dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>> {
        let mut trbs = Vec::new();
        while let Some(addressed_trb) = self.get_current_trb()? {
            if let Ok(TrbType::Link) = addressed_trb.trb.get_trb_type() {
                // Link TRB: jump to the next ring segment instead of
                // advancing linearly. The Link TRB itself is not part of
                // the transfer descriptor.
                let link_trb = addressed_trb
                    .trb
                    .cast::<LinkTrb>()
                    .map_err(Error::CastTrb)?;
                self.dequeue_pointer = GuestAddress(link_trb.get_ring_segment_pointer());
                // `!=` on bools is XOR: flip the consumer cycle state only
                // when the Link TRB's toggle-cycle bit is set.
                self.consumer_cycle_state =
                    self.consumer_cycle_state != link_trb.get_toggle_cycle();
                continue;
            }

            // Advance past the TRB we are about to consume; a wrap of the
            // 64-bit guest address is a corrupt ring.
            self.dequeue_pointer = match self.dequeue_pointer.checked_add(size_of::<Trb>() as u64) {
                Some(addr) => addr,
                None => {
                    return Err(Error::BadDequeuePointer(self.dequeue_pointer));
                }
            };

            xhci_trace!(
                "{}: adding trb {} to td {}",
                self.name.as_str(),
                addressed_trb.gpa,
                addressed_trb.trb
            );
            trbs.push(addressed_trb);
            // Chain bit == 0 marks the last TRB of this transfer descriptor.
            if !addressed_trb.trb.get_chain_bit().map_err(Error::TrbChain)? {
                debug!("xhci: trb chain is false returning");
                break;
            }
        }
        // A valid transfer descriptor contains at least one addressed trb and the last trb has
        // chain bit == 0.
        match trbs.last() {
            Some(t) => {
                // Ring ran dry mid-chain: the TD is incomplete, report none.
                if t.trb.get_chain_bit().map_err(Error::TrbChain)? {
                    return Ok(None);
                }
            }
            None => return Ok(None),
        }
        Ok(TransferDescriptor::new(trbs))
    }

    /// Get dequeue pointer of the ring buffer.
    pub fn get_dequeue_pointer(&self) -> GuestAddress {
        self.dequeue_pointer
    }

    /// Set dequeue pointer of the ring buffer.
    ///
    /// Also resets the cached hardware dequeue pointer to the same address,
    /// so the emulated and hardware views start out in sync.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        xhci_trace!("{}: set dequeue pointer {:x}", self.name.as_str(), addr.0);

        self.dequeue_pointer = addr;
        self.hw_dequeue_pointer = addr;
    }

    /// Get consumer cycle state of the ring buffer.
    pub fn get_consumer_cycle_state(&self) -> bool {
        self.consumer_cycle_state
    }

    /// Set consumer cycle state of the ring buffer.
    ///
    /// Also resets the cached hardware cycle state to the same value.
    pub fn set_consumer_cycle_state(&mut self, state: bool) {
        xhci_trace!("{}: set consumer cycle state {}", self.name.as_str(), state);
        self.consumer_cycle_state = state;
        self.hw_consumer_cycle_state = state;
    }

    // Read trb pointed by dequeue pointer. Does not proceed dequeue pointer.
    //
    // Returns Ok(None) when the TRB's cycle bit differs from our consumer
    // cycle state, which means the producer has not (yet) written a valid
    // TRB at this slot — i.e. the ring is empty from our point of view.
    fn get_current_trb(&self) -> Result<Option<AddressedTrb>> {
        let trb: Trb = self
            .mem
            .read_obj_from_addr(self.dequeue_pointer)
            .map_err(Error::ReadGuestMemory)?;
        xhci_trace!("{}: trb read from memory {:?}", self.name.as_str(), trb);
        // If cycle bit of trb does not equal consumer cycle state, the ring is empty.
        // This trb is invalid.
        if trb.get_cycle() != self.consumer_cycle_state {
            debug!(
                "xhci: cycle bit does not match, self cycle {}",
                self.consumer_cycle_state
            );
            Ok(None)
        } else {
            Ok(Some(AddressedTrb {
                trb,
                gpa: self.dequeue_pointer.0,
            }))
        }
    }

    /// Update the internal cache for HW dequeue pointer. The 'trb' argument should not be a
    /// LinkTRB.
    ///
    /// Records that the TRB at `trb.gpa` has been completed by hardware:
    /// the hardware dequeue pointer moves one TRB past it and the hardware
    /// cycle state takes the completed TRB's cycle bit.
    pub fn complete(&mut self, trb: &AddressedTrb) {
        // This addition should never fail, but if it did there's nothing we can do but keep the
        // old value and hope that the next completion puts back the hw_dequeue_pointer to a sane
        // value.
        self.hw_dequeue_pointer = match GuestAddress(trb.gpa).checked_add(size_of::<Trb>() as u64) {
            Some(addr) => addr,
            None => {
                error!("xhci: gpa of completed TRB is corrupted");
                self.hw_dequeue_pointer
            }
        };
        self.hw_consumer_cycle_state = trb.trb.get_cycle();
    }

    /// Synchronize the dequeue pointer and the consumer cycle state with the hardware values.
    ///
    /// Rewinds (or advances) the emulated view to match what `complete()`
    /// last recorded, discarding any TRBs dequeued but not completed.
    pub fn synchronize_with_hardware(&mut self) {
        self.dequeue_pointer = self.hw_dequeue_pointer;
        self.consumer_cycle_state = self.hw_consumer_cycle_state;
    }
}
195
#[cfg(test)]
mod test {
    use base::pagesize;

    use super::*;
    use crate::usb::xhci::xhci_abi::*;

    // Dequeues two transfer descriptors from a three-segment ring linked by
    // Link TRBs, and checks the TRBs land in the right descriptors.
    #[test]
    fn ring_test_dequeue() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        // Structure of ring buffer:
        //  0x100  --> 0x200  --> 0x300
        //  trb 1  |   trb 3  |   trb 5
        //  trb 2  |   trb 4  |   trb 6
        //  l trb  -   l trb  -   l trb to 0x100
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer_pointer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(4);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x200 + trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x300);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(5);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(6);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x300 + trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x100);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
            .unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 4);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);
        assert_eq!(descriptor[1].trb.get_parameter(), 2);
        assert_eq!(descriptor[2].trb.get_parameter(), 3);
        assert_eq!(descriptor[3].trb.get_parameter(), 4);

        // Read second transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 2);
        assert_eq!(descriptor[0].trb.get_parameter(), 5);
        assert_eq!(descriptor[1].trb.get_parameter(), 6);
    }

    // A chain that crosses a toggle-cycle Link TRB into a TRB with a stale
    // cycle bit must yield no transfer descriptor.
    #[test]
    fn transfer_ring_test_dequeue_failure() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer_pointer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        ltrb.set_toggle_cycle(true);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor. The chain is incomplete, so
        // nothing should be returned.
        let descriptor = transfer_ring.dequeue_transfer_descriptor().unwrap();
        assert!(descriptor.is_none());
    }

    // Exercises a single-segment ring that links back to itself with the
    // toggle-cycle bit set, verifying the consumer cycle state flips each
    // time the ring wraps.
    #[test]
    fn ring_test_toggle_cycle() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(false);
        trb.set_cycle(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(false);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Initial state: consumer cycle = false
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);

        // Cycle bit should be unchanged since we haven't advanced past the Link TRB yet.
        assert_eq!(transfer_ring.consumer_cycle_state, false);

        // Overwrite the first TRB with a new one (data = 2)
        // with the new producer cycle bit state (true).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(2);
        trb.set_cycle(true); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 2);

        assert_eq!(transfer_ring.consumer_cycle_state, true);

        // Update the Link TRB with the new cycle bit.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(true); // Producer cycle state is now 1.
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Overwrite the first TRB again with a new one (data = 3)
        // with the new producer cycle bit state (false).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(3);
        trb.set_cycle(false); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 3);

        assert_eq!(transfer_ring.consumer_cycle_state, false);
    }
}