devices/usb/xhci/ring_buffer.rs

// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5use std::fmt;
6use std::fmt::Display;
7use std::mem::size_of;
8
9use base::debug;
10use remain::sorted;
11use thiserror::Error;
12use vm_memory::GuestAddress;
13use vm_memory::GuestMemory;
14use vm_memory::GuestMemoryError;
15
16use super::xhci_abi::AddressedTrb;
17use super::xhci_abi::Error as TrbError;
18use super::xhci_abi::LinkTrb;
19use super::xhci_abi::TransferDescriptor;
20use super::xhci_abi::Trb;
21use super::xhci_abi::TrbCast;
22use super::xhci_abi::TrbType;
23
/// Errors that can occur while consuming TRBs from a ring buffer.
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    /// Advancing the dequeue pointer overflowed the guest address space.
    #[error("bad dequeue pointer: {0}")]
    BadDequeuePointer(GuestAddress),
    /// A TRB could not be reinterpreted as the expected concrete TRB type.
    #[error("cannot cast trb: {0}")]
    CastTrb(TrbError),
    /// Reading a TRB out of guest memory failed.
    #[error("cannot read guest memory: {0}")]
    ReadGuestMemory(GuestMemoryError),
    /// The chain bit could not be extracted from a TRB.
    #[error("cannot get trb chain bit: {0}")]
    TrbChain(TrbError),
}

/// Local result alias using the ring buffer [`Error`] type.
type Result<T> = std::result::Result<T, Error>;
/// Ring Buffer is segmented circular buffer in guest memory containing work items
/// called transfer descriptors, each of which consists of one or more TRBs.
/// Ring buffer logic is shared between transfer ring and command ring.
/// Transfer Ring management is defined in xHCI spec 4.9.2.
pub struct RingBuffer {
    // Human-readable name, used only in trace/debug output.
    name: String,
    // Guest memory that TRBs are read from.
    mem: GuestMemory,
    // Guest address of the next TRB to consume.
    dequeue_pointer: GuestAddress,
    // Used to check if the ring is empty. Toggled when looping back to the beginning
    // of the buffer.
    consumer_cycle_state: bool,
}
51
52impl Display for RingBuffer {
53    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
54        write!(f, "RingBuffer `{}`", self.name)
55    }
56}
57
58// Public interfaces for Ring buffer.
59impl RingBuffer {
60    /// Create a new RingBuffer.
61    pub fn new(name: String, mem: GuestMemory) -> Self {
62        RingBuffer {
63            name,
64            mem,
65            dequeue_pointer: GuestAddress(0),
66            consumer_cycle_state: false,
67        }
68    }
69
70    /// Dequeue next transfer descriptor from the transfer ring.
71    pub fn dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>> {
72        let mut td: TransferDescriptor = TransferDescriptor::new();
73        while let Some(addressed_trb) = self.get_current_trb()? {
74            if let Ok(TrbType::Link) = addressed_trb.trb.get_trb_type() {
75                let link_trb = addressed_trb
76                    .trb
77                    .cast::<LinkTrb>()
78                    .map_err(Error::CastTrb)?;
79                self.dequeue_pointer = GuestAddress(link_trb.get_ring_segment_pointer());
80                self.consumer_cycle_state =
81                    self.consumer_cycle_state != link_trb.get_toggle_cycle();
82                continue;
83            }
84
85            self.dequeue_pointer = match self.dequeue_pointer.checked_add(size_of::<Trb>() as u64) {
86                Some(addr) => addr,
87                None => {
88                    return Err(Error::BadDequeuePointer(self.dequeue_pointer));
89                }
90            };
91
92            xhci_trace!(
93                "{}: adding trb to td {}",
94                self.name.as_str(),
95                addressed_trb.trb
96            );
97            td.push(addressed_trb);
98            if !addressed_trb.trb.get_chain_bit().map_err(Error::TrbChain)? {
99                debug!("xhci: trb chain is false returning");
100                break;
101            }
102        }
103        // A valid transfer descriptor contains at least one addressed trb and the last trb has
104        // chain bit != 0.
105        match td.last() {
106            Some(t) => {
107                if t.trb.get_chain_bit().map_err(Error::TrbChain)? {
108                    return Ok(None);
109                }
110            }
111            None => return Ok(None),
112        }
113        Ok(Some(td))
114    }
115
116    /// Get dequeue pointer of the ring buffer.
117    pub fn get_dequeue_pointer(&self) -> GuestAddress {
118        self.dequeue_pointer
119    }
120
121    /// Set dequeue pointer of the ring buffer.
122    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
123        xhci_trace!("{}: set dequeue pointer {:x}", self.name.as_str(), addr.0);
124
125        self.dequeue_pointer = addr;
126    }
127
128    /// Get consumer cycle state of the ring buffer.
129    pub fn get_consumer_cycle_state(&self) -> bool {
130        self.consumer_cycle_state
131    }
132
133    /// Set consumer cycle state of the ring buffer.
134    pub fn set_consumer_cycle_state(&mut self, state: bool) {
135        xhci_trace!("{}: set consumer cycle state {}", self.name.as_str(), state);
136        self.consumer_cycle_state = state;
137    }
138
139    // Read trb pointed by dequeue pointer. Does not proceed dequeue pointer.
140    fn get_current_trb(&self) -> Result<Option<AddressedTrb>> {
141        let trb: Trb = self
142            .mem
143            .read_obj_from_addr(self.dequeue_pointer)
144            .map_err(Error::ReadGuestMemory)?;
145        xhci_trace!("{}: trb read from memory {:?}", self.name.as_str(), trb);
146        // If cycle bit of trb does not equal consumer cycle state, the ring is empty.
147        // This trb is invalid.
148        if trb.get_cycle() != self.consumer_cycle_state {
149            debug!(
150                "xhci: cycle bit does not match, self cycle {}",
151                self.consumer_cycle_state
152            );
153            Ok(None)
154        } else {
155            Ok(Some(AddressedTrb {
156                trb,
157                gpa: self.dequeue_pointer.0,
158            }))
159        }
160    }
161}
162
#[cfg(test)]
mod test {
    use base::pagesize;

    use super::*;
    use crate::usb::xhci::xhci_abi::*;

    // Dequeuing must walk across link TRBs transparently and split the TRB
    // stream into transfer descriptors wherever the chain bit is cleared.
    #[test]
    fn ring_test_dequeue() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        // Structure of ring buffer:
        //  0x100  --> 0x200  --> 0x300
        //  trb 1  |   trb 3  |   trb 5
        //  trb 2  |   trb 4  |   trb 6
        //  l trb  -   l trb  -   l trb to 0x100
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer_pointer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Link from the end of the first segment to the second at 0x200.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(4);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
            .unwrap();

        // Link from the second segment to the third at 0x300.
        ltrb.set_ring_segment_pointer(0x300);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(5);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(6);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
            .unwrap();

        // Final link closes the ring back to the first segment at 0x100.
        ltrb.set_ring_segment_pointer(0x100);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
            .unwrap();

        // All TRBs above were written with cycle bit 0, matching the consumer
        // cycle state set here, so they are all considered valid.
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        // Expect trbs 1-4: the chain runs across the 0x100->0x200 link and
        // ends at trb 4, whose chain bit is cleared.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 4);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);
        assert_eq!(descriptor[1].trb.get_parameter(), 2);
        assert_eq!(descriptor[2].trb.get_parameter(), 3);
        assert_eq!(descriptor[3].trb.get_parameter(), 4);

        // Read second transfer descriptor.
        // Expect trbs 5-6 from the third segment.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 2);
        assert_eq!(descriptor[0].trb.get_parameter(), 5);
        assert_eq!(descriptor[1].trb.get_parameter(), 6);
    }

    // A link TRB with toggle_cycle set flips the consumer cycle state; the TRB
    // after it still carries the old cycle bit, so the dequeue must report the
    // ring as empty (an incomplete chain yields None).
    #[test]
    fn transfer_ring_test_dequeue_failure() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer_pointer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Toggling link: after following it the consumer cycle state becomes
        // true, so the cycle-0 TRB at 0x200 below no longer matches.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        ltrb.set_toggle_cycle(true);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        // The chain (trb 1, trb 2, both chained) never terminates before the
        // ring runs dry, so no complete descriptor is available.
        let descriptor = transfer_ring.dequeue_transfer_descriptor().unwrap();
        assert_eq!(descriptor.is_none(), true);
    }

    // Exercises a one-TRB single-segment ring through two full laps, checking
    // that the consumer cycle state toggles exactly when the link TRB with
    // toggle_cycle is crossed.
    #[test]
    fn ring_test_toggle_cycle() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(false);
        trb.set_cycle(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Link back to the same (single) segment, toggling the cycle.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(false);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Initial state: consumer cycle = false
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);

        // Cycle bit should be unchanged since we haven't advanced past the Link TRB yet.
        assert_eq!(transfer_ring.consumer_cycle_state, false);

        // Overwrite the first TRB with a new one (data = 2)
        // with the new producer cycle bit state (true).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(2);
        trb.set_cycle(true); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 2);

        // Crossing the link TRB flipped the consumer cycle state to true.
        assert_eq!(transfer_ring.consumer_cycle_state, true);

        // Update the Link TRB with the new cycle bit.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(true); // Producer cycle state is now 1.
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Overwrite the first TRB again with a new one (data = 3)
        // with the new producer cycle bit state (false).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(3);
        trb.set_cycle(false); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 3);

        // Second lap across the link toggles the cycle state back to false.
        assert_eq!(transfer_ring.consumer_cycle_state, false);
    }
}
363}