devices/usb/xhci/
ring_buffer_controller.rs

1// Copyright 2019 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use std::fmt;
6use std::fmt::Display;
7use std::sync::Arc;
8use std::sync::MutexGuard;
9
10use anyhow::Context;
11use base::error;
12use base::Error as SysError;
13use base::Event;
14use base::EventType;
15use remain::sorted;
16use sync::Mutex;
17use thiserror::Error;
18use vm_memory::GuestAddress;
19use vm_memory::GuestMemory;
20
21use super::ring_buffer::RingBuffer;
22use super::ring_buffer_stop_cb::RingBufferStopCallback;
23use super::xhci_abi::TransferDescriptor;
24use crate::utils;
25use crate::utils::EventHandler;
26use crate::utils::EventLoop;
27
/// Errors that can occur while constructing a ring buffer controller.
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("failed to add event to event loop: {0}")]
    AddEvent(utils::Error),
    #[error("failed to create event: {0}")]
    CreateEvent(SysError),
}
36
/// Module-local result type specialized to [`Error`].
type Result<T> = std::result::Result<T, Error>;
38
/// Run state of a `RingBufferController`; guarded by the controller's state mutex.
#[derive(PartialEq, Copy, Clone, Eq)]
enum RingBufferState {
    /// Running: RingBuffer is running, consuming transfer descriptors.
    Running,
    /// Stopped: RingBuffer already stopped.
    Stopped,
}
46
/// TransferDescriptorHandler handles transfer descriptor. User should implement this trait and
/// build a ring buffer controller with the struct.
pub trait TransferDescriptorHandler {
    /// Process descriptor asynchronously, write complete_event when done.
    ///
    /// Signaling `complete_event` re-triggers the controller's event handler,
    /// which dequeues the next descriptor from the ring.
    fn handle_transfer_descriptor(
        &self,
        descriptor: TransferDescriptor,
        complete_event: Event,
    ) -> anyhow::Result<()>;

    /// Cancel transfer descriptors. This is called when stopping a ring buffer controller, due to
    /// receiving a Stop Endpoint command.
    /// There may be one or more transfers in-flight at the hardware level and the xHCI spec says
    /// we need to cancel or complete them before sending the completion event for the Stop
    /// Endpoint command. Use the callback to send the completion event once all the in-flight ones
    /// are cleared.
    ///
    /// The default implementation is a no-op: dropping `callback` without
    /// invoking it is treated as "nothing in flight".
    fn cancel(&self, _callback: RingBufferStopCallback) {}
}
65
/// RingBufferController owns a ring buffer. It lives on a event_loop. It will pop out transfer
/// descriptor and let TransferDescriptorHandler handle it.
pub struct RingBufferController<T: 'static + TransferDescriptorHandler> {
    // Human-readable name used in trace/log messages and Display output.
    name: String,
    // Running/Stopped state; the lock coordinates start()/stop()/on_event().
    state: Mutex<RingBufferState>,
    // Guest-memory-backed transfer ring this controller dequeues from.
    ring_buffer: Mutex<RingBuffer>,
    // User-supplied handler invoked for each dequeued transfer descriptor.
    handler: Mutex<T>,
    // Event loop this controller is registered on; used to deregister on drop.
    event_loop: Arc<EventLoop>,
    // Signaled to schedule on_event(); clones of it are handed to the handler
    // as per-descriptor completion events.
    event: Event,
}
76
77impl<T: 'static + TransferDescriptorHandler> Display for RingBufferController<T> {
78    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
79        write!(f, "RingBufferController `{}`", self.name)
80    }
81}
82
impl<T: Send> RingBufferController<T>
where
    T: 'static + TransferDescriptorHandler,
{
    /// Create a ring buffer controller and add it to event loop.
    ///
    /// The controller is registered with the event loop through a weak
    /// reference, so the loop does not keep it alive; the `Drop` impl
    /// deregisters the event when the last `Arc` is dropped.
    pub fn new_with_handler(
        name: String,
        mem: GuestMemory,
        event_loop: Arc<EventLoop>,
        handler: T,
    ) -> Result<Arc<RingBufferController<T>>> {
        let evt = Event::new().map_err(Error::CreateEvent)?;
        // Start in Stopped state; start() flips to Running and kicks the event.
        let controller = Arc::new(RingBufferController {
            name: name.clone(),
            state: Mutex::new(RingBufferState::Stopped),
            ring_buffer: Mutex::new(RingBuffer::new(name, mem)),
            handler: Mutex::new(handler),
            event_loop: event_loop.clone(),
            event: evt,
        });
        let event_handler: Arc<dyn EventHandler> = controller.clone();
        event_loop
            .add_event(
                &controller.event,
                EventType::Read,
                Arc::downgrade(&event_handler),
            )
            .map_err(Error::AddEvent)?;
        Ok(controller)
    }

    /// Lock the internal ring buffer for direct access.
    fn lock_ring_buffer(&self) -> MutexGuard<RingBuffer> {
        self.ring_buffer.lock()
    }

    /// Get dequeue pointer of the internal ring buffer.
    pub fn get_dequeue_pointer(&self) -> GuestAddress {
        self.lock_ring_buffer().get_dequeue_pointer()
    }

    /// Set dequeue pointer of the internal ring buffer.
    pub fn set_dequeue_pointer(&self, ptr: GuestAddress) {
        xhci_trace!("{}: set_dequeue_pointer({:x})", self.name, ptr.0);
        // Fast because this should only happen during xhci setup.
        self.lock_ring_buffer().set_dequeue_pointer(ptr);
    }

    /// Get consumer cycle state.
    pub fn get_consumer_cycle_state(&self) -> bool {
        self.lock_ring_buffer().get_consumer_cycle_state()
    }

    /// Set consumer cycle state.
    pub fn set_consumer_cycle_state(&self, state: bool) {
        xhci_trace!("{}: set consumer cycle state: {}", self.name, state);
        // Fast because this should only happen during xhci setup.
        self.lock_ring_buffer().set_consumer_cycle_state(state);
    }

    /// Start the ring buffer.
    ///
    /// Idempotent: the event is only signaled on the Stopped -> Running edge.
    pub fn start(&self) {
        xhci_trace!("start {}", self.name);
        let mut state = self.state.lock();
        if *state != RingBufferState::Running {
            *state = RingBufferState::Running;
            // Kick on_event() on the event loop to begin draining the ring.
            if let Err(e) = self.event.signal() {
                error!("cannot start event ring: {}", e);
            }
        }
    }

    /// Stop the ring buffer asynchronously.
    ///
    /// `callback` is passed to the handler's cancel(), which invokes it once
    /// all in-flight transfers are canceled or completed.
    pub fn stop(&self, callback: RingBufferStopCallback) {
        xhci_trace!("stop {}", self.name);

        // Holding the state lock prevents on_event() from dequeuing new
        // descriptors while the in-flight ones are being canceled.
        let mut state = self.state.lock();
        self.handler.lock().cancel(callback);
        *state = RingBufferState::Stopped;
    }
}
164
165impl<T> Drop for RingBufferController<T>
166where
167    T: 'static + TransferDescriptorHandler,
168{
169    fn drop(&mut self) {
170        // Remove self from the event loop.
171        if let Err(e) = self.event_loop.remove_event_for_descriptor(&self.event) {
172            error!(
173                "cannot remove ring buffer controller from event loop: {}",
174                e
175            );
176        }
177    }
178}
179
impl<T> EventHandler for RingBufferController<T>
where
    T: 'static + TransferDescriptorHandler + Send,
{
    /// Runs on the event loop whenever `self.event` is signaled (by start()
    /// or by a handler completing a descriptor) and drains the ring buffer.
    fn on_event(&self) -> anyhow::Result<()> {
        // `self.event` triggers ring buffer controller to run.
        self.event.wait().context("cannot read from event")?;

        // ISOC transfers post many small descriptors at once, which need to be submitted to xHCI
        // as soon as possible to keep up with the transfer rate. Otherwise, the device will send
        // an error in later TDs.
        loop {
            // Hold the state lock for the whole iteration so stop() cannot
            // interleave between the state check and the handler dispatch.
            let mut state = self.state.lock();

            match *state {
                RingBufferState::Stopped => return Ok(()),
                RingBufferState::Running => {}
            }

            let transfer_descriptor = self
                .lock_ring_buffer()
                .dequeue_transfer_descriptor()
                .context("cannot dequeue transfer descriptor")?;

            let transfer_descriptor = match transfer_descriptor {
                Some(t) => t,
                None => {
                    // Ring is empty: go idle until start() signals the event
                    // again.
                    *state = RingBufferState::Stopped;
                    return Ok(());
                }
            };

            // The cloned event doubles as the completion notification: the
            // handler signals it when done, re-triggering on_event().
            let event = self.event.try_clone().context("cannot clone event")?;
            self.handler
                .lock()
                .handle_transfer_descriptor(transfer_descriptor, event)?;
        }
    }
}
219
#[cfg(test)]
mod tests {
    use std::mem::size_of;
    use std::sync::mpsc::channel;
    use std::sync::mpsc::Sender;

    use base::pagesize;

    use super::super::xhci_abi::LinkTrb;
    use super::super::xhci_abi::NormalTrb;
    use super::super::xhci_abi::Trb;
    use super::super::xhci_abi::TrbType;
    use super::*;

    // Handler that forwards each TRB's data-buffer pointer over a channel so
    // the test can assert on dequeue order.
    struct TestHandler {
        sender: Sender<i32>,
    }

    impl TransferDescriptorHandler for TestHandler {
        fn handle_transfer_descriptor(
            &self,
            descriptor: TransferDescriptor,
            complete_event: Event,
        ) -> anyhow::Result<()> {
            for atrb in descriptor {
                assert_eq!(atrb.trb.get_trb_type().unwrap(), TrbType::Normal);
                self.sender.send(atrb.trb.get_parameter() as i32).unwrap();
            }
            // Signal completion so the controller dequeues the next descriptor.
            complete_event.signal().unwrap();
            Ok(())
        }
    }

    // Builds one page of guest memory containing a three-segment transfer
    // ring whose normal TRBs carry data-buffer pointers 1..=6. Note `trb` and
    // `ltrb` are mutated and rewritten sequentially, so the order of the
    // writes below matters.
    fn setup_mem() -> GuestMemory {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();

        // Structure of ring buffer:
        //  0x100  --> 0x200  --> 0x300
        //  trb 1  |   trb 3  |   trb 5
        //  trb 2  |   trb 4  |   trb 6
        //  l trb  -   l trb  -   l trb to 0x100
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer_pointer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x100)).unwrap();

        trb.set_data_buffer_pointer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(4);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x300);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer_pointer(5);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer_pointer(6);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
            .unwrap();

        // Last link TRB toggles the cycle bit and wraps back to 0x100.
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
            .unwrap();
        gm
    }

    #[test]
    fn test_ring_buffer_controller() {
        let (tx, rx) = channel();
        let mem = setup_mem();
        let (l, j) = EventLoop::start("test".to_string(), None).unwrap();
        let l = Arc::new(l);
        let controller = RingBufferController::new_with_handler(
            "".to_string(),
            mem,
            l.clone(),
            TestHandler { sender: tx },
        )
        .unwrap();
        controller.set_dequeue_pointer(GuestAddress(0x100));
        controller.set_consumer_cycle_state(false);
        controller.start();
        // TRBs must arrive in ring order across all three segments.
        assert_eq!(rx.recv().unwrap(), 1);
        assert_eq!(rx.recv().unwrap(), 2);
        assert_eq!(rx.recv().unwrap(), 3);
        assert_eq!(rx.recv().unwrap(), 4);
        assert_eq!(rx.recv().unwrap(), 5);
        assert_eq!(rx.recv().unwrap(), 6);
        l.stop();
        j.join().unwrap();
    }
}
333}