// usb_util/device.rs

// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5use std::convert::TryInto;
6use std::fs::File;
7use std::io::Read;
8use std::io::Seek;
9use std::io::SeekFrom;
10use std::mem::size_of_val;
11use std::os::raw::c_int;
12use std::os::raw::c_uchar;
13use std::os::raw::c_uint;
14use std::os::raw::c_void;
15use std::sync::atomic::AtomicBool;
16use std::sync::atomic::AtomicUsize;
17use std::sync::atomic::Ordering;
18use std::sync::Arc;
19use std::sync::Weak;
20
21use base::error;
22use base::handle_eintr_errno;
23use base::warn;
24use base::AsRawDescriptor;
25use base::IoctlNr;
26use base::MappedRegion;
27use base::MemoryMapping;
28use base::MemoryMappingBuilder;
29use base::Protection;
30use base::RawDescriptor;
31use data_model::vec_with_array_field;
32use libc::EAGAIN;
33use libc::ECONNRESET;
34use libc::ENODEV;
35use libc::ENOENT;
36use libc::EPIPE;
37use libc::ESHUTDOWN;
38use sync::Mutex;
39
40use crate::control_request_type;
41use crate::descriptor;
42use crate::ConfigDescriptorTree;
43use crate::ControlRequestDataPhaseTransferDirection;
44use crate::ControlRequestRecipient;
45use crate::ControlRequestType;
46use crate::DeviceDescriptor;
47use crate::DeviceDescriptorTree;
48use crate::DeviceSpeed;
49use crate::Error;
50use crate::Result;
51use crate::StandardControlRequest;
52
// This is the maximum block size observed during storage performance test.
// It bounds the single DMA area mapped from the usbdevfs fd in Device::new().
const MMAP_SIZE: usize = 1024 * 1024;
55
/// ManagedDmaBuffer represents the entire DMA buffer allocated by a device.
///
/// At most one sub-range of `buf` is lent out at a time (tracked in `used`);
/// see `Device::reserve_dma_buffer()` / `Device::release_dma_buffer()`.
struct ManagedDmaBuffer {
    /// The entire DMA buffer
    buf: MemoryMapping,
    /// A DMA buffer lent to a TransferBuffer. This is a part of the entire buffer.
    used: Option<Arc<Mutex<DmaBuffer>>>,
}
63
/// DmaBuffer represents a DMA buffer lent by a device.
pub struct DmaBuffer {
    /// Host virtual address of the buffer
    addr: u64,
    /// Size of the buffer in bytes
    size: usize,
}
71
impl DmaBuffer {
    /// Returns the buffer's host virtual address as a raw pointer.
    pub fn address(&mut self) -> *mut c_void {
        self.addr as *mut c_void
    }

    /// Returns the size of the buffer in bytes.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Views the buffer as an immutable byte slice.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY:
        // Safe because the region has been lent by a device, which keeps the
        // underlying mapping alive for the lifetime of this DmaBuffer.
        unsafe { std::slice::from_raw_parts(self.addr as *const u8, self.size) }
    }

    /// Views the buffer as a mutable byte slice.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY:
        // Safe because the region has been lent by a device, which keeps the
        // underlying mapping alive for the lifetime of this DmaBuffer.
        unsafe { std::slice::from_raw_parts_mut(self.addr as *mut u8, self.size) }
    }
}
93
/// TransferBuffer is used for data transfer between crosvm and the host kernel.
#[derive(Clone)]
pub enum TransferBuffer {
    /// An ordinary heap-allocated buffer.
    Vector(Vec<u8>),
    /// A sub-range of a device's mmap-ed DMA buffer; the weak reference is dead
    /// once the buffer has been released back to the device.
    Dma(Weak<Mutex<DmaBuffer>>),
}
100
101impl TransferBuffer {
102    pub fn address(&mut self) -> Option<*mut c_void> {
103        match self {
104            TransferBuffer::Vector(v) => Some(v.as_mut_ptr() as *mut c_void),
105            TransferBuffer::Dma(buf) => buf.upgrade().map(|buf| buf.lock().address()),
106        }
107    }
108    pub fn size(&self) -> Option<usize> {
109        match self {
110            TransferBuffer::Vector(v) => Some(v.len()),
111            TransferBuffer::Dma(buf) => buf.upgrade().map(|buf| buf.lock().size()),
112        }
113    }
114}
115
/// Device represents a USB device.
pub struct Device {
    /// Shared usbdevfs file descriptor (e.g. `/dev/bus/usb/001/002`).
    fd: Arc<File>,
    /// Descriptors parsed from the usbdevfs file at construction time.
    device_descriptor_tree: DeviceDescriptorTree,
    /// mmap-ed DMA area, if the mapping succeeded in `new()`.
    dma_buffer: Option<ManagedDmaBuffer>,
    /// Number of URBs submitted but not yet reaped.
    in_flight_transfers: AtomicUsize,
    /// Set by `set_detaching()`; new submissions are rejected once set.
    detaching: AtomicBool,
    /// Set when the kernel reports ENODEV/ESHUTDOWN while reaping.
    is_lost: AtomicBool,
    /// Set when reaping fails with an unexpected error.
    is_unrecoverable: AtomicBool,
    /// Serializes `TransferHandle::cancel()` against `poll_transfers()`.
    cancel_lock: Arc<Mutex<()>>,
}
127
/// Transfer contains the information necessary to submit a USB request
/// and, once it has been submitted and completed, contains the response.
pub struct Transfer {
    // NOTE: This Vec is actually a single URB with a trailing
    // variable-length field created by vec_with_array_field().
    urb: Vec<usb_sys::usbdevfs_urb>,
    /// Data buffer shared with the kernel for the duration of the transfer.
    pub buffer: TransferBuffer,
    /// Invoked by `Device::poll_transfers()` when the transfer completes.
    callback: Option<Box<dyn Fn(Transfer) + Send + Sync>>,
}
137
/// TransferHandle is a handle that allows cancellation of in-flight transfers
/// between submit_transfer() and get_completed_transfer().
/// Attempting to cancel a transfer that has already completed is safe and will
/// return an error.
pub struct TransferHandle {
    /// Dead once the transfer has been reaped by `poll_transfers()`.
    weak_transfer: std::sync::Weak<Transfer>,
    /// Dead once the owning `Device` has been dropped.
    fd: std::sync::Weak<File>,
    /// Same lock as `Device::cancel_lock`; see `cancel()`.
    cancel_lock: Arc<Mutex<()>>,
}
147
/// Status of a completed transfer, derived from the URB status code
/// (see `Transfer::status()`).
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TransferStatus {
    /// The transfer finished successfully (status 0).
    Completed,
    /// Any error not covered by the other variants.
    Error,
    /// The transfer was cancelled (-ENOENT / -ECONNRESET).
    Cancelled,
    /// The device is gone (-ENODEV / -ESHUTDOWN).
    NoDevice,
    /// The endpoint stalled (-EPIPE).
    Stalled,
}
156
157impl Device {
    /// Create a new `Device` from a file descriptor.
    /// `fd` should be a file in usbdevfs (e.g. `/dev/bus/usb/001/002`).
    pub fn new(mut fd: File) -> Result<Self> {
        // Reading a usbdevfs device file yields its descriptors; parse them up
        // front so later descriptor queries never touch the fd.
        fd.seek(SeekFrom::Start(0)).map_err(Error::DescriptorRead)?;
        let mut descriptor_data = Vec::new();
        fd.read_to_end(&mut descriptor_data)
            .map_err(Error::DescriptorRead)?;
        let device_descriptor_tree = descriptor::parse_usbfs_descriptors(&descriptor_data)?;

        let mut device = Device {
            fd: Arc::new(fd),
            device_descriptor_tree,
            dma_buffer: None,
            in_flight_transfers: AtomicUsize::new(0),
            detaching: AtomicBool::new(false),
            is_lost: AtomicBool::new(false),
            is_unrecoverable: AtomicBool::new(false),
            cancel_lock: Arc::new(Mutex::new(())),
        };

        // Try to map a DMA area from the usbdevfs fd to back data transfers.
        let map = MemoryMappingBuilder::new(MMAP_SIZE)
            .from_file(&device.fd)
            .protection(Protection::read_write())
            .build();
        match map {
            Ok(map) => {
                device.dma_buffer = Some(ManagedDmaBuffer {
                    buf: map,
                    used: None,
                });
            }
            Err(e) => {
                // Ignore the error since we can process requests without DMA buffer
                warn!(
                    "mmap() failed. User-provided buffer will be used for data transfer. {}",
                    e
                );
            }
        }
        Ok(device)
    }
199
200    pub fn fd(&self) -> Arc<File> {
201        self.fd.clone()
202    }
203
    /// Issue an argument-less ioctl on the device fd, retrying on EINTR.
    ///
    /// # Safety
    /// `nr` must be an ioctl that takes no argument and is valid for a
    /// usbdevfs file descriptor.
    unsafe fn ioctl(&self, nr: IoctlNr) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl(&*self.fd, nr));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }
211
    /// Issue an ioctl with a read-only argument, retrying on EINTR.
    ///
    /// # Safety
    /// `nr` must be a valid usbdevfs ioctl whose argument type matches `T`.
    unsafe fn ioctl_with_ref<T>(&self, nr: IoctlNr, arg: &T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_ref(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }
219
    /// Issue an ioctl with a mutable argument, retrying on EINTR.
    ///
    /// # Safety
    /// `nr` must be a valid usbdevfs ioctl whose argument type matches `T`.
    unsafe fn ioctl_with_mut_ref<T>(&self, nr: IoctlNr, arg: &mut T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }
227
    /// Issue an ioctl with a raw mutable pointer argument, retrying on EINTR.
    ///
    /// # Safety
    /// `nr` must be a valid usbdevfs ioctl whose argument type matches `T`,
    /// and `arg` must satisfy that ioctl's pointer requirements.
    unsafe fn ioctl_with_mut_ptr<T>(&self, nr: IoctlNr, arg: *mut T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_mut_ptr(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }
235
236    pub fn reserve_dma_buffer(&mut self, size: usize) -> Result<Weak<Mutex<DmaBuffer>>> {
237        if let Some(managed) = &mut self.dma_buffer {
238            if managed.used.is_none() {
239                let buf = Arc::new(Mutex::new(DmaBuffer {
240                    addr: managed.buf.as_ptr() as u64,
241                    size,
242                }));
243                let ret = Ok(Arc::downgrade(&buf));
244                managed.used = Some(buf);
245                return ret;
246            }
247        }
248        Err(Error::GetDmaBufferFailed(size))
249    }
250
251    pub fn release_dma_buffer(&mut self, dmabuf: Weak<Mutex<DmaBuffer>>) -> Result<()> {
252        if let Some(managed) = &mut self.dma_buffer {
253            if let Some(released) = dmabuf.upgrade() {
254                let addr = { released.lock().address() as u64 };
255                if let Some(lent) = &managed.used {
256                    if lent.lock().addr == addr {
257                        managed.used = None;
258                        return Ok(());
259                    }
260                }
261            }
262        }
263        Err(Error::ReleaseDmaBufferFailed)
264    }
265
    /// Submit a transfer to the device.
    /// The transfer will be processed asynchronously by the device.
    /// Call `poll_transfers()` on this device to check for completed transfers.
    pub fn submit_transfer(&mut self, transfer: Transfer) -> Result<TransferHandle> {
        if self.is_detaching() || self.is_device_lost() || self.is_unrecoverable() {
            return Err(Error::NoDevice);
        }

        let mut rc_transfer = Arc::new(transfer);

        // Technically, Arc::from_raw() should only be called on pointers returned
        // from Arc::into_raw(). However, we need to stash this value inside the
        // Arc<Transfer> itself, so we manually calculate the address that would be
        // returned from Arc::into_raw() via Deref and then call Arc::into_raw()
        // to forget the Arc without dropping its contents.
        // Do not remove the into_raw() call!
        let raw_transfer = (&*rc_transfer) as *const Transfer as usize;
        match Arc::get_mut(&mut rc_transfer) {
            Some(t) => t.urb_mut().usercontext = raw_transfer,
            None => {
                // This should never happen, since there is only one strong reference
                // at this point.
                return Err(Error::RcGetMutFailed);
            }
        }
        // Leak one strong reference; poll_transfers() (or the error path below)
        // reclaims it with Arc::from_raw().
        let _ = Arc::into_raw(rc_transfer.clone());

        let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb;

        // SAFETY:
        // Safe because we control the lifetime of the URB via Arc::into_raw() and
        // Arc::from_raw() in poll_transfers().
        unsafe {
            if let Err(e) = self.ioctl_with_mut_ptr(usb_sys::USBDEVFS_SUBMITURB, urb_ptr) {
                // Reclaim the leaked Arc reference if submission failed.
                let leaked_transfer = Arc::from_raw(raw_transfer as *const Transfer);
                if let TransferBuffer::Dma(buf) = &leaked_transfer.buffer {
                    if self.release_dma_buffer(buf.clone()).is_err() {
                        warn!("failed to release dma buffer");
                    }
                }
                return Err(e);
            }
        }

        let _ = self.in_flight_transfers.fetch_add(1, Ordering::SeqCst);
        let weak_transfer = Arc::downgrade(&rc_transfer);

        Ok(TransferHandle {
            weak_transfer,
            fd: Arc::downgrade(&self.fd),
            cancel_lock: self.cancel_lock.clone(),
        })
    }
320
    /// Check for completed asynchronous transfers submitted via `submit_transfer()`.
    /// The callback for each completed transfer will be called.
    pub fn poll_transfers(&mut self) -> Result<()> {
        // Reap completed transfers until we get EAGAIN.
        loop {
            let mut urb_ptr: *mut usb_sys::usbdevfs_urb = std::ptr::null_mut();
            // SAFETY:
            // Safe because we provide a valid urb_ptr to be filled by the kernel.
            let result =
                unsafe { self.ioctl_with_mut_ref(usb_sys::USBDEVFS_REAPURBNDELAY, &mut urb_ptr) };
            match result {
                // EAGAIN indicates no more completed transfers right now.
                Err(Error::IoctlFailed(_nr, e)) if e.errno() == EAGAIN => break,
                // ENODEV/ESHUTDOWN indicates the device is gone.
                Err(Error::IoctlFailed(_nr, e))
                    if e.errno() == ENODEV || e.errno() == ESHUTDOWN =>
                {
                    self.is_lost.store(true, Ordering::SeqCst);
                    break;
                }
                Err(e) => {
                    // Any other reap failure leaves the device in an unknown state;
                    // mark it unrecoverable so further submissions are rejected.
                    self.is_unrecoverable.store(true, Ordering::SeqCst);
                    return Err(e);
                }
                Ok(_) => {}
            }

            if urb_ptr.is_null() {
                break;
            }

            let _ = self.in_flight_transfers.fetch_sub(1, Ordering::SeqCst);

            let mut transfer = {
                // Synchronize with TransferHandle::cancel to ensure it drops its strong Arc
                // reference before we attempt try_unwrap.
                let _guard = self.cancel_lock.lock();

                // SAFETY:
                // Safe because the URB usercontext field is always set to the result of
                // Arc::into_raw() in submit_transfer().
                let rc_transfer: Arc<Transfer> =
                    unsafe { Arc::from_raw((*urb_ptr).usercontext as *const Transfer) };

                // There should always be exactly one strong reference to rc_transfer because
                // cancel_lock guarantees cancel() is not holding a reference.
                Arc::try_unwrap(rc_transfer).map_err(|_| Error::RcUnwrapFailed)?
            };

            // Remember the DMA buffer (if any) so it can be returned to the device
            // after the completion callback consumes the transfer.
            let dmabuf = match &mut transfer.buffer {
                TransferBuffer::Dma(buf) => Some(buf.clone()),
                TransferBuffer::Vector(_) => None,
            };

            if let Some(cb) = transfer.callback.take() {
                cb(transfer);
            }

            if let Some(dmabuf) = dmabuf {
                if self.release_dma_buffer(dmabuf).is_err() {
                    warn!("failed to release dma buffer");
                }
            }
        }

        Ok(())
    }
388
    /// Returns true when every submitted URB has been reaped.
    fn no_in_flight_transfer(&self) -> bool {
        self.in_flight_transfers.load(Ordering::SeqCst) == 0
    }
392
    /// Returns true once `set_detaching()` has been called.
    fn is_detaching(&self) -> bool {
        self.detaching.load(Ordering::SeqCst)
    }
396
    /// Return true if the device is lost (the kernel reported
    /// ENODEV/ESHUTDOWN while reaping transfers).
    pub fn is_device_lost(&self) -> bool {
        self.is_lost.load(Ordering::SeqCst)
    }
401
    /// Request the device to get ready for detaching. Check the status with ready_to_detach().
    /// After this call, `submit_transfer()` rejects new transfers.
    pub fn set_detaching(&self) {
        self.detaching.store(true, Ordering::SeqCst);
    }
406
    /// Check if the device is ready to be detached, i.e., if we have reaped all the transfers
    /// we've submitted to the host. Returns true when ready.
    /// An unrecoverable device is also considered ready, since its transfers
    /// can no longer be reaped.
    pub fn ready_to_detach(&self) -> bool {
        self.is_detaching() && (self.is_unrecoverable() || self.no_in_flight_transfer())
    }
412
    /// Returns true once reaping has failed with an unexpected error.
    fn is_unrecoverable(&self) -> bool {
        self.is_unrecoverable.load(Ordering::SeqCst)
    }
416
    /// Drop the DMA buffer, unmapping the DMA area. Subsequent
    /// `reserve_dma_buffer()` calls will fail.
    pub fn drop_dma_buffer(&mut self) {
        self.dma_buffer.take();
    }
421
422    /// Perform a USB port reset to reinitialize a device.
423    pub fn reset(&self) -> Result<()> {
424        // TODO(dverkamp): re-enable reset once crbug.com/1058059 is resolved.
425        // Skip reset for all non-Edge TPU devices.
426        let vid = self.device_descriptor_tree.idVendor;
427        let pid = self.device_descriptor_tree.idProduct;
428        match (vid, pid) {
429            (0x1a6e, 0x089a) => (),
430            _ => return Ok(()),
431        }
432
433        // SAFETY:
434        // Safe because self.fd is a valid usbdevfs file descriptor.
435        let result = unsafe { self.ioctl(usb_sys::USBDEVFS_RESET) };
436
437        if let Err(Error::IoctlFailed(_nr, errno_err)) = result {
438            // The device may disappear after a reset if e.g. its firmware changed.
439            // Treat that as success.
440            if errno_err.errno() == libc::ENODEV {
441                return Ok(());
442            }
443        }
444
445        result?;
446        Ok(())
447    }
448
449    /// Claim an interface on this device.
450    pub fn claim_interface(&self, interface_number: u8) -> Result<()> {
451        let disconnect_claim = usb_sys::usbdevfs_disconnect_claim {
452            interface: interface_number.into(),
453            flags: 0,
454            driver: [0u8; 256],
455        };
456        // SAFETY:
457        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
458        // pointer to a usbdevs_disconnect_claim structure.
459        unsafe {
460            self.ioctl_with_ref(usb_sys::USBDEVFS_DISCONNECT_CLAIM, &disconnect_claim)?;
461        }
462
463        Ok(())
464    }
465
466    /// Release an interface previously claimed with `claim_interface()`.
467    pub fn release_interface(&self, interface_number: u8) -> Result<()> {
468        let ifnum: c_uint = interface_number.into();
469        // SAFETY:
470        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
471        // pointer to unsigned int.
472        unsafe {
473            self.ioctl_with_ref(usb_sys::USBDEVFS_RELEASEINTERFACE, &ifnum)?;
474        }
475
476        Ok(())
477    }
478
479    /// Activate an alternate setting for an interface.
480    pub fn set_interface_alt_setting(
481        &self,
482        interface_number: u8,
483        alternative_setting: u8,
484    ) -> Result<()> {
485        let setinterface = usb_sys::usbdevfs_setinterface {
486            interface: interface_number.into(),
487            altsetting: alternative_setting.into(),
488        };
489        // SAFETY:
490        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
491        // pointer to a usbdevfs_setinterface structure.
492        unsafe {
493            self.ioctl_with_ref(usb_sys::USBDEVFS_SETINTERFACE, &setinterface)?;
494        }
495        Ok(())
496    }
497
498    /// Set active configuration for this device.
499    pub fn set_active_configuration(&mut self, config: u8) -> Result<()> {
500        let config: c_int = config.into();
501        // SAFETY:
502        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
503        // pointer to int.
504        unsafe {
505            self.ioctl_with_ref(usb_sys::USBDEVFS_SETCONFIGURATION, &config)?;
506        }
507
508        Ok(())
509    }
510
    /// Get the device descriptor of this device.
    /// Returns a copy taken from the descriptor tree parsed at construction;
    /// no I/O is performed.
    pub fn get_device_descriptor(&self) -> Result<DeviceDescriptor> {
        Ok(*self.device_descriptor_tree)
    }
515
    /// Borrow the full descriptor tree parsed at construction time.
    pub fn get_device_descriptor_tree(&self) -> &DeviceDescriptorTree {
        &self.device_descriptor_tree
    }
519
520    /// Get active config descriptor of this device.
521    pub fn get_config_descriptor(&self, config: u8) -> Result<ConfigDescriptorTree> {
522        match self.device_descriptor_tree.get_config_descriptor(config) {
523            Some(config_descriptor) => Ok(config_descriptor.clone()),
524            None => Err(Error::NoSuchDescriptor),
525        }
526    }
527
528    /// Get a configuration descriptor by its index within the list of descriptors returned
529    /// by the device.
530    pub fn get_config_descriptor_by_index(&self, config_index: u8) -> Result<ConfigDescriptorTree> {
531        match self
532            .device_descriptor_tree
533            .get_config_descriptor_by_index(config_index)
534        {
535            Some(config_descriptor) => Ok(config_descriptor.clone()),
536            None => Err(Error::NoSuchDescriptor),
537        }
538    }
539
    /// Get bConfigurationValue of the currently active configuration.
    pub fn get_active_configuration(&self) -> Result<u8> {
        // If the device only exposes a single configuration, bypass the control transfer below
        // by looking up the configuration value from the descriptor.
        if self.device_descriptor_tree.bNumConfigurations == 1 {
            if let Some(config_descriptor) = self
                .device_descriptor_tree
                .get_config_descriptor_by_index(0)
            {
                return Ok(config_descriptor.bConfigurationValue);
            }
        }

        // Send a synchronous control transfer to get the active configuration.
        // The kernel writes the one-byte response directly into `active_config`.
        let mut active_config: u8 = 0;
        let ctrl_transfer = usb_sys::usbdevfs_ctrltransfer {
            bRequestType: control_request_type(
                ControlRequestType::Standard,
                ControlRequestDataPhaseTransferDirection::DeviceToHost,
                ControlRequestRecipient::Device,
            ),
            bRequest: StandardControlRequest::GetConfiguration as u8,
            wValue: 0,
            wIndex: 0,
            wLength: size_of_val(&active_config) as u16,
            timeout: 5000, // milliseconds
            data: &mut active_config as *mut u8 as *mut c_void,
        };
        // SAFETY:
        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
        // pointer to a usbdevfs_ctrltransfer structure whose data pointer outlives
        // the synchronous ioctl.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_CONTROL, &ctrl_transfer)?;
        }
        Ok(active_config)
    }
576
    /// Get the total number of configurations for this device,
    /// taken from the cached device descriptor.
    pub fn get_num_configurations(&self) -> u8 {
        self.device_descriptor_tree.bNumConfigurations
    }
581
582    /// Clear the halt/stall condition for an endpoint.
583    pub fn clear_halt(&self, ep_addr: u8) -> Result<()> {
584        let endpoint: c_uint = ep_addr.into();
585        // SAFETY:
586        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
587        // pointer to unsigned int.
588        unsafe {
589            self.ioctl_with_ref(usb_sys::USBDEVFS_CLEAR_HALT, &endpoint)?;
590        }
591
592        Ok(())
593    }
594
595    /// Get speed of this device.
596    pub fn get_speed(&self) -> Result<Option<DeviceSpeed>> {
597        // SAFETY: args are valid and the return value is checked
598        let speed = unsafe { self.ioctl(usb_sys::USBDEVFS_GET_SPEED) }?;
599        match speed {
600            1 => Ok(Some(DeviceSpeed::Low)),       // Low Speed
601            2 => Ok(Some(DeviceSpeed::Full)),      // Full Speed
602            3 => Ok(Some(DeviceSpeed::High)),      // High Speed
603            4 => Ok(Some(DeviceSpeed::High)),      // Wireless, treat as a High Speed device
604            5 => Ok(Some(DeviceSpeed::Super)),     // Super Speed
605            6 => Ok(Some(DeviceSpeed::SuperPlus)), // Super Speed Plus
606            _ => {
607                error!("unexpected speed: {:?}", speed);
608                Ok(None)
609            }
610        }
611    }
612
    /// Allocate streams for the endpoint.
    pub fn alloc_streams(&self, ep: u8, num_streams: u16) -> Result<()> {
        // usbdevfs_streams ends in a variable-length endpoint array; allocate
        // room for a single endpoint entry.
        let mut streams = vec_with_array_field::<usb_sys::usbdevfs_streams, c_uchar>(1);
        streams[0].num_streams = num_streams as c_uint;
        streams[0].num_eps = 1 as c_uint;
        // SAFETY:
        // Safe because we have allocated enough memory for one trailing
        // endpoint entry above.
        let eps = unsafe { streams[0].eps.as_mut_slice(1) };
        eps[0] = ep as c_uchar;
        // SAFETY:
        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
        // pointer to a usbdevfs_streams structure.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_ALLOC_STREAMS, &streams[0])?;
        }
        Ok(())
    }
630
    /// Free streams for the endpoint.
    pub fn free_streams(&self, ep: u8) -> Result<()> {
        // usbdevfs_streams ends in a variable-length endpoint array; allocate
        // room for a single endpoint entry.
        let mut streams = vec_with_array_field::<usb_sys::usbdevfs_streams, c_uchar>(1);
        streams[0].num_eps = 1 as c_uint;
        // SAFETY:
        // Safe because we have allocated enough memory for one trailing
        // endpoint entry above.
        let eps = unsafe { streams[0].eps.as_mut_slice(1) };
        eps[0] = ep as c_uchar;
        // SAFETY:
        // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid
        // pointer to a usbdevfs_streams structure.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_FREE_STREAMS, &streams[0])?;
        }
        Ok(())
    }
647}
648
impl AsRawDescriptor for Device {
    /// Expose the underlying usbdevfs fd as a raw descriptor.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.fd.as_raw_descriptor()
    }
}
654
655impl Transfer {
    /// Borrow the URB header of this transfer.
    fn urb(&self) -> &usb_sys::usbdevfs_urb {
        // self.urb is a Vec created with `vec_with_array_field`; the first entry is
        // the URB itself.
        &self.urb[0]
    }
661
    /// Mutably borrow the URB header of this transfer.
    fn urb_mut(&mut self) -> &mut usb_sys::usbdevfs_urb {
        &mut self.urb[0]
    }
665
    /// Build a `Transfer` of the given usbdevfs `transfer_type` targeting
    /// `endpoint`, backed by `buffer`, with `iso_packets` descriptors copied
    /// into the URB's trailing array.
    ///
    /// Returns `Error::InvalidBuffer` if the buffer's address or size is
    /// unavailable (e.g. a released DMA buffer), or
    /// `Error::InvalidBufferLength` if the size does not fit the URB field.
    fn new(
        transfer_type: u8,
        endpoint: u8,
        buffer: TransferBuffer,
        iso_packets: &[usb_sys::usbdevfs_iso_packet_desc],
    ) -> Result<Transfer> {
        let mut transfer = Transfer {
            // Allocate the URB with enough trailing space for the isochronous
            // packet descriptors.
            urb: vec_with_array_field::<usb_sys::usbdevfs_urb, usb_sys::usbdevfs_iso_packet_desc>(
                iso_packets.len(),
            ),
            buffer,
            callback: None,
        };

        transfer.urb_mut().urb_type = transfer_type;
        transfer.urb_mut().endpoint = endpoint;
        transfer.urb_mut().buffer = transfer.buffer.address().ok_or(Error::InvalidBuffer)?;
        transfer.urb_mut().buffer_length = transfer
            .buffer
            .size()
            .ok_or(Error::InvalidBuffer)?
            .try_into()
            .map_err(Error::InvalidBufferLength)?;

        // SAFETY:
        // Safe because we ensured there is enough space in transfer.urb to hold the number of
        // isochronous frames required.
        let iso_frame_desc = unsafe {
            transfer
                .urb_mut()
                .iso_frame_desc
                .as_mut_slice(iso_packets.len())
        };
        iso_frame_desc.copy_from_slice(iso_packets);

        Ok(transfer)
    }
703
704    /// Create a control transfer.
705    pub fn new_control(buffer: TransferBuffer) -> Result<Transfer> {
706        let endpoint = 0;
707        Self::new(usb_sys::USBDEVFS_URB_TYPE_CONTROL, endpoint, buffer, &[])
708    }
709
    /// Create an interrupt transfer for the given endpoint.
    pub fn new_interrupt(endpoint: u8, buffer: TransferBuffer) -> Result<Transfer> {
        Self::new(usb_sys::USBDEVFS_URB_TYPE_INTERRUPT, endpoint, buffer, &[])
    }
714
715    /// Create a bulk transfer.
716    pub fn new_bulk(
717        endpoint: u8,
718        buffer: TransferBuffer,
719        stream_id: Option<u16>,
720    ) -> Result<Transfer> {
721        let mut transfer = Self::new(usb_sys::USBDEVFS_URB_TYPE_BULK, endpoint, buffer, &[])?;
722        if let Some(stream_id) = stream_id {
723            transfer.urb_mut().number_of_packets_or_stream_id = stream_id as u32;
724        }
725        Ok(transfer)
726    }
727
    /// Create an isochronous transfer, splitting `buffer` into packets of at
    /// most `packet_size` bytes.
    ///
    /// Returns `Error::InvalidIsochronousParameters` if either the buffer or
    /// the packet size is zero.
    pub fn new_isochronous(
        endpoint: u8,
        buffer: TransferBuffer,
        packet_size: u32,
    ) -> Result<Transfer> {
        let buffer_size: u32 = buffer
            .size()
            .ok_or(Error::InvalidBuffer)?
            .try_into()
            .map_err(Error::InvalidBufferLength)?;
        // Isochronous transfers divide the buffer into multiple packets.
        if buffer_size == 0 || packet_size == 0 {
            error!("invalid ISOC parameters: buffer_size={buffer_size}, packet_size={packet_size}");
            return Err(Error::InvalidIsochronousParameters);
        }
        let count = buffer_size.div_ceil(packet_size);

        let mut iso_packets = vec![
            usb_sys::usbdevfs_iso_packet_desc {
                length: packet_size,
                actual_length: 0,
                status: 0,
            };
            count as usize
        ];
        // The final packet covers whatever remains of the buffer (equal to
        // packet_size when the buffer divides evenly).
        let last_entry = iso_packets
            .last_mut()
            .expect("there should be at least one entry for ISOC packet");
        last_entry.length = buffer_size - packet_size * (count - 1);

        let mut transfer = Self::new(
            usb_sys::USBDEVFS_URB_TYPE_ISO,
            endpoint,
            buffer,
            &iso_packets,
        )?;
        transfer.urb_mut().number_of_packets_or_stream_id = count;
        transfer.urb_mut().flags = usb_sys::USBDEVFS_URB_ISO_ASAP;
        Ok(transfer)
    }
769
770    /// Get the status of a completed transfer.
771    pub fn status(&self) -> TransferStatus {
772        let status = self.urb().status;
773        if status == 0 {
774            TransferStatus::Completed
775        } else if status == -ENODEV || status == -ESHUTDOWN {
776            TransferStatus::NoDevice
777        } else if status == -ENOENT || status == -ECONNRESET {
778            TransferStatus::Cancelled
779        } else if status == -EPIPE {
780            TransferStatus::Stalled
781        } else {
782            TransferStatus::Error
783        }
784    }
785
    /// Get the actual amount of data transferred, which may be less than
    /// the original length.
    pub fn actual_length(&self) -> usize {
        self.urb().actual_length as usize
    }
791
    /// Set callback function for transfer completion, invoked by
    /// `Device::poll_transfers()` with the completed transfer.
    pub fn set_callback<C: 'static + Fn(Transfer) + Send + Sync>(&mut self, cb: C) {
        self.callback = Some(Box::new(cb));
    }
796}
797
impl TransferHandle {
    /// Attempt to cancel the transfer associated with this `TransferHandle`.
    /// Safe to call even if the transfer has already completed;
    /// `Error::TransferAlreadyCompleted` will be returned in this case.
    pub fn cancel(&self) -> Result<()> {
        // Taking cancel_lock synchronizes with Device::poll_transfers(), which
        // requires that no other strong Arc<Transfer> reference exists while it
        // unwraps a reaped transfer.
        let _guard = self.cancel_lock.lock();

        let rc_transfer = match self.weak_transfer.upgrade() {
            // A dead weak reference means the transfer was already reaped.
            None => return Err(Error::TransferAlreadyCompleted),
            Some(rc_transfer) => rc_transfer,
        };

        let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb;
        let fd = match self.fd.upgrade() {
            None => return Err(Error::NoDevice),
            Some(fd) => fd,
        };

        // SAFETY:
        // Safe because fd is a valid usbdevfs file descriptor and we pass a valid
        // pointer to a usbdevfs_urb structure.
        if unsafe {
            handle_eintr_errno!(base::ioctl_with_mut_ptr(
                &*fd,
                usb_sys::USBDEVFS_DISCARDURB,
                urb_ptr
            ))
        } < 0
        {
            return Err(Error::IoctlFailed(
                usb_sys::USBDEVFS_DISCARDURB,
                base::Error::last(),
            ));
        }

        Ok(())
    }
}