1use std::convert::TryInto;
6use std::fs::File;
7use std::io::Read;
8use std::io::Seek;
9use std::io::SeekFrom;
10use std::mem::size_of_val;
11use std::os::raw::c_int;
12use std::os::raw::c_uchar;
13use std::os::raw::c_uint;
14use std::os::raw::c_void;
15use std::sync::atomic::AtomicBool;
16use std::sync::atomic::AtomicUsize;
17use std::sync::atomic::Ordering;
18use std::sync::Arc;
19use std::sync::Weak;
20
21use base::error;
22use base::handle_eintr_errno;
23use base::warn;
24use base::AsRawDescriptor;
25use base::IoctlNr;
26use base::MappedRegion;
27use base::MemoryMapping;
28use base::MemoryMappingBuilder;
29use base::Protection;
30use base::RawDescriptor;
31use data_model::vec_with_array_field;
32use libc::EAGAIN;
33use libc::ECONNRESET;
34use libc::ENODEV;
35use libc::ENOENT;
36use libc::EPIPE;
37use libc::ESHUTDOWN;
38use sync::Mutex;
39
40use crate::control_request_type;
41use crate::descriptor;
42use crate::ConfigDescriptorTree;
43use crate::ControlRequestDataPhaseTransferDirection;
44use crate::ControlRequestRecipient;
45use crate::ControlRequestType;
46use crate::DeviceDescriptor;
47use crate::DeviceDescriptorTree;
48use crate::DeviceSpeed;
49use crate::Error;
50use crate::Result;
51use crate::StandardControlRequest;
52
/// Size of the region mmap()ed from the usbdevfs device node for DMA-capable
/// transfer buffers (1 MiB).
const MMAP_SIZE: usize = 1024 * 1024;
55
/// Owner of the usbdevfs mmap()ed region used for zero-copy transfers.
struct ManagedDmaBuffer {
    // The mapping backing DMA transfers (MMAP_SIZE bytes), created in
    // `Device::new`.
    buf: MemoryMapping,
    // The single outstanding lease, if any. `Device::reserve_dma_buffer`
    // refuses to hand out a second buffer while this is `Some`.
    used: Option<Arc<Mutex<DmaBuffer>>>,
}
63
/// A span of the usbdevfs mmap()ed region handed out for a single transfer.
///
/// Stores a raw start address and a length; the accessors reinterpret that
/// range as byte slices.
pub struct DmaBuffer {
    addr: u64,
    size: usize,
}

impl DmaBuffer {
    /// Raw pointer to the start of the buffer, suitable for the kernel API.
    pub fn address(&mut self) -> *mut c_void {
        self.addr as *mut c_void
    }

    /// Length of the buffer in bytes.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Immutable byte view over the buffer.
    pub fn as_slice(&self) -> &[u8] {
        let start = self.addr as *const u8;
        // SAFETY: `addr`/`size` are assumed to describe memory that stays
        // mapped for this buffer's lifetime — presumably guaranteed by the
        // allocator (`Device::reserve_dma_buffer`); TODO confirm.
        unsafe { std::slice::from_raw_parts(start, self.size) }
    }

    /// Mutable byte view over the buffer.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        let start = self.addr as *mut u8;
        // SAFETY: same validity assumption as `as_slice`; `&mut self` prevents
        // aliasing through this handle.
        unsafe { std::slice::from_raw_parts_mut(start, self.size) }
    }
}
93
/// Backing storage for a transfer's data phase.
#[derive(Clone)]
pub enum TransferBuffer {
    // Plain heap buffer owned by the transfer.
    Vector(Vec<u8>),
    // Weak lease on the device's mmap()ed DMA region; upgrading fails once
    // the buffer has been released back to the device.
    Dma(Weak<Mutex<DmaBuffer>>),
}
100
101impl TransferBuffer {
102 pub fn address(&mut self) -> Option<*mut c_void> {
103 match self {
104 TransferBuffer::Vector(v) => Some(v.as_mut_ptr() as *mut c_void),
105 TransferBuffer::Dma(buf) => buf.upgrade().map(|buf| buf.lock().address()),
106 }
107 }
108 pub fn size(&self) -> Option<usize> {
109 match self {
110 TransferBuffer::Vector(v) => Some(v.len()),
111 TransferBuffer::Dma(buf) => buf.upgrade().map(|buf| buf.lock().size()),
112 }
113 }
114}
115
/// A USB device opened through Linux usbdevfs (`/dev/bus/usb/...`).
pub struct Device {
    // Shared handle to the usbdevfs node; `TransferHandle`s hold weak copies
    // so they can cancel urbs without keeping the device alive.
    fd: Arc<File>,
    // Descriptors parsed from the device node at construction time.
    device_descriptor_tree: DeviceDescriptorTree,
    // mmap()ed transfer region, present only if the mapping in `new`
    // succeeded.
    dma_buffer: Option<ManagedDmaBuffer>,
    // Count of urbs submitted but not yet reaped.
    in_flight_transfers: AtomicUsize,
    // Set by `set_detaching`; new submissions are refused once true.
    detaching: AtomicBool,
    // Set when reaping reports ENODEV/ESHUTDOWN (device disconnected).
    is_lost: AtomicBool,
    // Set when reaping fails with an unexpected error.
    is_unrecoverable: AtomicBool,
    // Serializes `TransferHandle::cancel` against completion handling in
    // `poll_transfers`.
    cancel_lock: Arc<Mutex<()>>,
}
127
/// An asynchronous USB transfer: one usbdevfs urb plus its data buffer and an
/// optional completion callback.
pub struct Transfer {
    // A single urb allocated via `vec_with_array_field` so that the trailing
    // `iso_frame_desc` flexible array has room for the ISO packet descriptors.
    urb: Vec<usb_sys::usbdevfs_urb>,
    // Data buffer the urb points into: an owned Vec or a DMA lease.
    pub buffer: TransferBuffer,
    // Invoked from `Device::poll_transfers` when the urb is reaped.
    callback: Option<Box<dyn Fn(Transfer) + Send + Sync>>,
}
137
/// Handle for cancelling an in-flight `Transfer` without owning it.
pub struct TransferHandle {
    // Weak reference: upgrading fails once the transfer has completed and
    // been reclaimed by `Device::poll_transfers`.
    weak_transfer: std::sync::Weak<Transfer>,
    // Weak reference to the device fd needed for USBDEVFS_DISCARDURB.
    fd: std::sync::Weak<File>,
    // Shared with the owning `Device` to serialize cancel vs. reap.
    cancel_lock: Arc<Mutex<()>>,
}
147
/// Coarse outcome of a completed transfer, derived from the urb's
/// negative-errno status in `Transfer::status`.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TransferStatus {
    // Transfer finished successfully (status 0).
    Completed,
    // Any error not covered by the variants below.
    Error,
    // Urb was discarded (-ENOENT / -ECONNRESET).
    Cancelled,
    // Device disconnected (-ENODEV / -ESHUTDOWN).
    NoDevice,
    // Endpoint stalled (-EPIPE).
    Stalled,
}
156
impl Device {
    /// Builds a `Device` from an opened usbdevfs node.
    ///
    /// Reads and parses the raw descriptor blob that usbdevfs serves at the
    /// start of the device file, then tries to mmap() part of the file for
    /// zero-copy DMA transfer buffers. The mmap() is best-effort: on failure
    /// the device still works, falling back to user-provided `Vec` buffers.
    pub fn new(mut fd: File) -> Result<Self> {
        // usbdevfs exposes the device's descriptors at offset 0.
        fd.seek(SeekFrom::Start(0)).map_err(Error::DescriptorRead)?;
        let mut descriptor_data = Vec::new();
        fd.read_to_end(&mut descriptor_data)
            .map_err(Error::DescriptorRead)?;
        let device_descriptor_tree = descriptor::parse_usbfs_descriptors(&descriptor_data)?;

        let mut device = Device {
            fd: Arc::new(fd),
            device_descriptor_tree,
            dma_buffer: None,
            in_flight_transfers: AtomicUsize::new(0),
            detaching: AtomicBool::new(false),
            is_lost: AtomicBool::new(false),
            is_unrecoverable: AtomicBool::new(false),
            cancel_lock: Arc::new(Mutex::new(())),
        };

        // Best-effort mapping of MMAP_SIZE bytes of the device file; not all
        // kernels/devices support mmap on usbdevfs nodes.
        let map = MemoryMappingBuilder::new(MMAP_SIZE)
            .from_file(&device.fd)
            .protection(Protection::read_write())
            .build();
        match map {
            Ok(map) => {
                device.dma_buffer = Some(ManagedDmaBuffer {
                    buf: map,
                    used: None,
                });
            }
            Err(e) => {
                warn!(
                    "mmap() failed. User-provided buffer will be used for data transfer. {}",
                    e
                );
            }
        }
        Ok(device)
    }

    /// Returns a shared handle to the underlying usbdevfs file.
    pub fn fd(&self) -> Arc<File> {
        self.fd.clone()
    }

    /// Issues an argument-less ioctl on the device fd, retrying on EINTR.
    ///
    /// # Safety
    ///
    /// `nr` must be an ioctl that takes no argument and is valid for this fd.
    unsafe fn ioctl(&self, nr: IoctlNr) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl(&*self.fd, nr));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }

    /// Issues an ioctl whose argument is read (not written) by the kernel.
    ///
    /// # Safety
    ///
    /// `nr` must be valid for this fd and `arg` must be the exact type the
    /// ioctl expects.
    unsafe fn ioctl_with_ref<T>(&self, nr: IoctlNr, arg: &T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_ref(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }

    /// Issues an ioctl whose argument may be written by the kernel.
    ///
    /// # Safety
    ///
    /// `nr` must be valid for this fd and `arg` must be the exact type the
    /// ioctl expects.
    unsafe fn ioctl_with_mut_ref<T>(&self, nr: IoctlNr, arg: &mut T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }

    /// Issues an ioctl taking a raw mutable pointer argument.
    ///
    /// # Safety
    ///
    /// `nr` must be valid for this fd and `arg` must point to memory valid
    /// for whatever access the ioctl performs.
    unsafe fn ioctl_with_mut_ptr<T>(&self, nr: IoctlNr, arg: *mut T) -> Result<i32> {
        let ret = handle_eintr_errno!(base::ioctl_with_mut_ptr(&*self.fd, nr, arg));
        if ret < 0 {
            return Err(Error::IoctlFailed(nr, base::Error::last()));
        }
        Ok(ret)
    }

    /// Leases the mmap()ed region as a `DmaBuffer` of `size` bytes.
    ///
    /// Only one lease may be outstanding at a time; fails if the region was
    /// never mapped or is already lent out. The strong reference is kept in
    /// `self.dma_buffer.used` until `release_dma_buffer`; the caller only
    /// gets a `Weak` handle.
    ///
    /// NOTE(review): `size` is not validated against MMAP_SIZE here — confirm
    /// callers never request more than the mapping provides.
    pub fn reserve_dma_buffer(&mut self, size: usize) -> Result<Weak<Mutex<DmaBuffer>>> {
        if let Some(managed) = &mut self.dma_buffer {
            if managed.used.is_none() {
                let buf = Arc::new(Mutex::new(DmaBuffer {
                    addr: managed.buf.as_ptr() as u64,
                    size,
                }));
                let ret = Ok(Arc::downgrade(&buf));
                managed.used = Some(buf);
                return ret;
            }
        }
        Err(Error::GetDmaBufferFailed(size))
    }

    /// Returns a previously reserved DMA buffer, matched by start address.
    ///
    /// Fails if no region is mapped, the weak handle is already dead, or the
    /// address does not match the currently lent-out buffer.
    pub fn release_dma_buffer(&mut self, dmabuf: Weak<Mutex<DmaBuffer>>) -> Result<()> {
        if let Some(managed) = &mut self.dma_buffer {
            if let Some(released) = dmabuf.upgrade() {
                let addr = { released.lock().address() as u64 };
                if let Some(lent) = &managed.used {
                    if lent.lock().addr == addr {
                        // Dropping the strong ref invalidates outstanding
                        // weak handles to this lease.
                        managed.used = None;
                        return Ok(());
                    }
                }
            }
        }
        Err(Error::ReleaseDmaBufferFailed)
    }

    /// Submits `transfer` to the kernel and returns a handle that can cancel
    /// it.
    ///
    /// Ownership protocol: one strong `Arc` reference is deliberately leaked
    /// via `Arc::into_raw`, and the transfer's address is stored in
    /// `urb.usercontext`. The leaked reference is reclaimed with
    /// `Arc::from_raw` either here (on submission failure) or in
    /// `poll_transfers` (when the kernel hands the urb back).
    pub fn submit_transfer(&mut self, transfer: Transfer) -> Result<TransferHandle> {
        if self.is_detaching() || self.is_device_lost() || self.is_unrecoverable() {
            return Err(Error::NoDevice);
        }

        let mut rc_transfer = Arc::new(transfer);

        // Record the transfer's address in the urb so the reaper can recover
        // the Arc. `get_mut` must succeed since no clone exists yet.
        let raw_transfer = (&*rc_transfer) as *const Transfer as usize;
        match Arc::get_mut(&mut rc_transfer) {
            Some(t) => t.urb_mut().usercontext = raw_transfer,
            None => {
                return Err(Error::RcGetMutFailed);
            }
        }
        // Intentional leak of one strong reference (see ownership protocol
        // above).
        let _ = Arc::into_raw(rc_transfer.clone());

        let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb;

        // SAFETY: `urb_ptr` points into `rc_transfer`, which the leaked
        // reference keeps alive until the urb is reaped or reclaimed below.
        unsafe {
            if let Err(e) = self.ioctl_with_mut_ptr(usb_sys::USBDEVFS_SUBMITURB, urb_ptr) {
                // Submission failed: reclaim the leaked reference and give any
                // DMA buffer back to the device before reporting the error.
                let leaked_transfer = Arc::from_raw(raw_transfer as *const Transfer);
                if let TransferBuffer::Dma(buf) = &leaked_transfer.buffer {
                    if self.release_dma_buffer(buf.clone()).is_err() {
                        warn!("failed to release dma buffer");
                    }
                }
                return Err(e);
            }
        }

        let _ = self.in_flight_transfers.fetch_add(1, Ordering::SeqCst);
        let weak_transfer = Arc::downgrade(&rc_transfer);

        Ok(TransferHandle {
            weak_transfer,
            fd: Arc::downgrade(&self.fd),
            cancel_lock: self.cancel_lock.clone(),
        })
    }

    /// Reaps all currently completed urbs (non-blocking) and runs their
    /// callbacks.
    ///
    /// EAGAIN ends the loop (nothing more to reap); ENODEV/ESHUTDOWN mark the
    /// device lost; any other ioctl failure marks it unrecoverable and is
    /// propagated.
    pub fn poll_transfers(&mut self) -> Result<()> {
        loop {
            let mut urb_ptr: *mut usb_sys::usbdevfs_urb = std::ptr::null_mut();
            // SAFETY: REAPURBNDELAY writes a urb pointer into `urb_ptr`.
            let result =
                unsafe { self.ioctl_with_mut_ref(usb_sys::USBDEVFS_REAPURBNDELAY, &mut urb_ptr) };
            match result {
                // No completed urb is ready right now.
                Err(Error::IoctlFailed(_nr, e)) if e.errno() == EAGAIN => break,
                // Device disconnected or controller shut down.
                Err(Error::IoctlFailed(_nr, e))
                    if e.errno() == ENODEV || e.errno() == ESHUTDOWN =>
                {
                    self.is_lost.store(true, Ordering::SeqCst);
                    break;
                }
                Err(e) => {
                    self.is_unrecoverable.store(true, Ordering::SeqCst);
                    return Err(e);
                }
                Ok(_) => {}
            }

            if urb_ptr.is_null() {
                break;
            }

            let _ = self.in_flight_transfers.fetch_sub(1, Ordering::SeqCst);

            let mut transfer = {
                // Hold the cancel lock so a concurrent
                // `TransferHandle::cancel` cannot upgrade its weak reference
                // while we reclaim and unwrap the Arc below.
                let _guard = self.cancel_lock.lock();

                // SAFETY: `usercontext` holds the pointer leaked by
                // `submit_transfer`; the kernel has handed this urb back, so
                // this is the only reclamation of that reference.
                let rc_transfer: Arc<Transfer> =
                    unsafe { Arc::from_raw((*urb_ptr).usercontext as *const Transfer) };

                // The handle only holds a weak reference, so (with cancel
                // excluded by the lock) this is the sole strong reference.
                Arc::try_unwrap(rc_transfer).map_err(|_| Error::RcUnwrapFailed)?
            };

            // Remember the DMA lease (if any) so it can be returned after the
            // callback consumes the transfer by value.
            let dmabuf = match &mut transfer.buffer {
                TransferBuffer::Dma(buf) => Some(buf.clone()),
                TransferBuffer::Vector(_) => None,
            };

            if let Some(cb) = transfer.callback.take() {
                cb(transfer);
            }

            if let Some(dmabuf) = dmabuf {
                if self.release_dma_buffer(dmabuf).is_err() {
                    warn!("failed to release dma buffer");
                }
            }
        }

        Ok(())
    }

    /// True when every submitted urb has been reaped.
    fn no_in_flight_transfer(&self) -> bool {
        self.in_flight_transfers.load(Ordering::SeqCst) == 0
    }

    /// True once `set_detaching` has been called.
    fn is_detaching(&self) -> bool {
        self.detaching.load(Ordering::SeqCst)
    }

    /// True once reaping has observed ENODEV/ESHUTDOWN (device disconnected).
    pub fn is_device_lost(&self) -> bool {
        self.is_lost.load(Ordering::SeqCst)
    }

    /// Marks the device as detaching; subsequent submissions are refused.
    pub fn set_detaching(&self) {
        self.detaching.store(true, Ordering::SeqCst);
    }

    /// True when detaching and either unrecoverable or fully drained.
    pub fn ready_to_detach(&self) -> bool {
        self.is_detaching() && (self.is_unrecoverable() || self.no_in_flight_transfer())
    }

    /// True once reaping has failed with an unexpected error.
    fn is_unrecoverable(&self) -> bool {
        self.is_unrecoverable.load(Ordering::SeqCst)
    }

    /// Drops the mmap()ed DMA region (and any outstanding lease's strong
    /// reference). NOTE(review): any caller still holding an upgraded
    /// `DmaBuffer` Arc would be left with a dangling address — confirm
    /// callers only invoke this once no lease is in use.
    pub fn drop_dma_buffer(&mut self) {
        self.dma_buffer.take();
    }

    /// Issues a port reset, but only for VID:PID 0x1a6e:0x089a; all other
    /// devices are a no-op success.
    ///
    /// NOTE(review): the single-device allowlist suggests a device-specific
    /// workaround (0x1a6e 0x089a) — confirm the rationale. ENODEV from the
    /// ioctl is treated as success (device vanished/re-enumerated during
    /// reset).
    pub fn reset(&self) -> Result<()> {
        let vid = self.device_descriptor_tree.idVendor;
        let pid = self.device_descriptor_tree.idProduct;
        match (vid, pid) {
            (0x1a6e, 0x089a) => (),
            _ => return Ok(()),
        }

        // SAFETY: USBDEVFS_RESET takes no argument.
        let result = unsafe { self.ioctl(usb_sys::USBDEVFS_RESET) };

        if let Err(Error::IoctlFailed(_nr, errno_err)) = result {
            if errno_err.errno() == libc::ENODEV {
                return Ok(());
            }
        }

        result?;
        Ok(())
    }

    /// Detaches any kernel driver from `interface_number` and claims the
    /// interface for this process in one USBDEVFS_DISCONNECT_CLAIM ioctl.
    pub fn claim_interface(&self, interface_number: u8) -> Result<()> {
        let disconnect_claim = usb_sys::usbdevfs_disconnect_claim {
            interface: interface_number.into(),
            flags: 0,
            driver: [0u8; 256],
        };
        // SAFETY: the argument struct outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_DISCONNECT_CLAIM, &disconnect_claim)?;
        }

        Ok(())
    }

    /// Releases a previously claimed interface (USBDEVFS_RELEASEINTERFACE).
    pub fn release_interface(&self, interface_number: u8) -> Result<()> {
        let ifnum: c_uint = interface_number.into();
        // SAFETY: the argument outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_RELEASEINTERFACE, &ifnum)?;
        }

        Ok(())
    }

    /// Selects an alternate setting on an interface (USBDEVFS_SETINTERFACE).
    pub fn set_interface_alt_setting(
        &self,
        interface_number: u8,
        alternative_setting: u8,
    ) -> Result<()> {
        let setinterface = usb_sys::usbdevfs_setinterface {
            interface: interface_number.into(),
            altsetting: alternative_setting.into(),
        };
        // SAFETY: the argument struct outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_SETINTERFACE, &setinterface)?;
        }
        Ok(())
    }

    /// Activates configuration `config` (USBDEVFS_SETCONFIGURATION).
    pub fn set_active_configuration(&mut self, config: u8) -> Result<()> {
        let config: c_int = config.into();
        // SAFETY: the argument outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_SETCONFIGURATION, &config)?;
        }

        Ok(())
    }

    /// Returns a copy of the device descriptor (the tree derefs to it).
    pub fn get_device_descriptor(&self) -> Result<DeviceDescriptor> {
        Ok(*self.device_descriptor_tree)
    }

    /// Borrows the full parsed descriptor tree.
    pub fn get_device_descriptor_tree(&self) -> &DeviceDescriptorTree {
        &self.device_descriptor_tree
    }

    /// Looks up a configuration descriptor by its `bConfigurationValue`.
    pub fn get_config_descriptor(&self, config: u8) -> Result<ConfigDescriptorTree> {
        match self.device_descriptor_tree.get_config_descriptor(config) {
            Some(config_descriptor) => Ok(config_descriptor.clone()),
            None => Err(Error::NoSuchDescriptor),
        }
    }

    /// Looks up a configuration descriptor by its 0-based index.
    pub fn get_config_descriptor_by_index(&self, config_index: u8) -> Result<ConfigDescriptorTree> {
        match self
            .device_descriptor_tree
            .get_config_descriptor_by_index(config_index)
        {
            Some(config_descriptor) => Ok(config_descriptor.clone()),
            None => Err(Error::NoSuchDescriptor),
        }
    }

    /// Returns the active configuration's `bConfigurationValue`.
    ///
    /// Single-configuration devices are answered from the cached descriptor
    /// tree to avoid a control transfer; otherwise a standard
    /// GET_CONFIGURATION request is issued.
    pub fn get_active_configuration(&self) -> Result<u8> {
        // Fast path: with exactly one configuration it must be the active one.
        if self.device_descriptor_tree.bNumConfigurations == 1 {
            if let Some(config_descriptor) = self
                .device_descriptor_tree
                .get_config_descriptor_by_index(0)
            {
                return Ok(config_descriptor.bConfigurationValue);
            }
        }

        let mut active_config: u8 = 0;
        let ctrl_transfer = usb_sys::usbdevfs_ctrltransfer {
            bRequestType: control_request_type(
                ControlRequestType::Standard,
                ControlRequestDataPhaseTransferDirection::DeviceToHost,
                ControlRequestRecipient::Device,
            ),
            bRequest: StandardControlRequest::GetConfiguration as u8,
            wValue: 0,
            wIndex: 0,
            wLength: size_of_val(&active_config) as u16,
            // Timeout in milliseconds per the usbdevfs convention.
            timeout: 5000,
            data: &mut active_config as *mut u8 as *mut c_void,
        };
        // SAFETY: the transfer struct and the `active_config` it points to
        // stay alive across the ioctl.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_CONTROL, &ctrl_transfer)?;
        }
        Ok(active_config)
    }

    /// Number of configurations from the cached device descriptor.
    pub fn get_num_configurations(&self) -> u8 {
        self.device_descriptor_tree.bNumConfigurations
    }

    /// Clears a halt/stall condition on endpoint address `ep_addr`
    /// (USBDEVFS_CLEAR_HALT).
    pub fn clear_halt(&self, ep_addr: u8) -> Result<()> {
        let endpoint: c_uint = ep_addr.into();
        // SAFETY: the argument outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_CLEAR_HALT, &endpoint)?;
        }

        Ok(())
    }

    /// Queries the negotiated connection speed (USBDEVFS_GET_SPEED).
    ///
    /// Values follow the kernel's `usb_device_speed` enum. Unknown values are
    /// logged and yield `Ok(None)` rather than an error.
    pub fn get_speed(&self) -> Result<Option<DeviceSpeed>> {
        // SAFETY: USBDEVFS_GET_SPEED takes no argument.
        let speed = unsafe { self.ioctl(usb_sys::USBDEVFS_GET_SPEED) }?;
        match speed {
            1 => Ok(Some(DeviceSpeed::Low)),
            2 => Ok(Some(DeviceSpeed::Full)),
            3 => Ok(Some(DeviceSpeed::High)),
            // 4 is USB_SPEED_WIRELESS in the kernel enum; it is mapped to
            // High here — TODO confirm this is intentional.
            4 => Ok(Some(DeviceSpeed::High)),
            5 => Ok(Some(DeviceSpeed::Super)),
            6 => Ok(Some(DeviceSpeed::SuperPlus)),
            _ => {
                error!("unexpected speed: {:?}", speed);
                Ok(None)
            }
        }
    }

    /// Allocates `num_streams` bulk streams on endpoint `ep`
    /// (USBDEVFS_ALLOC_STREAMS).
    pub fn alloc_streams(&self, ep: u8, num_streams: u16) -> Result<()> {
        // usbdevfs_streams ends in a flexible array of endpoint numbers, so it
        // is allocated with room for one trailing c_uchar.
        let mut streams = vec_with_array_field::<usb_sys::usbdevfs_streams, c_uchar>(1);
        streams[0].num_streams = num_streams as c_uint;
        streams[0].num_eps = 1 as c_uint;
        // SAFETY: the allocation above reserved space for exactly one element.
        let eps = unsafe { streams[0].eps.as_mut_slice(1) };
        eps[0] = ep as c_uchar;
        // SAFETY: the argument struct outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_ALLOC_STREAMS, &streams[0])?;
        }
        Ok(())
    }

    /// Frees previously allocated streams on endpoint `ep`
    /// (USBDEVFS_FREE_STREAMS).
    pub fn free_streams(&self, ep: u8) -> Result<()> {
        let mut streams = vec_with_array_field::<usb_sys::usbdevfs_streams, c_uchar>(1);
        streams[0].num_eps = 1 as c_uint;
        // SAFETY: the allocation above reserved space for exactly one element.
        let eps = unsafe { streams[0].eps.as_mut_slice(1) };
        eps[0] = ep as c_uchar;
        // SAFETY: the argument struct outlives the ioctl call.
        unsafe {
            self.ioctl_with_ref(usb_sys::USBDEVFS_FREE_STREAMS, &streams[0])?;
        }
        Ok(())
    }
}
648
649impl AsRawDescriptor for Device {
650 fn as_raw_descriptor(&self) -> RawDescriptor {
651 self.fd.as_raw_descriptor()
652 }
653}
654
impl Transfer {
    /// Shared view of the urb (element 0 of the backing vector).
    fn urb(&self) -> &usb_sys::usbdevfs_urb {
        &self.urb[0]
    }

    /// Mutable view of the urb.
    fn urb_mut(&mut self) -> &mut usb_sys::usbdevfs_urb {
        &mut self.urb[0]
    }

    /// Builds a transfer of the given usbdevfs `transfer_type` on `endpoint`.
    ///
    /// The urb is allocated with trailing space for `iso_packets.len()` ISO
    /// packet descriptors, which are copied in. Fails with
    /// `Error::InvalidBuffer` if the buffer's address/size are unavailable
    /// (e.g. a released DMA lease) and `Error::InvalidBufferLength` if the
    /// size does not fit the urb's length field.
    fn new(
        transfer_type: u8,
        endpoint: u8,
        buffer: TransferBuffer,
        iso_packets: &[usb_sys::usbdevfs_iso_packet_desc],
    ) -> Result<Transfer> {
        let mut transfer = Transfer {
            urb: vec_with_array_field::<usb_sys::usbdevfs_urb, usb_sys::usbdevfs_iso_packet_desc>(
                iso_packets.len(),
            ),
            buffer,
            callback: None,
        };

        transfer.urb_mut().urb_type = transfer_type;
        transfer.urb_mut().endpoint = endpoint;
        transfer.urb_mut().buffer = transfer.buffer.address().ok_or(Error::InvalidBuffer)?;
        transfer.urb_mut().buffer_length = transfer
            .buffer
            .size()
            .ok_or(Error::InvalidBuffer)?
            .try_into()
            .map_err(Error::InvalidBufferLength)?;

        // SAFETY: the urb was allocated above with room for exactly
        // `iso_packets.len()` trailing descriptors.
        let iso_frame_desc = unsafe {
            transfer
                .urb_mut()
                .iso_frame_desc
                .as_mut_slice(iso_packets.len())
        };
        iso_frame_desc.copy_from_slice(iso_packets);

        Ok(transfer)
    }

    /// Control transfer on endpoint 0. Presumably `buffer` carries the setup
    /// packet followed by the data stage, per the usbdevfs control-urb
    /// convention — confirm against callers.
    pub fn new_control(buffer: TransferBuffer) -> Result<Transfer> {
        let endpoint = 0;
        Self::new(usb_sys::USBDEVFS_URB_TYPE_CONTROL, endpoint, buffer, &[])
    }

    /// Interrupt transfer on `endpoint`.
    pub fn new_interrupt(endpoint: u8, buffer: TransferBuffer) -> Result<Transfer> {
        Self::new(usb_sys::USBDEVFS_URB_TYPE_INTERRUPT, endpoint, buffer, &[])
    }

    /// Bulk transfer on `endpoint`; `stream_id` selects a bulk stream when
    /// streams have been allocated on the endpoint.
    pub fn new_bulk(
        endpoint: u8,
        buffer: TransferBuffer,
        stream_id: Option<u16>,
    ) -> Result<Transfer> {
        let mut transfer = Self::new(usb_sys::USBDEVFS_URB_TYPE_BULK, endpoint, buffer, &[])?;
        if let Some(stream_id) = stream_id {
            // This urb field doubles as ISO packet count and bulk stream id.
            transfer.urb_mut().number_of_packets_or_stream_id = stream_id as u32;
        }
        Ok(transfer)
    }

    /// Isochronous transfer: the buffer is split into
    /// ceil(buffer_size / packet_size) packets, the last possibly shorter.
    pub fn new_isochronous(
        endpoint: u8,
        buffer: TransferBuffer,
        packet_size: u32,
    ) -> Result<Transfer> {
        let buffer_size: u32 = buffer
            .size()
            .ok_or(Error::InvalidBuffer)?
            .try_into()
            .map_err(Error::InvalidBufferLength)?;
        if buffer_size == 0 || packet_size == 0 {
            error!("invalid ISOC parameters: buffer_size={buffer_size}, packet_size={packet_size}");
            return Err(Error::InvalidIsochronousParameters);
        }
        let count = buffer_size.div_ceil(packet_size);

        let mut iso_packets = vec![
            usb_sys::usbdevfs_iso_packet_desc {
                length: packet_size,
                actual_length: 0,
                status: 0,
            };
            count as usize
        ];
        // The final packet covers whatever remains after the full-size ones.
        let last_entry = iso_packets
            .last_mut()
            .expect("there should be at least one entry for ISOC packet");
        last_entry.length = buffer_size - packet_size * (count - 1);

        let mut transfer = Self::new(
            usb_sys::USBDEVFS_URB_TYPE_ISO,
            endpoint,
            buffer,
            &iso_packets,
        )?;
        transfer.urb_mut().number_of_packets_or_stream_id = count;
        // ISO_ASAP: let the kernel schedule in the next available frames.
        transfer.urb_mut().flags = usb_sys::USBDEVFS_URB_ISO_ASAP;
        Ok(transfer)
    }

    /// Maps the urb's negative-errno status to a coarse `TransferStatus`.
    pub fn status(&self) -> TransferStatus {
        let status = self.urb().status;
        if status == 0 {
            TransferStatus::Completed
        } else if status == -ENODEV || status == -ESHUTDOWN {
            TransferStatus::NoDevice
        } else if status == -ENOENT || status == -ECONNRESET {
            // What the kernel reports for urbs discarded via DISCARDURB.
            TransferStatus::Cancelled
        } else if status == -EPIPE {
            TransferStatus::Stalled
        } else {
            TransferStatus::Error
        }
    }

    /// Number of bytes actually transferred, as reported by the kernel.
    pub fn actual_length(&self) -> usize {
        self.urb().actual_length as usize
    }

    /// Registers a completion callback; it receives the finished transfer by
    /// value when the urb is reaped in `Device::poll_transfers`.
    pub fn set_callback<C: 'static + Fn(Transfer) + Send + Sync>(&mut self, cb: C) {
        self.callback = Some(Box::new(cb));
    }
}
797
impl TransferHandle {
    /// Asks the kernel to discard the in-flight urb (USBDEVFS_DISCARDURB).
    ///
    /// Holds `cancel_lock` for the whole operation so the weak-reference
    /// upgrade and the ioctl cannot race with `Device::poll_transfers`
    /// reclaiming the transfer's Arc. Returns `TransferAlreadyCompleted` if
    /// the transfer was already reaped and `NoDevice` if the device fd is
    /// gone. Note that cancellation is asynchronous: the discarded urb is
    /// still delivered through `poll_transfers` with a Cancelled status.
    pub fn cancel(&self) -> Result<()> {
        let _guard = self.cancel_lock.lock();

        let rc_transfer = match self.weak_transfer.upgrade() {
            None => return Err(Error::TransferAlreadyCompleted),
            Some(rc_transfer) => rc_transfer,
        };

        let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb;
        let fd = match self.fd.upgrade() {
            None => return Err(Error::NoDevice),
            Some(fd) => fd,
        };

        // SAFETY: `urb_ptr` points into `rc_transfer`, which the upgraded Arc
        // keeps alive for the duration of the ioctl.
        if unsafe {
            handle_eintr_errno!(base::ioctl_with_mut_ptr(
                &*fd,
                usb_sys::USBDEVFS_DISCARDURB,
                urb_ptr
            ))
        } < 0
        {
            return Err(Error::IoctlFailed(
                usb_sys::USBDEVFS_DISCARDURB,
                base::Error::last(),
            ));
        }

        Ok(())
    }
}