1use std::cmp;
6use std::io::Read;
7use std::io::Write;
8
9use base::warn;
10use data_model::Be16;
11use data_model::Be32;
12use data_model::Be64;
13use zerocopy::FromBytes;
14use zerocopy::Immutable;
15use zerocopy::IntoBytes;
16use zerocopy::KnownLayout;
17use zerocopy::Unaligned;
18
19use crate::virtio::scsi::constants::INQUIRY;
20use crate::virtio::scsi::constants::MAINTENANCE_IN;
21use crate::virtio::scsi::constants::MODE_SELECT_6;
22use crate::virtio::scsi::constants::MODE_SENSE_6;
23use crate::virtio::scsi::constants::READ_10;
24use crate::virtio::scsi::constants::READ_6;
25use crate::virtio::scsi::constants::READ_CAPACITY_10;
26use crate::virtio::scsi::constants::READ_CAPACITY_16;
27use crate::virtio::scsi::constants::REPORT_LUNS;
28use crate::virtio::scsi::constants::REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS;
29use crate::virtio::scsi::constants::SERVICE_ACTION_IN_16;
30use crate::virtio::scsi::constants::SYNCHRONIZE_CACHE_10;
31use crate::virtio::scsi::constants::TEST_UNIT_READY;
32use crate::virtio::scsi::constants::TYPE_DISK;
33use crate::virtio::scsi::constants::UNMAP;
34use crate::virtio::scsi::constants::WRITE_10;
35use crate::virtio::scsi::constants::WRITE_SAME_10;
36use crate::virtio::scsi::constants::WRITE_SAME_16;
37use crate::virtio::scsi::device::AsyncLogicalUnit;
38use crate::virtio::scsi::device::ExecuteError;
39use crate::virtio::Reader;
40use crate::virtio::Writer;
41
42pub async fn execute_cdb(
44 cdb: &[u8],
45 reader: &mut Reader,
46 writer: &mut Writer,
47 dev: &AsyncLogicalUnit,
48) -> Result<(), ExecuteError> {
49 let op = cdb[0];
50 match op {
51 INQUIRY => parse_cdb::<Inquiry>(cdb)?.emulate(writer, dev),
52 MAINTENANCE_IN => execute_maintenance_in(cdb, writer),
53 MODE_SELECT_6 => parse_cdb::<ModeSelect6>(cdb)?.emulate(reader, dev),
54 MODE_SENSE_6 => parse_cdb::<ModeSense6>(cdb)?.emulate(writer, dev),
55 READ_6 => parse_cdb::<Read6>(cdb)?.emulate(writer, dev).await,
56 READ_10 => parse_cdb::<Read10>(cdb)?.emulate(writer, dev).await,
57 READ_CAPACITY_10 => parse_cdb::<ReadCapacity10>(cdb)?.emulate(writer, dev),
58 REPORT_LUNS => parse_cdb::<ReportLuns>(cdb)?.emulate(writer),
59 SERVICE_ACTION_IN_16 => execute_service_action_in_16(cdb, writer, dev),
60 SYNCHRONIZE_CACHE_10 => parse_cdb::<SynchronizeCache10>(cdb)?.emulate(dev).await,
61 TEST_UNIT_READY => parse_cdb::<TestUnitReady>(cdb)?.emulate(),
62 UNMAP => parse_cdb::<Unmap>(cdb)?.emulate(reader, dev).await,
63 WRITE_10 => parse_cdb::<Write10>(cdb)?.emulate(reader, dev).await,
64 WRITE_SAME_10 => parse_cdb::<WriteSame10>(cdb)?.emulate(reader, dev).await,
65 WRITE_SAME_16 => parse_cdb::<WriteSame16>(cdb)?.emulate(reader, dev).await,
66 _ => {
67 warn!("SCSI command {:#x?} is not implemented", op);
68 Err(ExecuteError::Unsupported(op))
69 }
70 }
71}
72
73fn execute_maintenance_in(cdb: &[u8], writer: &mut Writer) -> Result<(), ExecuteError> {
74 let service_action = cdb[1] & 0x1f;
76 match service_action {
77 REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS => {
78 parse_cdb::<ReportSupportedTMFs>(cdb)?.emulate(writer)
79 }
80 _ => {
81 warn!(
82 "service action {:#x?} for MAINTENANCE_IN is not implemented",
83 service_action
84 );
85 Err(ExecuteError::Unsupported(cdb[0]))
86 }
87 }
88}
89
90fn execute_service_action_in_16(
91 cdb: &[u8],
92 writer: &mut Writer,
93 dev: &AsyncLogicalUnit,
94) -> Result<(), ExecuteError> {
95 let service_action = cdb[1] & 0x1f;
97 match service_action {
98 READ_CAPACITY_16 => parse_cdb::<ReadCapacity16>(cdb)?.emulate(writer, dev),
99 _ => {
100 warn!(
101 "service action {:#x?} for SERVICE_ACTION_IN_16 is not implemented",
102 service_action
103 );
104 Err(ExecuteError::Unsupported(cdb[0]))
105 }
106 }
107}
108
109fn parse_cdb<T: FromBytes + Unaligned + Immutable + KnownLayout>(
110 cdb: &[u8],
111) -> Result<&T, ExecuteError> {
112 let (command, _) = T::ref_from_prefix(cdb).map_err(|_| ExecuteError::ReadCommand)?;
113 Ok(command)
114}
115
/// On-wire layout of the TEST UNIT READY CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct TestUnitReady {
    // Byte 0: operation code.
    opcode: u8,
    // Bytes 1-4: reserved.
    reserved: [u8; 4],
    // Byte 5: control byte.
    control: u8,
}
135
impl TestUnitReady {
    /// The emulated unit is always ready, so this unconditionally succeeds.
    fn emulate(&self) -> Result<(), ExecuteError> {
        Ok(())
    }
}
142
143fn check_lba_range(last_lba: u64, lba: u64, xfer_blocks: usize) -> Result<(), ExecuteError> {
144 match lba.checked_add(xfer_blocks as u64) {
146 Some(v) if v <= last_lba + 1 => Ok(()),
147 _ => Err(ExecuteError::LbaOutOfRange {
148 lba,
149 xfer_blocks,
150 last_lba,
151 }),
152 }
153}
154
/// Reads `xfer_blocks` blocks starting at `lba` from the disk image into
/// `writer`.
///
/// Fails with `LbaOutOfRange` if the range exceeds the device, or `ReadIo`
/// (carrying the residual byte count) if the transfer itself fails.
async fn read_from_disk(
    writer: &mut Writer,
    dev: &AsyncLogicalUnit,
    xfer_blocks: usize,
    lba: u64,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, xfer_blocks)?;
    let block_size = dev.block_size;
    // Convert the block-granular request into a byte count and byte offset.
    let count = xfer_blocks * block_size as usize;
    let offset = lba * block_size as u64;
    let before = writer.bytes_written();
    writer
        .write_all_from_at_fut(&*dev.disk_image, count, offset)
        .await
        .map_err(|desc_error| {
            // Residual = requested bytes minus what actually reached the writer.
            let resid = count - (writer.bytes_written() - before);
            ExecuteError::ReadIo { resid, desc_error }
        })
}
174
/// On-wire layout of the READ(6) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Read6 {
    // Byte 0: operation code.
    opcode: u8,
    // Bytes 1-3: 21-bit LBA (low 5 bits of byte 1, then bytes 2-3).
    lba_bytes: [u8; 3],
    // Byte 4: transfer length in blocks; 0 encodes 256.
    xfer_len_byte: u8,
    // Byte 5: control byte.
    control: u8,
}
195
196impl Read6 {
197 fn lba(&self) -> u32 {
198 u32::from_be_bytes([
199 0,
200 self.lba_bytes[0] & 0x1f,
202 self.lba_bytes[1],
203 self.lba_bytes[2],
204 ])
205 }
206
207 fn xfer_len(&self) -> usize {
208 if self.xfer_len_byte == 0 {
210 256
211 } else {
212 self.xfer_len_byte as usize
213 }
214 }
215
216 async fn emulate(
217 &self,
218 writer: &mut Writer,
219 dev: &AsyncLogicalUnit,
220 ) -> Result<(), ExecuteError> {
221 let xfer_len = self.xfer_len();
222 let lba = self.lba() as u64;
223 let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(6)", xfer_len, lba);
224 read_from_disk(writer, dev, xfer_len, lba).await
225 }
226}
227
/// On-wire layout of the INQUIRY CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Inquiry {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: bit 0 is the EVPD (enable vital product data) flag.
    vpd_field: u8,
    // Byte 2: VPD page code (must be 0 for standard inquiry data).
    page_code: u8,
    // Bytes 3-4: big-endian allocation length.
    alloc_len_bytes: [u8; 2],
    // Byte 5: control byte.
    control: u8,
}
249
impl Inquiry {
    /// EVPD bit (byte 1, bit 0): the guest is asking for a vital product data
    /// page rather than standard inquiry data.
    fn vital_product_data_enabled(&self) -> bool {
        self.vpd_field & 0x1 != 0
    }

    /// Big-endian allocation length from bytes 3-4.
    fn alloc_len(&self) -> usize {
        u16::from_be_bytes(self.alloc_len_bytes) as usize
    }

    fn page_code(&self) -> u8 {
        self.page_code
    }

    /// Emulates INQUIRY: writes either standard inquiry data or (when EVPD is
    /// set) a vital product data page into `writer`.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "INQUIRY");
        if self.vital_product_data_enabled() {
            return self.emulate_vital_product_data_page(writer, dev);
        }
        // A nonzero page code without EVPD is invalid.
        if self.page_code() != 0 {
            return Err(ExecuteError::InvalidField);
        }
        let alloc_len = self.alloc_len();
        let mut outbuf = vec![0u8; cmp::max(writer.available_bytes(), alloc_len)];
        // Byte 0: peripheral device type.
        outbuf[0] = TYPE_DISK;
        // Byte 1: 0 => medium is not removable.
        outbuf[1] = 0x0;
        // Byte 2: claimed standard version (0x5).
        outbuf[2] = 0x5;
        // Byte 3: 0x10 | 0x2 — presumably HiSup plus response data format 2;
        // confirm against the SPC standard inquiry data layout.
        outbuf[3] = 0x10 | 0x2;
        // Byte 4: additional length — bytes available after byte 4, clamped
        // so the standard 36-byte payload is always advertised.
        outbuf[4] = {
            let buflen = outbuf.len().try_into().unwrap_or(u8::MAX);
            cmp::max(buflen, 36) - 5
        };
        // Byte 7: 0x2 — presumably the CMDQUE flag; confirm against SPC.
        outbuf[7] = 0x2;
        // Bytes 8..36: vendor id, product id, product revision (space-padded ASCII).
        Self::fill_left_aligned_ascii(&mut outbuf[8..16], "CROSVM");
        Self::fill_left_aligned_ascii(&mut outbuf[16..32], "CROSVM HARDDISK");
        Self::fill_left_aligned_ascii(&mut outbuf[32..36], "0.1");

        // Truncate the response to the guest's allocation length.
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Builds the requested vital product data page and writes it to `writer`.
    /// Unsupported page codes yield `InvalidField`.
    fn emulate_vital_product_data_page(
        &self,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let alloc_len = self.alloc_len();
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // Common VPD header: device type then the echoed page code.
        outbuf[0] = TYPE_DISK;
        let page_code = self.page_code();
        outbuf[1] = page_code;
        match page_code {
            // Supported VPD pages page: lists every page code handled below.
            0x00 => {
                const SUPPORTED_VPD_PAGE_CODES: [u8; 4] = [0x00, 0x83, 0xb0, 0xb2];
                let page_code_len: u8 = SUPPORTED_VPD_PAGE_CODES
                    .len()
                    .try_into()
                    .expect("The number of vpd page codes cannot exceed u8::MAX");
                // Byte 3: page length; the code list follows from byte 4.
                outbuf[3] = page_code_len;
                outbuf[4..4 + page_code_len as usize].copy_from_slice(&SUPPORTED_VPD_PAGE_CODES);
            }
            // Device identification page.
            0x83 => {
                const DEVICE_ID: &[u8] = b"CROSVM SCSI DEVICE";
                let device_id_len: u8 = DEVICE_ID
                    .len()
                    .try_into()
                    .expect("device id should be shorter");
                // Bytes 2-3: page length = 4-byte descriptor header + id bytes.
                outbuf[2..4].copy_from_slice(&(4 + device_id_len as u16).to_be_bytes());
                // Byte 4: 0x2 — presumably the ASCII code set; confirm.
                outbuf[4] = 0x2;
                // Byte 7: identifier length, then the identifier itself.
                outbuf[7] = device_id_len;
                outbuf[8..8 + device_id_len as usize].copy_from_slice(DEVICE_ID);
            }
            // Block limits page.
            0xb0 => {
                // Byte 3: page length (0x3c).
                outbuf[3] = 0x3c;
                outbuf[4] = 1;
                // Bytes 8..12: device block count clamped to u32::MAX.
                outbuf[8..12].copy_from_slice(
                    &(dev.last_lba + 1)
                        .try_into()
                        .unwrap_or(u32::MAX)
                        .to_be_bytes(),
                );
                // Bytes 20..28: filled with 0xff — presumably "no limit" for
                // the unmap-related maxima; confirm which fields these are.
                outbuf[20..24].fill(0xff);
                outbuf[24..28].fill(0xff);
                // Bytes 28..32: fixed limit of 128 — TODO confirm which field.
                outbuf[28..32].copy_from_slice(&128u32.to_be_bytes());
                // Bytes 36..44: full 64-bit block count.
                outbuf[36..44].copy_from_slice(&(dev.last_lba + 1).to_be_bytes());
            }
            // Logical block provisioning page.
            0xb2 => {
                outbuf[3] = 4;
                // Byte 5 flags advertising UNMAP / WRITE SAME support.
                const UNMAP: u8 = 1 << 7;
                const WRITE_SAME_16: u8 = 1 << 6;
                const WRITE_SAME_10: u8 = 1 << 5;
                outbuf[5] = UNMAP | WRITE_SAME_10 | WRITE_SAME_16;
                outbuf[6] = 0x02;
            }
            _ => {
                warn!("unsupported vpd page code: {:#x?}", page_code);
                return Err(ExecuteError::InvalidField);
            }
        };
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Copies `s` into the front of `buf` and pads the remainder with spaces.
    fn fill_left_aligned_ascii(buf: &mut [u8], s: &str) {
        debug_assert!(s.len() < buf.len());
        buf[..s.len()].copy_from_slice(s.as_bytes());
        buf[s.len()..].fill(b' ');
    }
}
406
407fn fill_mode_page(
409 page_code: u8,
410 subpage_code: u8,
411 page_control: PageControl,
412 outbuf: &mut [u8],
413) -> Option<u8> {
414 match (page_code, subpage_code) {
417 (0x00, 0x00) => None,
419 (0x01, 0x00) => {
421 const LEN: u8 = 10;
422 outbuf[0] = page_code;
423 outbuf[1] = LEN;
424 if page_control != PageControl::Changable {
425 outbuf[3] = 0x80;
427 }
428 Some(LEN + 2)
429 }
430 (0x08, 0x00) => {
432 const LEN: u8 = 0x12;
433 outbuf[0] = page_code;
434 outbuf[1] = LEN;
435 if page_control != PageControl::Changable {
436 outbuf[2] = 0x04;
438 }
439 Some(LEN + 2)
440 }
441 _ => None,
442 }
443}
444
/// On-wire layout of the MODE SELECT(6) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ModeSelect6 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: PF (bit 4) and SP (bit 0) flags.
    pf_sp_field: u8,
    // Bytes 2-3: reserved.
    _reserved: [u8; 2],
    // Byte 4: parameter list length.
    param_list_len: u8,
    // Byte 5: control byte.
    control: u8,
}
468
impl ModeSelect6 {
    /// Requires PF (bit 4) set and SP (bit 0) clear: standard-format
    /// parameter data, no save-pages support.
    fn is_valid_pf_and_sp(&self) -> bool {
        self.pf_sp_field & 0x11 == 0x10
    }

    /// Emulates MODE SELECT(6): parses the parameter list from `reader` and
    /// accepts it only if it matches the device's current configuration.
    fn emulate(&self, reader: &mut Reader, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        // On-wire layout of a short-format block descriptor in the
        // parameter list.
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct BlockDescriptor {
            _density: u8,
            _number_of_blocks_field: [u8; 3],
            _reserved: u8,
            block_len_field: [u8; 3],
        }

        impl BlockDescriptor {
            // 24-bit big-endian block length, widened to u32.
            fn block_len(&self) -> u32 {
                u32::from_be_bytes([
                    0,
                    self.block_len_field[0],
                    self.block_len_field[1],
                    self.block_len_field[2],
                ])
            }
        }

        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SELECT(6)");
        if !self.is_valid_pf_and_sp() {
            return Err(ExecuteError::InvalidField);
        }
        // 4-byte mode parameter header: data length, medium type, device
        // parameter, block descriptor length.
        let [_mode_data_len, medium_type, _dev_param, block_desc_len] =
            reader.read_obj::<[u8; 4]>().map_err(ExecuteError::Read)?;
        if medium_type != TYPE_DISK {
            return Err(ExecuteError::InvalidField);
        }
        match block_desc_len {
            // No block descriptor.
            0 => (),
            // Exactly one 8-byte descriptor; its block length must match the
            // device's block size (it cannot be changed here).
            8 => {
                let block_desc = reader
                    .read_obj::<BlockDescriptor>()
                    .map_err(ExecuteError::Read)?;
                if block_desc.block_len() != dev.block_size {
                    return Err(ExecuteError::InvalidField);
                }
            }
            _ => return Err(ExecuteError::InvalidField),
        };
        // Validate every mode page in the remainder of the parameter list.
        while reader.available_bytes() > 0 {
            Self::handle_mode_page(reader)?;
        }
        Ok(())
    }

    /// Reads one mode page from `reader` and accepts it only if its contents
    /// equal the device's current values for that page (pages are read-only
    /// in this emulation).
    fn handle_mode_page(reader: &mut Reader) -> Result<(), ExecuteError> {
        // Header of a page_0-format mode page (no subpage).
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct Page0Header {
            page_code: u8,
            page_len: u8,
        }

        // Header of a subpage-format mode page.
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct SubpageHeader {
            page_code: u8,
            subpage_code: u8,
            page_len_field: [u8; 2],
        }

        // SPF bit (bit 6 of the first byte) clear => page_0 format.
        let is_page0 = reader.peek_obj::<u8>().map_err(ExecuteError::Read)? & 0x40 == 0;
        let (page_code, subpage_code, page_len) = if is_page0 {
            let header = reader
                .read_obj::<Page0Header>()
                .map_err(ExecuteError::Read)?;
            (header.page_code, 0, header.page_len as u16)
        } else {
            let header = reader
                .read_obj::<SubpageHeader>()
                .map_err(ExecuteError::Read)?;
            (
                header.page_code,
                header.subpage_code,
                u16::from_be_bytes(header.page_len_field),
            )
        };
        // Render the device's current page values...
        let mut outbuf = vec![0; page_len as usize];
        fill_mode_page(page_code, subpage_code, PageControl::Current, &mut outbuf);
        // ...and require the guest-supplied page to match them exactly.
        let mut input = vec![0; page_len as usize];
        reader.read_exact(&mut input).map_err(ExecuteError::Read)?;
        if input == outbuf {
            Ok(())
        } else {
            Err(ExecuteError::InvalidField)
        }
    }
}
577
/// On-wire layout of the MODE SENSE(6) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ModeSense6 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: bit 3 is the DBD (disable block descriptors) flag.
    dbd_field: u8,
    // Byte 2: PC (page control, bits 6-7) and page code (bits 0-5).
    page_control_and_page_code: u8,
    // Byte 3: subpage code.
    subpage_code: u8,
    // Byte 4: allocation length.
    alloc_len: u8,
    // Byte 5: control byte.
    control: u8,
}
600
/// The PC (page control) field of MODE SENSE(6): which flavor of mode-page
/// values the guest is asking for.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PageControl {
    /// Current values.
    Current,
    /// Default values.
    Default,
    /// Changeable-value mask. (Spelling kept as-is; name is referenced
    /// throughout this file.)
    Changable,
}
607
impl ModeSense6 {
    fn alloc_len(&self) -> usize {
        self.alloc_len as usize
    }

    /// DBD flag (byte 1, bit 3): the guest does not want block descriptors.
    fn disable_block_desc(&self) -> bool {
        self.dbd_field & 0x8 != 0
    }

    /// Page code: low 6 bits of byte 2.
    fn page_code(&self) -> u8 {
        self.page_control_and_page_code & 0x3f
    }

    /// Page control: top 2 bits of byte 2. Saved values (0b11) are not
    /// supported by this emulation.
    fn page_control(&self) -> Result<PageControl, ExecuteError> {
        match self.page_control_and_page_code >> 6 {
            0 => Ok(PageControl::Current),
            1 => Ok(PageControl::Changable),
            2 => Ok(PageControl::Default),
            3 => Err(ExecuteError::SavingParamNotSupported),
            // Unreachable: a u8 shifted right by 6 is always 0..=3.
            _ => Err(ExecuteError::InvalidField),
        }
    }

    fn subpage_code(&self) -> u8 {
        self.subpage_code
    }

    /// Emulates MODE SENSE(6): writes a mode parameter header, an optional
    /// block descriptor, and the requested mode page(s) into `writer`.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SENSE(6)");
        let alloc_len = self.alloc_len();
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // Byte 2 of the header: device-specific parameter; 0x80 marks the
        // unit write-protected.
        outbuf[2] = if dev.read_only { 0x80 } else { 0x00 };
        // `idx` tracks the next free byte: 12 when the 8-byte block
        // descriptor is included (header 4 + descriptor 8), otherwise 4.
        let mut idx = if !self.disable_block_desc() {
            // Byte 3: block descriptor length.
            outbuf[3] = 8;
            let sectors = dev.last_lba + 1;
            // Block count only fits the descriptor's 24-bit field when small
            // enough; otherwise it is left as zero.
            if sectors <= 0xffffff {
                outbuf[5..8].copy_from_slice(&(sectors as u32).to_be_bytes()[1..]);
            }
            // Bytes 9..12: low 24 bits of the block size.
            outbuf[9..12].copy_from_slice(&dev.block_size.to_be_bytes()[1..]);
            12
        } else {
            4
        };

        let page_control = self.page_control()?;
        let page_code = self.page_code();
        let subpage_code = self.subpage_code();
        match (page_code, subpage_code) {
            // Page code 0x3f: return all pages (subpage 0x00), or all pages
            // and subpages (subpage 0xff).
            (0x3f, 0x00) => {
                Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
            }
            (0x3f, 0xff) => {
                for subpage_code in 0..0xff {
                    Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
                }
            }
            (0x3f, _) => return Err(ExecuteError::InvalidField),
            // Single page, all subpages.
            (_, 0xff) => {
                for subpage_code in 0..0xff {
                    match fill_mode_page(
                        page_code,
                        subpage_code,
                        page_control,
                        &mut outbuf[idx as usize..],
                    ) {
                        Some(n) => idx += n,
                        None => return Err(ExecuteError::InvalidField),
                    };
                }
            }
            // Single specific (page, subpage).
            (_, _) => {
                match fill_mode_page(
                    page_code,
                    subpage_code,
                    page_control,
                    &mut outbuf[idx as usize..],
                ) {
                    Some(n) => idx += n,
                    None => return Err(ExecuteError::InvalidField),
                };
            }
        };
        // Byte 0: mode data length — bytes that follow it.
        outbuf[0] = idx - 1;
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Appends every supported page with the given subpage code to `outbuf`
    /// at `*idx`, advancing `*idx`; unsupported pages are skipped. Page 0 is
    /// emitted last.
    fn add_all_page_codes(
        subpage_code: u8,
        page_control: PageControl,
        outbuf: &mut [u8],
        idx: &mut u8,
    ) {
        for page_code in 1..0x3f {
            if let Some(n) = fill_mode_page(
                page_code,
                subpage_code,
                page_control,
                &mut outbuf[*idx as usize..],
            ) {
                *idx += n;
            }
        }
        if let Some(n) = fill_mode_page(0, subpage_code, page_control, &mut outbuf[*idx as usize..])
        {
            *idx += n;
        }
    }
}
738
/// On-wire layout of the READ CAPACITY(10) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReadCapacity10 {
    // Byte 0: operation code.
    opcode: u8,
    _obsolete1: u8,
    _obsolete2: [u8; 4],
    _reserved: [u8; 2],
    _obsolete3: u8,
    // Byte 9: control byte.
    control: u8,
}
761
762impl ReadCapacity10 {
763 fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
764 let block_address: u32 = dev.last_lba.try_into().unwrap_or(u32::MAX);
767 let mut outbuf = [0u8; 8];
768 outbuf[..4].copy_from_slice(&block_address.to_be_bytes());
769 outbuf[4..8].copy_from_slice(&dev.block_size.to_be_bytes());
770 writer.write_all(&outbuf).map_err(ExecuteError::Write)
771 }
772}
773
/// On-wire layout of the READ CAPACITY(16) CDB (a SERVICE ACTION IN(16)
/// service action).
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReadCapacity16 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: service action (low 5 bits).
    service_action_field: u8,
    _obsolete: [u8; 8],
    // Bytes 10-13: big-endian allocation length.
    alloc_len_bytes: [u8; 4],
    _reserved: u8,
    // Byte 15: control byte.
    control: u8,
}
796
impl ReadCapacity16 {
    /// Emulates READ CAPACITY(16): writes a 32-byte capacity descriptor.
    ///
    /// NOTE(review): `alloc_len_bytes` is ignored and the full 32 bytes are
    /// always written — confirm guests always supply a large enough data-in
    /// buffer.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "READ_CAPACITY(16)");
        let mut outbuf = [0u8; 32];
        // Bytes 0..8: address of the last logical block.
        outbuf[..8].copy_from_slice(&dev.last_lba.to_be_bytes());
        // Bytes 8..12: block size in bytes.
        outbuf[8..12].copy_from_slice(&dev.block_size.to_be_bytes());
        // Byte 14, bit 7 set — presumably the logical block provisioning
        // (LBPME) flag; confirm against the SBC READ CAPACITY(16) layout.
        outbuf[14] = 1 << 7;
        writer.write_all(&outbuf).map_err(ExecuteError::Write)
    }
}
810
/// On-wire layout of the READ(10) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Read10 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: protection / DPO / FUA flags (unused by this emulation).
    rdprotect: u8,
    // Bytes 2-5: big-endian 32-bit LBA.
    lba_bytes: [u8; 4],
    // Byte 6: group number.
    group_number: u8,
    // Bytes 7-8: big-endian transfer length in blocks.
    xfer_len_bytes: [u8; 2],
    // Byte 9: control byte.
    control: u8,
}
833
834impl Read10 {
835 fn xfer_len(&self) -> usize {
836 u16::from_be_bytes(self.xfer_len_bytes) as usize
837 }
838
839 fn lba(&self) -> u64 {
840 u32::from_be_bytes(self.lba_bytes) as u64
841 }
842
843 async fn emulate(
844 &self,
845 writer: &mut Writer,
846 dev: &AsyncLogicalUnit,
847 ) -> Result<(), ExecuteError> {
848 let xfer_len = self.xfer_len();
849 let lba = self.lba();
850 let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(10)", lba, xfer_len);
851 read_from_disk(writer, dev, xfer_len, lba).await
852 }
853}
854
/// On-wire layout of the WRITE(10) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Write10 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: protection / DPO / FUA flags (unused by this emulation).
    wrprotect: u8,
    // Bytes 2-5: big-endian 32-bit LBA.
    lba_bytes: [u8; 4],
    // Byte 6: group number.
    group_number: u8,
    // Bytes 7-8: big-endian transfer length in blocks.
    xfer_len_bytes: [u8; 2],
    // Byte 9: control byte.
    control: u8,
}
877
878impl Write10 {
879 fn lba(&self) -> u64 {
880 u32::from_be_bytes(self.lba_bytes) as u64
881 }
882
883 fn xfer_len(&self) -> usize {
884 u16::from_be_bytes(self.xfer_len_bytes) as usize
885 }
886
887 async fn emulate(
888 &self,
889 reader: &mut Reader,
890 dev: &AsyncLogicalUnit,
891 ) -> Result<(), ExecuteError> {
892 let xfer_len = self.xfer_len();
893 let lba = self.lba();
894 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE(10)", lba, xfer_len);
895 write_to_disk(reader, dev, xfer_len, lba).await
896 }
897}
898
/// Writes `xfer_blocks` blocks of payload from `reader` into the disk image
/// starting at `lba`.
///
/// Fails with `ReadOnly` on a read-only unit, `LbaOutOfRange` if the range
/// exceeds the device, or `WriteIo` (carrying the residual byte count) if the
/// transfer itself fails.
async fn write_to_disk(
    reader: &mut Reader,
    dev: &AsyncLogicalUnit,
    xfer_blocks: usize,
    lba: u64,
) -> Result<(), ExecuteError> {
    if dev.read_only {
        return Err(ExecuteError::ReadOnly);
    }
    check_lba_range(dev.last_lba, lba, xfer_blocks)?;
    let block_size = dev.block_size;
    // Convert the block-granular request into a byte count and byte offset.
    let count = xfer_blocks * block_size as usize;
    let offset = lba * block_size as u64;
    let before = reader.bytes_read();
    reader
        .read_exact_to_at_fut(&*dev.disk_image, count, offset)
        .await
        .map_err(|desc_error| {
            // Residual = requested bytes minus what was actually transferred.
            let resid = count - (reader.bytes_read() - before);
            ExecuteError::WriteIo { resid, desc_error }
        })
}
921
/// On-wire layout of the SYNCHRONIZE CACHE(10) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct SynchronizeCache10 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: IMMED flag byte (unused by this emulation).
    immed_byte: u8,
    // Bytes 2-5: big-endian 32-bit LBA (unused: the whole image is synced).
    lba_bytes: [u8; 4],
    // Byte 6: group number.
    group_number: u8,
    // Bytes 7-8: number of blocks (unused: the whole image is synced).
    block_num_bytes: [u8; 2],
    // Byte 9: control byte.
    control: u8,
}
944
945impl SynchronizeCache10 {
946 async fn emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
947 let _trace = cros_tracing::trace_event!(VirtioScsi, "SYNCHRONIZE_CACHE(10)");
948 if dev.read_only {
949 return Err(ExecuteError::ReadOnly);
950 }
951 dev.disk_image.fdatasync().await.map_err(|e| {
952 warn!("failed to sync: {e}");
953 ExecuteError::SynchronizationError
954 })
955 }
956}
957
/// Deallocates `nblocks` blocks starting at `lba` by punching a hole in the
/// disk image. The range is validated first; the punch itself is best-effort
/// and its result is intentionally discarded.
async fn unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, nblocks as usize)?;
    let offset = lba * dev.block_size as u64;
    let length = nblocks * dev.block_size as u64;
    // Best-effort: a failed punch_hole is not surfaced to the guest.
    let _ = dev.disk_image.punch_hole(offset, length).await;
    Ok(())
}
966
/// Emulates the write path of WRITE SAME: only an all-zero source block is
/// supported, which is implemented with `write_zeroes_at`; any non-zero
/// payload is rejected with `InvalidField`.
async fn write_same(
    dev: &AsyncLogicalUnit,
    lba: u64,
    nblocks: u64,
    reader: &mut Reader,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, nblocks as usize)?;
    // NOTE(review): the value returned by `split_at` is discarded — this
    // presumably truncates `reader` in place to one logical block so only
    // that block is inspected below; confirm `Reader::split_at` semantics.
    reader.split_at(dev.block_size as usize);
    if reader.get_remaining().iter().all(|s| s.is_all_zero()) {
        let block_size = dev.block_size as u64;
        // Best-effort zeroing: the result is intentionally discarded.
        let _ = dev
            .disk_image
            .write_zeroes_at(lba * block_size, nblocks * block_size)
            .await;
        Ok(())
    } else {
        Err(ExecuteError::InvalidField)
    }
}
990
/// On-wire layout of the WRITE SAME(10) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct WriteSame10 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: WRPROTECT (bits 5-7), ANCHOR (bit 4), UNMAP (bit 3).
    wrprotect_anchor_unmap: u8,
    // Bytes 2-5: big-endian 32-bit LBA.
    lba_bytes: [u8; 4],
    // Byte 6: group number.
    group_number_field: u8,
    // Bytes 7-8: big-endian number of blocks.
    nblocks_bytes: [u8; 2],
    // Byte 9: control byte.
    control: u8,
}
1013
1014impl WriteSame10 {
1015 fn lba(&self) -> u32 {
1016 u32::from_be_bytes(self.lba_bytes)
1017 }
1018
1019 fn nblocks(&self) -> u16 {
1020 u16::from_be_bytes(self.nblocks_bytes)
1021 }
1022
1023 fn unmap(&self) -> bool {
1024 self.wrprotect_anchor_unmap & 0x8 != 0
1025 }
1026
1027 fn anchor(&self) -> bool {
1028 self.wrprotect_anchor_unmap & 0x10 != 0
1029 }
1030
1031 async fn emulate(
1032 &self,
1033 reader: &mut Reader,
1034 dev: &AsyncLogicalUnit,
1035 ) -> Result<(), ExecuteError> {
1036 let lba = self.lba() as u64;
1037 let nblocks = self.nblocks() as u64;
1038 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(10)", lba, nblocks);
1039 if dev.read_only {
1040 return Err(ExecuteError::ReadOnly);
1041 }
1042 if nblocks == 0 {
1043 return Err(ExecuteError::InvalidField);
1045 }
1046 if self.anchor() {
1047 return Err(ExecuteError::InvalidField);
1049 }
1050 if self.unmap() {
1051 unmap(dev, lba, nblocks).await
1052 } else {
1053 write_same(dev, lba, nblocks, reader).await
1054 }
1055 }
1056}
1057
/// On-wire layout of the UNMAP CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Unmap {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: bit 0 is the ANCHOR flag.
    anchor_field: u8,
    // Bytes 2-5: reserved.
    _reserved: [u8; 4],
    // Byte 6: group number.
    group_number_field: u8,
    // Bytes 7-8: big-endian parameter list length.
    param_list_len_bytes: [u8; 2],
    // Byte 9: control byte.
    control: u8,
}
1080
impl Unmap {
    /// ANCHOR bit (byte 1, bit 0); anchored unmaps are not supported.
    fn anchor(&self) -> bool {
        self.anchor_field & 0x01 != 0
    }

    /// Big-endian parameter list length from bytes 7-8.
    fn param_list_len(&self) -> u16 {
        u16::from_be_bytes(self.param_list_len_bytes)
    }

    /// Emulates UNMAP: parses the parameter list from `reader` and punches a
    /// hole in the disk image for each block descriptor.
    async fn emulate(
        &self,
        reader: &mut Reader,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "UNMAP");
        if self.anchor() {
            return Err(ExecuteError::InvalidField);
        }
        if dev.read_only {
            return Err(ExecuteError::ReadOnly);
        }
        // A nonzero parameter list must be at least the 8-byte header.
        let param_list_len = self.param_list_len();
        if 0 < param_list_len && param_list_len < 8 {
            return Err(ExecuteError::InvalidParamLen);
        }
        // Skip the 2-byte unmap data length field.
        reader.consume(2);
        let unmap_block_descriptors = {
            // Block descriptor data length; each descriptor is 16 bytes.
            let block_data_len = reader
                .read_obj::<Be16>()
                .map_err(ExecuteError::Read)?
                .to_native();
            block_data_len / 16
        };
        // Skip the 4 reserved bytes that complete the 8-byte header.
        reader.consume(4);
        for _ in 0..unmap_block_descriptors {
            // Descriptor: 8-byte LBA, 4-byte block count, 4 reserved bytes.
            let lba = reader
                .read_obj::<Be64>()
                .map_err(ExecuteError::Read)?
                .to_native();
            let nblocks = reader
                .read_obj::<Be32>()
                .map_err(ExecuteError::Read)?
                .to_native() as u64;
            reader.consume(4);
            unmap(dev, lba, nblocks).await?;
        }
        Ok(())
    }
}
1135
/// On-wire layout of the WRITE SAME(16) CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct WriteSame16 {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: WRPROTECT (bits 5-7), ANCHOR (bit 4), UNMAP (bit 3).
    wrprotect_anchor_unmap: u8,
    // Bytes 2-9: big-endian 64-bit LBA.
    lba_bytes: [u8; 8],
    // Bytes 10-13: big-endian number of blocks.
    nblocks_bytes: [u8; 4],
    // Byte 14: group number.
    group_number_field: u8,
    // Byte 15: control byte.
    control: u8,
}
1158
1159impl WriteSame16 {
1160 fn lba(&self) -> u64 {
1161 u64::from_be_bytes(self.lba_bytes)
1162 }
1163
1164 fn nblocks(&self) -> u32 {
1165 u32::from_be_bytes(self.nblocks_bytes)
1166 }
1167
1168 fn unmap(&self) -> bool {
1169 self.wrprotect_anchor_unmap & 0x8 != 0
1170 }
1171
1172 fn anchor(&self) -> bool {
1173 self.wrprotect_anchor_unmap & 0x10 != 0
1174 }
1175
1176 async fn emulate(
1177 &self,
1178 reader: &mut Reader,
1179 dev: &AsyncLogicalUnit,
1180 ) -> Result<(), ExecuteError> {
1181 let lba = self.lba();
1182 let nblocks = self.nblocks() as u64;
1183 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(16)", lba, nblocks);
1184 if nblocks == 0 {
1185 return Err(ExecuteError::InvalidField);
1187 }
1188 if self.anchor() {
1189 return Err(ExecuteError::InvalidField);
1191 }
1192 if self.unmap() {
1193 unmap(dev, lba, nblocks).await
1194 } else {
1195 write_same(dev, lba, nblocks, reader).await
1196 }
1197 }
1198}
1199
/// On-wire layout of the REPORT LUNS CDB.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReportLuns {
    // Byte 0: operation code.
    opcode: u8,
    _reserved: u8,
    // Byte 2: select report (unused by this emulation).
    select_report: u8,
    _reserved2: [u8; 3],
    // Bytes 6-9: big-endian allocation length.
    alloc_len_bytes: [u8; 4],
    _reserved3: u8,
    // Byte 11: control byte.
    control: u8,
}
1223
1224impl ReportLuns {
1225 fn alloc_len(&self) -> usize {
1226 u32::from_be_bytes(self.alloc_len_bytes) as usize
1227 }
1228
1229 fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1230 let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_LUNS");
1231 if self.alloc_len() < 16 {
1233 return Err(ExecuteError::InvalidField);
1234 }
1235 let lun_list_len = 8u32;
1237 writer
1238 .write_all(&lun_list_len.to_be_bytes())
1239 .map_err(ExecuteError::Write)?;
1240 let reserved = [0; 4];
1241 writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1242 let lun0 = 0u64;
1243 writer
1244 .write_all(&lun0.to_be_bytes())
1245 .map_err(ExecuteError::Write)
1246 }
1247}
1248
/// On-wire layout of the REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS CDB
/// (a MAINTENANCE IN service action).
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReportSupportedTMFs {
    // Byte 0: operation code.
    opcode: u8,
    // Byte 1: service action (low 5 bits).
    service_action_field: u8,
    _reserved1: [u8; 4],
    // Bytes 6-9: big-endian allocation length.
    alloc_len_bytes: [u8; 4],
    _reserved2: u8,
    // Byte 11: control byte.
    control: u8,
}
1271
1272impl ReportSupportedTMFs {
1273 fn alloc_len(&self) -> u32 {
1274 u32::from_be_bytes(self.alloc_len_bytes)
1275 }
1276
1277 fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1278 let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_SUPPORTED_TMFs");
1279 if self.alloc_len() < 4 {
1281 return Err(ExecuteError::InvalidField);
1282 }
1283 const LOGICAL_UNIT_RESET: u8 = 1 << 3;
1285 const TARGET_RESET: u8 = 1 << 1;
1286 writer
1287 .write_obj(LOGICAL_UNIT_RESET | TARGET_RESET)
1288 .map_err(ExecuteError::Write)?;
1289 let reserved = [0u8; 3];
1291 writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1292 Ok(())
1293 }
1294}
1295
/// Unit tests for deserializing raw CDB bytes into the command structs above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_test_unit_ready() {
        let cdb = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let test_unit_ready = parse_cdb::<TestUnitReady>(&cdb).unwrap();
        assert_eq!(test_unit_ready.opcode, TEST_UNIT_READY);
        assert_eq!(test_unit_ready.reserved, [0; 4]);
        assert_eq!(test_unit_ready.control, 0);
    }

    #[test]
    fn parse_read6() {
        let cdb = [0x08, 0xab, 0xcd, 0xef, 0x00, 0x00];
        let read6 = parse_cdb::<Read6>(&cdb).unwrap();
        // A transfer-length byte of 0 encodes 256 blocks.
        assert_eq!(read6.xfer_len(), 256);
        // Only the low 5 bits of the first LBA byte count: 0xab -> 0x0b.
        assert_eq!(read6.lba(), 0x0bcdef);
    }

    #[test]
    fn parse_inquiry() {
        let cdb = [0x12, 0x01, 0x00, 0x00, 0x40, 0x00];
        let inquiry = parse_cdb::<Inquiry>(&cdb).unwrap();
        assert!(inquiry.vital_product_data_enabled());
        assert_eq!(inquiry.alloc_len(), 0x0040);
        assert_eq!(inquiry.page_code(), 0x00);
    }

    #[test]
    fn parse_mode_sense_6() {
        // Byte 2 = 0xa8: page control 0b10 (Default), page code 0x28.
        let cdb = [0x1a, 0x00, 0xa8, 0x00, 0x04, 0x00];
        let mode_sense_6 = parse_cdb::<ModeSense6>(&cdb).unwrap();
        assert_eq!(mode_sense_6.alloc_len(), 0x04);
        assert_eq!(mode_sense_6.page_code(), 0x28);
        assert_eq!(mode_sense_6.page_control().unwrap(), PageControl::Default);
    }

    #[test]
    fn parse_read_capacity_10() {
        let cdb = [0x25, 0x00, 0xab, 0xcd, 0xef, 0x01, 0x00, 0x00, 0x9, 0x0];
        let _read_capacity_10 = parse_cdb::<ReadCapacity10>(&cdb).unwrap();
    }

    #[test]
    fn parse_read10() {
        let cdb = [0x28, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let read10 = parse_cdb::<Read10>(&cdb).unwrap();
        assert_eq!(read10.xfer_len(), 0x0008);
        assert_eq!(read10.lba(), 0x003c0000);
    }

    #[test]
    fn parse_write10() {
        let cdb = [0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let write10 = parse_cdb::<Write10>(&cdb).unwrap();
        assert_eq!(write10.xfer_len(), 0x0008);
        assert_eq!(write10.lba(), 0x00000000);
    }

    #[test]
    fn parse_synchronize_cache_10() {
        let cdb = [0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let synchronize_cache_10 = parse_cdb::<SynchronizeCache10>(&cdb).unwrap();
        assert_eq!(synchronize_cache_10.opcode, SYNCHRONIZE_CACHE_10);
        assert_eq!(synchronize_cache_10.immed_byte, 0);
        assert_eq!(synchronize_cache_10.lba_bytes, [0x00, 0x00, 0x00, 0x00]);
        assert_eq!(synchronize_cache_10.group_number, 0x00);
        assert_eq!(synchronize_cache_10.block_num_bytes, [0x00, 0x00]);
        assert_eq!(synchronize_cache_10.control, 0x00);
    }

    #[test]
    fn parse_report_luns() {
        let cdb = [
            0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let report_luns = parse_cdb::<ReportLuns>(&cdb).unwrap();
        assert_eq!(report_luns.alloc_len(), 0xabcdef12);
    }

    #[test]
    fn parse_report_supported_tmfs() {
        let cdb = [
            0xa3, 0x0d, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let report_supported_tmfs = parse_cdb::<ReportSupportedTMFs>(&cdb).unwrap();
        assert_eq!(report_supported_tmfs.alloc_len(), 0xabcdef12);
    }
}