use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem;
use std::sync::Arc;

use async_trait::async_trait;
use base::AsRawDescriptor;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::RawDescriptor;
use base::VolatileSlice;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSource;
use data_model::Le16;
use data_model::Le32;
use remain::sorted;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

use crate::AsyncDisk;
use crate::DiskFile;
use crate::DiskGetLen;
use crate::Error as DiskError;
use crate::Result as DiskResult;
use crate::ToAsyncDisk;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid magic header for android sparse format")]
    InvalidMagicHeader,
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a;
const MAJOR_VERSION: u16 = 1;

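/// File header at the start of an Android sparse image; all multi-byte fields
/// are little-endian. `from_file` below rejects images whose `magic`,
/// `major_version`, or `chunk_hdr_size` do not match what this module expects.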
#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct SparseHeader {
    magic: Le32,          // 0xed26ff3a
    major_version: Le16,  // (0x1) - reject images with higher major versions
    minor_version: Le16,  // (0x0) - allow images with higher minor versions
    file_hdr_sz: Le16,    // 28 bytes for the first revision of the file format
    chunk_hdr_size: Le16, // 12 bytes for the first revision of the file format
    blk_sz: Le32,         // block size in bytes, must be a multiple of 4
    total_blks: Le32,     // total blocks in the non-sparse output image
    total_chunks: Le32,   // total chunks in the sparse input image
    image_checksum: Le32, // CRC32 checksum of the original data, counting "don't care"
}

const CHUNK_TYPE_RAW: u16 = 0xCAC1;
const CHUNK_TYPE_FILL: u16 = 0xCAC2;
const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3;
const CHUNK_TYPE_CRC32: u16 = 0xCAC4;

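/// Header preceding each chunk's payload. `chunk_sz` is the chunk's size in
/// the expanded image, counted in blocks of `blk_sz` bytes, while `total_sz`
/// is the chunk's size in the sparse file in bytes, including this header.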
#[repr(C)]
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct ChunkHeader {
    chunk_type: Le16,
    reserved1: u16,
    chunk_sz: Le32,
    total_sz: Le32,
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    /// Byte offset into the sparse file where the chunk's raw data begins.
    Raw(u64),
    /// Four-byte pattern repeated to fill the chunk.
    Fill([u8; 4]),
    /// Reads as zeroes; no data is stored.
    DontCare,
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct ChunkWithSize {
    chunk: Chunk,
    expanded_size: u64,
}

/// A disk file backed by an Android sparse image.
///
/// `chunks` maps each chunk's offset in the expanded image to the chunk and
/// its expanded size, so a read can find its containing chunk with a single
/// `BTreeMap::range` query.
#[derive(Debug)]
pub struct AndroidSparse {
    file: File,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

fn parse_chunk<T: Read + Seek>(input: &mut T, blk_sz: u64) -> Result<Option<ChunkWithSize>> {
    const HEADER_SIZE: usize = mem::size_of::<ChunkHeader>();
    let current_offset = input
        .stream_position()
        .map_err(Error::ReadSpecificationError)?;
    let mut chunk_header = ChunkHeader::new_zeroed();
    input
        .read_exact(chunk_header.as_mut_bytes())
        .map_err(Error::ReadSpecificationError)?;
    let chunk_body_size = (chunk_header.total_sz.to_native() as usize)
        .checked_sub(HEADER_SIZE)
        .ok_or(Error::InvalidSpecification(format!(
            "chunk total_sz {} smaller than header size {}",
            chunk_header.total_sz.to_native(),
            HEADER_SIZE
        )))?;
    let chunk = match chunk_header.chunk_type.to_native() {
        CHUNK_TYPE_RAW => {
            // Record where the raw data starts, then skip over it so the next
            // chunk header can be read.
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Raw(current_offset + HEADER_SIZE as u64)
        }
        CHUNK_TYPE_FILL => {
            let mut fill_bytes = [0u8; 4];
            if chunk_body_size != fill_bytes.len() {
                return Err(Error::InvalidSpecification(format!(
                    "Fill chunk had bad size. Expected {}, was {}",
                    fill_bytes.len(),
                    chunk_body_size
                )));
            }
            input
                .read_exact(&mut fill_bytes)
                .map_err(Error::ReadSpecificationError)?;
            Chunk::Fill(fill_bytes)
        }
        CHUNK_TYPE_DONT_CARE => Chunk::DontCare,
        CHUNK_TYPE_CRC32 => {
            // Skip the checksum payload so the stream stays aligned with the
            // next chunk header; the checksum itself is not validated.
            input
                .seek(SeekFrom::Current(chunk_body_size as i64))
                .map_err(Error::ReadSpecificationError)?;
            return Ok(None);
        }
        unknown_type => {
            return Err(Error::InvalidSpecification(format!(
                "Chunk had invalid type, was {unknown_type:x}"
            )))
        }
    };
    let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz;
    Ok(Some(ChunkWithSize {
        chunk,
        expanded_size,
    }))
}

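// Usage sketch (hypothetical caller; the path is a placeholder):
//
//     let file = File::open("disk.simg")?;
//     let disk = AndroidSparse::from_file(file)?;
//     // `disk` can now serve reads of the expanded image via DiskFile.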
impl AndroidSparse {
    pub fn from_file(mut file: File) -> Result<AndroidSparse> {
        file.seek(SeekFrom::Start(0))
            .map_err(Error::ReadSpecificationError)?;
        let mut sparse_header = SparseHeader::new_zeroed();
        file.read_exact(sparse_header.as_mut_bytes())
            .map_err(Error::ReadSpecificationError)?;
        if sparse_header.magic != SPARSE_HEADER_MAGIC {
            return Err(Error::InvalidSpecification(format!(
                "Header did not match magic constant. Expected {:x}, was {:x}",
                SPARSE_HEADER_MAGIC,
                sparse_header.magic.to_native()
            )));
        } else if sparse_header.major_version != MAJOR_VERSION {
            return Err(Error::InvalidSpecification(format!(
                "Header major version did not match. Expected {}, was {}",
                MAJOR_VERSION,
                sparse_header.major_version.to_native(),
            )));
        } else if sparse_header.chunk_hdr_size.to_native() as usize != mem::size_of::<ChunkHeader>()
        {
            // The canonical parser for this format allows `chunk_hdr_size >=
            // sizeof(chunk_header)`, but we are stricter for simplicity.
            return Err(Error::InvalidSpecification(format!(
                "Chunk header size does not match chunk header struct, expected {}, was {}",
                mem::size_of::<ChunkHeader>(),
                sparse_header.chunk_hdr_size.to_native()
            )));
        }
        let block_size = sparse_header.blk_sz.to_native() as u64;
        let chunks = (0..sparse_header.total_chunks.to_native())
            .filter_map(|_| parse_chunk(&mut file, block_size).transpose())
            .collect::<Result<Vec<ChunkWithSize>>>()?;
        let total_size =
            sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64;
        AndroidSparse::from_parts(file, total_size, chunks)
    }

    fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> {
        let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new();
        let mut expanded_location: u64 = 0;
        for chunk_with_size in chunks {
            let size = chunk_with_size.expanded_size;
            if chunks_map
                .insert(expanded_location, chunk_with_size)
                .is_some()
            {
                return Err(Error::InvalidSpecification(format!(
                    "Two chunks were at {expanded_location}"
                )));
            }
            expanded_location += size;
        }
        let image = AndroidSparse {
            file,
            total_size: size,
            chunks: chunks_map,
        };
        let calculated_len: u64 = image.chunks.iter().map(|x| x.1.expanded_size).sum();
        if calculated_len != size {
            return Err(Error::InvalidSpecification(format!(
                "Header promised size {size}, chunks added up to {calculated_len}"
            )));
        }
        Ok(image)
    }
}

impl DiskGetLen for AndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl AsRawDescriptor for AndroidSparse {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.file.as_raw_descriptor()
    }
}

impl FileReadWriteAtVolatile for AndroidSparse {
    // Performs reads up to the chunk boundary.
    fn read_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
        let found_chunk = self.chunks.range(..=offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or_else(|| {
            io::Error::new(
                ErrorKind::UnexpectedEof,
                format!("no chunk for offset {offset}"),
            )
        })?;
        let chunk_offset = offset - chunk_start;
        let chunk_size = *expanded_size;
        // Truncate the read to the end of the containing chunk; callers are
        // expected to issue another read for the remainder.
        let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
            slice
                .sub_slice(0, (chunk_size - chunk_offset) as usize)
                .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{e:?}")))?
        } else {
            slice
        };
        match chunk {
            Chunk::DontCare => {
                subslice.write_bytes(0);
                Ok(subslice.size())
            }
            Chunk::Raw(file_offset) => self
                .file
                .read_at_volatile(subslice, *file_offset + chunk_offset),
            Chunk::Fill(fill_bytes) => {
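                // The 4-byte pattern repeats from the start of the chunk, so a
                // read starting mid-pattern rotates it. E.g. (cf. the
                // `read_fill_edges` test) pattern [10, 20, 30, 40] read at
                // chunk offset 1 yields 20, 30, 40, 10, 20, ...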
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(subslice.size())
                    .collect();
                subslice.copy_from(&filled_memory);
                Ok(subslice.size())
            }
        }
    }

    fn write_at_volatile(&self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl DiskFile for AndroidSparse {}

/// An Android sparse disk that implements `AsyncDisk` for access.
pub struct AsyncAndroidSparse {
    inner: IoSource<File>,
    total_size: u64,
    chunks: BTreeMap<u64, ChunkWithSize>,
}

impl ToAsyncDisk for AndroidSparse {
    fn to_async_disk(self: Box<Self>, ex: &Executor) -> DiskResult<Box<dyn AsyncDisk>> {
        Ok(Box::new(AsyncAndroidSparse {
            inner: ex.async_from(self.file).map_err(DiskError::ToAsync)?,
            total_size: self.total_size,
            chunks: self.chunks,
        }))
    }
}

impl DiskGetLen for AsyncAndroidSparse {
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.total_size)
    }
}

impl FileSetLen for AsyncAndroidSparse {
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

impl FileAllocate for AsyncAndroidSparse {
    fn allocate(&self, _offset: u64, _length: u64) -> io::Result<()> {
        Err(io::Error::new(
            ErrorKind::PermissionDenied,
            "unsupported operation",
        ))
    }
}

#[async_trait(?Send)]
impl AsyncDisk for AsyncAndroidSparse {
    async fn flush(&self) -> crate::Result<()> {
        // The sparse image is read-only, so there is nothing to flush.
        Ok(())
    }

    async fn fsync(&self) -> DiskResult<()> {
        // The sparse image is read-only, so there is nothing to sync.
        Ok(())
    }

    async fn fdatasync(&self) -> DiskResult<()> {
        // The sparse image is read-only, so there is no data to sync.
        Ok(())
    }

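    /// Reads data from `file_offset` to the end of the current chunk and
    /// returns the number of bytes read.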
    async fn read_to_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
        let found_chunk = self.chunks.range(..=file_offset).next_back();
        let (
            chunk_start,
            ChunkWithSize {
                chunk,
                expanded_size,
            },
        ) = found_chunk.ok_or(DiskError::ReadingData(io::Error::new(
            ErrorKind::UnexpectedEof,
            format!("no chunk for offset {file_offset}"),
        )))?;
        let chunk_offset = file_offset - chunk_start;
        let chunk_size = *expanded_size;

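        // Clamp the regions to the remainder of the current chunk, mirroring
        // the truncation in the synchronous `read_at_volatile` above; callers
        // must loop to read across chunk boundaries.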
        let mem_offsets = mem_offsets.take_bytes((chunk_size - chunk_offset) as usize);
        let mem_size = mem_offsets.clone().map(|x| x.len).sum();
        match chunk {
            Chunk::DontCare => {
                for region in mem_offsets {
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .write_bytes(0);
                }
                Ok(mem_size)
            }
            Chunk::Raw(offset) => self
                .inner
                .read_to_mem(Some(offset + chunk_offset), mem, mem_offsets)
                .await
                .map_err(DiskError::ReadToMem),
            Chunk::Fill(fill_bytes) => {
                let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64;
                let filled_memory: Vec<u8> = fill_bytes
                    .iter()
                    .cloned()
                    .cycle()
                    .skip(chunk_offset_mod as usize)
                    .take(mem_size)
                    .collect();

                let mut filled_count = 0;
                for region in mem_offsets {
                    let buf = &filled_memory[filled_count..filled_count + region.len];
                    mem.get_volatile_slice(region)
                        .map_err(DiskError::GuestMemory)?
                        .copy_from(buf);
                    filled_count += region.len;
                }
                Ok(mem_size)
            }
        }
    }

    async fn write_from_mem<'a>(
        &'a self,
        _file_offset: u64,
        _mem: Arc<dyn BackingMemory + Send + Sync>,
        _mem_offsets: cros_async::MemRegionIter<'a>,
    ) -> DiskResult<usize> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn punch_hole(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }

    async fn write_zeroes_at(&self, _file_offset: u64, _length: u64) -> DiskResult<()> {
        Err(DiskError::UnsupportedOperation)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use std::io::Write;

    use super::*;

    const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>();

    #[test]
    fn parse_raw() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_RAW.into(),
            reserved1: 0,
            chunk_sz: 1.into(),
            total_sz: (CHUNK_SIZE as u32 + 123).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[0u8; 123]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Raw(CHUNK_SIZE as u64),
            expanded_size: 123,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_dont_care() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_DONT_CARE.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_cursor = Cursor::new(header_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_fill() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_FILL.into(),
            reserved1: 0,
            chunk_sz: 100.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123)
            .expect("Failed to parse")
            .expect("Failed to determine chunk type");
        let expected_chunk = ChunkWithSize {
            chunk: Chunk::Fill([123, 123, 123, 123]),
            expanded_size: 12300,
        };
        assert_eq!(expected_chunk, chunk);
    }

    #[test]
    fn parse_crc32() {
        let chunk_raw = ChunkHeader {
            chunk_type: CHUNK_TYPE_CRC32.into(),
            reserved1: 0,
            chunk_sz: 0.into(),
            total_sz: (CHUNK_SIZE as u32 + 4).into(),
        };
        let header_bytes = chunk_raw.as_bytes();
        let mut chunk_bytes: Vec<u8> = Vec::new();
        chunk_bytes.extend_from_slice(header_bytes);
        chunk_bytes.extend_from_slice(&[123u8; 4]);
        let mut chunk_cursor = Cursor::new(chunk_bytes);
        let chunk = parse_chunk(&mut chunk_cursor, 123).expect("Failed to parse");
        assert_eq!(None, chunk);
    }

    fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse {
        let file = tempfile::tempfile().expect("failed to create tempfile");
        let size = chunks.iter().map(|x| x.expanded_size).sum();
        AndroidSparse::from_parts(file, size, chunks).expect("Could not create image")
    }

    #[test]
    fn read_dontcare() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::DontCare,
            expanded_size: 100,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 100];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [0u8; 100];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_simple() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 10, 20]),
            expanded_size: 8,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 10, 20, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_edges() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Fill([10, 20, 30, 40]),
            expanded_size: 8,
        }];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 6];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
            .expect("Could not read");
        let expected = [20, 30, 40, 10, 20, 30];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_fill_offset_edges() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 20,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 100,
            },
        ];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 7];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
            .expect("Could not read");
        let expected = [40, 10, 20, 30, 40, 10, 20];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_raw() {
        let chunks = vec![ChunkWithSize {
            chunk: Chunk::Raw(0),
            expanded_size: 100,
        }];
        let mut image = test_image(chunks);
        write!(image.file, "hello").expect("Failed to write into internal file");
        let mut input_memory = [55u8; 5];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [104, 101, 108, 108, 111];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    #[test]
    fn read_two_fills() {
        let chunks = vec![
            ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 4,
            },
            ChunkWithSize {
                chunk: Chunk::Fill([30, 40, 30, 40]),
                expanded_size: 4,
            },
        ];
        let image = test_image(chunks);
        let mut input_memory = [55u8; 8];
        image
            .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
            .expect("Could not read");
        let expected = [10, 20, 10, 20, 30, 40, 30, 40];
        assert_eq!(&expected[..], &input_memory[..]);
    }

    use cros_async::MemRegion;
    use cros_async::MemRegionIter;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    fn test_async_image(
        chunks: Vec<ChunkWithSize>,
        ex: &Executor,
    ) -> DiskResult<Box<dyn AsyncDisk>> {
        Box::new(test_image(chunks)).to_async_disk(ex)
    }

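    /// Reads `len` bytes from `image` at `offset`, looping because a single
    /// `read_to_mem` call stops at the end of the containing chunk.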
    async fn read_exact_at(image: &dyn AsyncDisk, offset: usize, len: usize) -> Vec<u8> {
        let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
        // Fill guest memory with a marker value so the test can tell which
        // bytes the read actually touched.
        guest_mem
            .write_all_at_addr(&vec![55u8; len], GuestAddress(0))
            .unwrap();

        let mut count = 0usize;
        while count < len {
            let result = image
                .read_to_mem(
                    (offset + count) as u64,
                    guest_mem.clone(),
                    MemRegionIter::new(&[MemRegion {
                        offset: count as u64,
                        len: len - count,
                    }]),
                )
                .await;
            count += result.unwrap();
        }

        let mut buf = vec![0; len];
        guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
        buf
    }

    #[test]
    fn async_read_dontcare() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 100,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 100).await;
            assert!(buf.iter().all(|x| *x == 0));
        })
        .unwrap();
    }

    #[test]
    fn async_read_dontcare_with_offsets() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::DontCare,
                expanded_size: 10,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest memory with marker bytes.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Read the disk into the prepared memory regions.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 0, 0, 0, 55, 55, 0, 0, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 10, 20, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_simple_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 10, 20]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            // Prepare guest memory with marker bytes.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Read the disk into the prepared memory regions.
            image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 10, 20, 10, 55, 55, 20, 10, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Fill([10, 20, 30, 40]),
                expanded_size: 8,
            }];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 1, 6).await;
            let expected = [20, 30, 40, 10, 20, 30];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_offset_edges() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::DontCare,
                    expanded_size: 20,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 30, 40]),
                    expanded_size: 100,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 39, 7).await;
            let expected = [40, 10, 20, 30, 40, 10, 20];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_raw() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            let buf = read_exact_at(&*async_image, 0, 5).await;
            let expected = [104, 101, 108, 108, 111];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_fill_raw_with_offset() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![ChunkWithSize {
                chunk: Chunk::Raw(0),
                expanded_size: 100,
            }];
            let mut image = Box::new(test_image(chunks));
            write!(image.file, "hello").unwrap();
            let async_image = image.to_async_disk(&ex).unwrap();
            // Prepare guest memory with marker bytes.
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            guest_mem
                .write_all_at_addr(&[55u8; 20], GuestAddress(0))
                .unwrap();

            // Read the disk into the prepared memory regions.
            async_image
                .read_to_mem(
                    0,
                    guest_mem.clone(),
                    MemRegionIter::new(&[
                        MemRegion { offset: 1, len: 3 },
                        MemRegion { offset: 6, len: 2 },
                    ]),
                )
                .await
                .unwrap();
            let mut buf = vec![0; 10];
            guest_mem.read_at_addr(&mut buf, GuestAddress(0)).unwrap();
            let expected = [55, 104, 101, 108, 55, 55, 108, 111, 55, 55];
            assert_eq!(expected[..], buf[..]);
        })
        .unwrap();
    }

    #[test]
    fn async_read_two_fills() {
        let ex = Executor::new().unwrap();
        ex.run_until(async {
            let chunks = vec![
                ChunkWithSize {
                    chunk: Chunk::Fill([10, 20, 10, 20]),
                    expanded_size: 4,
                },
                ChunkWithSize {
                    chunk: Chunk::Fill([30, 40, 30, 40]),
                    expanded_size: 4,
                },
            ];
            let image = test_async_image(chunks, &ex).unwrap();
            let buf = read_exact_at(&*image, 0, 8).await;
            let expected = [10, 20, 10, 20, 30, 40, 30, 40];
            assert_eq!(&expected[..], &buf[..]);
        })
        .unwrap();
    }
}