use std::fs::File;
use std::io;
use std::io::BufWriter;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::mem::size_of;
use std::mem::size_of_val;
use base::FileReadWriteAtVolatile;
use base::VolatileSlice;
use base::WriteZeroesAt;
use zerocopy::AsBytes;
/// A raw file handle plus the cluster geometry used to address it.
///
/// Constructed via [`QcowRawFile::from`], which rejects any `cluster_size`
/// that is not a power of two, so the mask arithmetic below is always valid.
#[derive(Debug)]
pub struct QcowRawFile {
    // The backing file all reads/writes go through.
    file: File,
    // Cluster size in bytes; `from` guarantees this is a power of two.
    cluster_size: u64,
    // Always `cluster_size - 1`; ANDing an address with this yields the
    // offset within its cluster (see `cluster_offset`).
    cluster_mask: u64,
}
impl QcowRawFile {
pub fn from(file: File, cluster_size: u64) -> Option<Self> {
if cluster_size.count_ones() != 1 {
return None;
}
Some(QcowRawFile {
file,
cluster_size,
cluster_mask: cluster_size - 1,
})
}
pub fn read_pointer_table(
&mut self,
offset: u64,
count: u64,
mask: Option<u64>,
) -> io::Result<Vec<u64>> {
let mut table = vec![0; count as usize];
self.file.seek(SeekFrom::Start(offset))?;
self.file.read_exact(table.as_bytes_mut())?;
let mask = mask.unwrap_or(u64::MAX);
for ptr in &mut table {
*ptr = u64::from_be(*ptr) & mask;
}
Ok(table)
}
pub fn read_pointer_cluster(&mut self, offset: u64, mask: Option<u64>) -> io::Result<Vec<u64>> {
let count = self.cluster_size / size_of::<u64>() as u64;
self.read_pointer_table(offset, count, mask)
}
pub fn write_pointer_table(
&mut self,
offset: u64,
table: &[u64],
non_zero_flags: u64,
) -> io::Result<()> {
self.file.seek(SeekFrom::Start(offset))?;
let mut buffer = BufWriter::with_capacity(size_of_val(table), &self.file);
for addr in table {
let val = if *addr == 0 {
0
} else {
*addr | non_zero_flags
};
buffer.write_all(&val.to_be_bytes())?;
}
buffer.flush()?;
Ok(())
}
pub fn read_refcount_block(&mut self, offset: u64) -> io::Result<Vec<u16>> {
let count = self.cluster_size / size_of::<u16>() as u64;
let mut table = vec![0; count as usize];
self.file.seek(SeekFrom::Start(offset))?;
self.file.read_exact(table.as_bytes_mut())?;
for refcount in &mut table {
*refcount = u16::from_be(*refcount);
}
Ok(table)
}
pub fn write_refcount_block(&mut self, offset: u64, table: &[u16]) -> io::Result<()> {
self.file.seek(SeekFrom::Start(offset))?;
let mut buffer = BufWriter::with_capacity(size_of_val(table), &self.file);
for count in table {
buffer.write_all(&count.to_be_bytes())?;
}
buffer.flush()?;
Ok(())
}
pub fn add_cluster_end(&mut self, max_valid_cluster_offset: u64) -> io::Result<Option<u64>> {
let file_end: u64 = self.file.seek(SeekFrom::End(0))?;
let new_cluster_address: u64 = (file_end + self.cluster_size - 1) & !self.cluster_mask;
if new_cluster_address > max_valid_cluster_offset {
return Ok(None);
}
self.file.set_len(new_cluster_address + self.cluster_size)?;
Ok(Some(new_cluster_address))
}
pub fn file(&self) -> &File {
&self.file
}
pub fn file_mut(&mut self) -> &mut File {
&mut self.file
}
pub fn cluster_size(&self) -> u64 {
self.cluster_size
}
pub fn cluster_offset(&self, address: u64) -> u64 {
address & self.cluster_mask
}
pub fn zero_cluster(&mut self, address: u64) -> io::Result<()> {
let cluster_size = self.cluster_size as usize;
self.file.write_zeroes_all_at(address, cluster_size)?;
Ok(())
}
pub fn write_cluster(&mut self, address: u64, mut initial_data: Vec<u8>) -> io::Result<()> {
if (initial_data.len() as u64) < self.cluster_size {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"`initial_data` is too small",
));
}
let volatile_slice = VolatileSlice::new(&mut initial_data[..self.cluster_size as usize]);
self.file.write_all_at_volatile(volatile_slice, address)
}
}