#![deny(missing_docs)]
use std::fs::File;
use std::ops::Range;
use std::os::unix::fs::FileExt;
use base::error;
use base::linux::MemoryMappingUnix;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::Protection;
use base::VolatileMemory;
use base::VolatileMemoryError;
use base::VolatileSlice;
use thiserror::Error as ThisError;
use crate::pagesize::bytes_to_pages;
use crate::pagesize::is_page_aligned;
use crate::pagesize::pages_to_bytes;
/// Result type for swap-file operations.
pub type Result<T> = std::result::Result<T, Error>;

/// Maximum page index that can be tracked.
///
/// Page/file-slot indices are packed into the low 31 bits of a `u32`
/// (bit 31 is a flag bit) and free-list links are stored with a +1 offset,
/// so the largest representable index is 2^31 - 2.
const MAX_PAGE_IDX: usize = (1 << 31) - 2;
/// Errors returned by swap-file operations.
#[derive(ThisError, Debug)]
pub enum Error {
    /// An I/O operation on the underlying file failed.
    #[error("failed to io: {0}")]
    Io(#[from] std::io::Error),
    /// An mmap-related operation (named by the first field) failed.
    #[error("failed to mmap operation ({0}): {1}")]
    Mmap(&'static str, MmapError),
    /// Accessing the mapped file memory failed.
    #[error("failed to volatile memory operation: {0}")]
    VolatileMemory(#[from] VolatileMemoryError),
    /// A page index was outside the tracked region.
    #[error("index is out of range")]
    OutOfRange,
    /// A data size was not valid (e.g. not page-aligned, or too large).
    #[error("data size is invalid")]
    InvalidSize,
    /// A page index did not refer to a usable page for the operation.
    #[error("index is invalid")]
    InvalidIndex,
}
/// Per-file-slot state packed into a single `u32`.
///
/// Encoding (established by the impl below):
/// * bit 31 clear: the slot is allocated and the low 31 bits hold the guest
///   page index stored in this slot.
/// * bit 31 set: the slot is freed and the low 31 bits hold the next freed
///   slot index plus one (0 terminates the free list).
#[derive(Debug)]
struct FilePageState(u32);
impl FilePageState {
    /// Bit 31 marks a freed slot; the remaining bits then hold the
    /// (1-based) index of the next freed slot in the free list.
    const FREED_BIT_MASK: u32 = 1 << 31;

    /// Builds a freed-slot state linking to `first_freed_page` (the current
    /// head of the free list), or terminating the list when `None`.
    fn freed_state(first_freed_page: Option<usize>) -> Self {
        // Store the link with a +1 offset so that 0 can mean "end of list".
        let next_link = match first_freed_page {
            Some(idx_file) => idx_file as u32 + 1,
            None => 0,
        };
        Self(Self::FREED_BIT_MASK | next_link)
    }

    /// Builds an allocated-slot state holding `idx_page`, or `None` when the
    /// index does not fit in the 31 available bits.
    fn allocated_state(idx_page: usize) -> Option<Self> {
        (idx_page <= MAX_PAGE_IDX).then(|| Self(idx_page as u32))
    }

    /// Whether this slot is on the free list.
    fn is_freed(&self) -> bool {
        self.0 & Self::FREED_BIT_MASK != 0
    }

    /// For a freed slot, returns `Some(next)` where `next` is the following
    /// freed slot index (or `None` at the end of the list). Returns `None`
    /// for an allocated slot.
    fn next_file_freed_idx(&self) -> Option<Option<usize>> {
        if !self.is_freed() {
            return None;
        }
        match self.0 & !Self::FREED_BIT_MASK {
            0 => Some(None),
            link => Some(Some(link as usize - 1)),
        }
    }

    /// The guest page index stored in an allocated slot, or `None` if the
    /// slot is freed.
    fn idx_page(&self) -> Option<usize> {
        (!self.is_freed()).then(|| self.0 as usize)
    }
}
/// Tracks the state of every slot in the swap file, including a free list
/// of slots available for reuse.
#[derive(Debug)]
struct FilePageStates {
    // Head of the singly-linked free list (links are threaded through the
    // freed entries of `states`). `None` when no slot is free.
    first_idx_file_freed: Option<usize>,
    // One state per file slot; grows as slots are appended.
    states: Vec<FilePageState>,
}
impl FilePageStates {
    /// Creates an empty state table with room reserved for `capacity` slots.
    fn new(capacity: usize) -> Self {
        FilePageStates {
            first_idx_file_freed: None,
            states: Vec::with_capacity(capacity),
        }
    }

    /// Number of file slots tracked so far (allocated and freed).
    fn len(&self) -> usize {
        self.states.len()
    }

    /// Returns slot `idx_file` to the free list, making it the new head.
    fn free(&mut self, idx_file: usize) {
        self.states[idx_file] = FilePageState::freed_state(self.first_idx_file_freed);
        self.first_idx_file_freed = Some(idx_file);
    }

    /// Allocates a file slot for `idx_page` and returns the slot index.
    ///
    /// Pops the head of the free list when one exists; otherwise appends a
    /// fresh slot at the end of the file.
    fn allocate(&mut self, idx_page: usize) -> usize {
        let state = FilePageState::allocated_state(idx_page)
            .unwrap_or_else(|| unreachable!("idx_page must be less than MAX_PAGE_IDX"));
        match self.first_idx_file_freed {
            Some(idx_file_freed) => {
                let next_idx_file_freed = self.states[idx_file_freed]
                    .next_file_freed_idx()
                    .unwrap_or_else(|| unreachable!("pages in free list must be freed pages"));
                self.states[idx_file_freed] = state;
                self.first_idx_file_freed = next_idx_file_freed;
                idx_file_freed
            }
            None => {
                self.states.push(state);
                self.states.len() - 1
            }
        }
    }

    /// Finds a run of file slots, starting at or after `idx_file`, whose
    /// pages are present according to `page_states`.
    ///
    /// The run is at most `max_pages` long and, when `consecutive` is set,
    /// must additionally map to consecutive guest page indices. Returns the
    /// slot range together with the guest page index of the first slot, or
    /// `None` when no present page remains at or after `idx_file`.
    fn find_present_pages_range(
        &self,
        idx_file: usize,
        page_states: &[PageState],
        max_pages: usize,
        consecutive: bool,
    ) -> Option<(Range<usize>, usize)> {
        // Locate the first allocated slot whose page is present.
        let offset = self.states[idx_file..].iter().position(|state| {
            state
                .idx_page()
                .map_or(false, |idx_page| page_states[idx_page].is_present())
        })?;
        let head_idx_file = idx_file + offset;
        let head_idx_page = self.states[head_idx_file]
            .idx_page()
            .unwrap_or_else(|| unreachable!("the file page must not be freed"));
        // Extend the run while the following slots stay allocated, present,
        // and (if requested) consecutive in page index.
        let mut pages = 1;
        while pages < max_pages {
            let Some(idx_page) = self
                .states
                .get(head_idx_file + pages)
                .and_then(FilePageState::idx_page)
            else {
                // Ran off the end of the file, or hit a freed slot.
                break;
            };
            if !page_states[idx_page].is_present()
                || (consecutive && idx_page != head_idx_page + pages)
            {
                break;
            }
            pages += 1;
        }
        Some((head_idx_file..head_idx_file + pages, head_idx_page))
    }
}
/// Per-guest-page state: which file slot (if any) stores the page and
/// whether that stored copy is "present" (up to date).
///
/// Encoding: zero means "no data on file". Otherwise the low 31 bits hold
/// the file-slot index plus one and bit 31 is the present flag.
#[derive(Clone, Debug)]
struct PageState(u32);

impl PageState {
    /// Mask selecting the (1-based) file-slot index bits.
    const IDX_FILE_MASK: u32 = (1 << 31) - 1;
    /// Flag bit set while the on-file copy is up to date.
    const PRESENT_BIT_MASK: u32 = 1 << 31;

    /// Whether no file slot is associated with this page.
    fn is_none(&self) -> bool {
        self.0 == 0
    }

    /// The file-slot index holding this page, or `None` if the page has no
    /// data on file.
    fn idx_file(&self) -> Option<usize> {
        if self.is_none() {
            None
        } else {
            // Undo the +1 offset used so that 0 can mean "none".
            Some((self.0 & Self::IDX_FILE_MASK) as usize - 1)
        }
    }

    /// Whether the on-file copy is present (up to date).
    fn is_present(&self) -> bool {
        self.0 & Self::PRESENT_BIT_MASK != 0
    }

    /// Associates the page with `idx_file` and marks it present.
    fn update(&mut self, idx_file: usize) {
        self.0 = Self::PRESENT_BIT_MASK | (idx_file as u32 + 1);
    }

    /// Sets the present flag on an existing file-slot association.
    fn mark_as_present(&mut self) {
        self.0 |= Self::PRESENT_BIT_MASK;
    }

    /// Clears the present flag while keeping the file-slot association.
    fn clear(&mut self) {
        self.0 &= !Self::PRESENT_BIT_MASK;
    }

    /// Drops any file-slot association, returning to the "none" state.
    fn free(&mut self) {
        self.0 = 0;
    }
}
/// A file to hold the swapped-out pages of one guest memory region.
///
/// Tracks, per guest page, whether the page is stored on the file and
/// whether the stored copy is up to date ("present").
#[derive(Debug)]
pub struct SwapFile<'a> {
    // The backing swap file; written via `write_all_at`.
    file: &'a File,
    // Read-only mapping of the whole file, used to serve page contents back
    // as `VolatileSlice`s and for mlock/madvise operations.
    file_mmap: MemoryMapping,
    // One state per guest page in the region.
    page_states: Vec<PageState>,
    // Per-file-slot allocation state and free list.
    file_states: FilePageStates,
    // All file slots below this index have been mlock()ed by
    // `lock_and_async_prefetch()`.
    cursor_mlock: usize,
    // Lower bound on the first file slot that may hold a present page; lets
    // `first_data_range()` skip slots that are known to be consumed.
    min_possible_present_idx_file: usize,
}
impl<'a> SwapFile<'a> {
    /// Creates a [SwapFile] for `num_of_pages` pages backed by `file`.
    ///
    /// The whole file is mapped read-only so swapped-out contents can later
    /// be handed out as [VolatileSlice]s without extra read syscalls.
    ///
    /// # Errors
    ///
    /// Returns [Error::InvalidSize] if `num_of_pages` exceeds the encodable
    /// maximum, or [Error::Mmap] if creating the mapping fails.
    pub fn new(file: &'a File, num_of_pages: usize) -> Result<Self> {
        if num_of_pages > MAX_PAGE_IDX {
            return Err(Error::InvalidSize);
        }
        let file_mmap = MemoryMappingBuilder::new(pages_to_bytes(num_of_pages))
            .from_file(file)
            .protection(Protection::read())
            .build()
            .map_err(|e| Error::Mmap("create", e))?;
        Ok(Self {
            file,
            file_mmap,
            page_states: vec![PageState(0); num_of_pages],
            file_states: FilePageStates::new(num_of_pages),
            cursor_mlock: 0,
            min_possible_present_idx_file: 0,
        })
    }

    /// Returns a slice over the stored content of page `idx_page`, or
    /// `Ok(None)` if the page has no data on the file.
    ///
    /// When `allow_cleared` is true, pages whose present flag was cleared
    /// (data still on file, e.g. after [SwapFile::clear_range]) are returned
    /// as well.
    ///
    /// # Errors
    ///
    /// Returns [Error::OutOfRange] if `idx_page` is outside the region.
    pub fn page_content(
        &self,
        idx_page: usize,
        allow_cleared: bool,
    ) -> Result<Option<VolatileSlice>> {
        let state = self.page_states.get(idx_page).ok_or(Error::OutOfRange)?;
        if !state.is_none() && (allow_cleared || state.is_present()) {
            let Some(idx_file) = state.idx_file() else {
                unreachable!("the page is not none");
            };
            return match self
                .file_mmap
                .get_slice(pages_to_bytes(idx_file), pages_to_bytes(1))
            {
                Ok(slice) => Ok(Some(slice)),
                Err(VolatileMemoryError::OutOfBounds { .. }) => Err(Error::OutOfRange),
                Err(e) => Err(e.into()),
            };
        }
        Ok(None)
    }

    /// mlock()s and asynchronously prefetches the next run of present pages
    /// starting at the mlock cursor, up to `max_pages` long.
    ///
    /// Returns the number of pages locked; 0 once the cursor has passed the
    /// last present page (the cursor then stays at the end until
    /// [SwapFile::clear_mlock] resets it).
    pub fn lock_and_async_prefetch(&mut self, max_pages: usize) -> Result<usize> {
        if let Some((idx_file_range, _)) = self.file_states.find_present_pages_range(
            self.cursor_mlock,
            &self.page_states,
            max_pages,
            // Consecutive page indices are not required for prefetching.
            false,
        ) {
            let pages = idx_file_range.end - idx_file_range.start;
            let mem_offset = pages_to_bytes(idx_file_range.start);
            let size_in_bytes = pages_to_bytes(pages);
            self.file_mmap
                .lock_on_fault(mem_offset, size_in_bytes)
                .map_err(|e| Error::Mmap("mlock", e))?;
            self.file_mmap
                .async_prefetch(mem_offset, size_in_bytes)
                .map_err(|e| Error::Mmap("madvise willneed", e))?;
            self.cursor_mlock = idx_file_range.end;
            Ok(pages)
        } else {
            self.cursor_mlock = self.file_states.len();
            Ok(0)
        }
    }

    /// Clears the present flag of the pages in `idx_page_range` and drops
    /// the page cache of the corresponding file area.
    ///
    /// The data itself stays on the file (retrievable via
    /// [SwapFile::page_content] with `allow_cleared`). Returns the number of
    /// pages that were munlock()ed in the process.
    ///
    /// # Errors
    ///
    /// Fails with [Error::OutOfRange] / [Error::InvalidIndex] unless the
    /// range maps to a contiguous run of present file slots.
    pub fn clear_range(&mut self, idx_page_range: Range<usize>) -> Result<usize> {
        let idx_file_range = self.convert_idx_page_range_to_idx_file(idx_page_range.clone())?;
        for state in &mut self.page_states[idx_page_range] {
            state.clear();
        }
        let offset = pages_to_bytes(idx_file_range.start);
        let munlocked_size = if idx_file_range.start < self.cursor_mlock {
            // Only the prefix of the range below the mlock cursor is
            // actually locked; unlock just that part.
            let pages = idx_file_range.end.min(self.cursor_mlock) - idx_file_range.start;
            self.file_mmap
                .unlock(offset, pages_to_bytes(pages))
                .map_err(|e| Error::Mmap("munlock", e))?;
            pages
        } else {
            0
        };
        let size = pages_to_bytes(idx_file_range.end - idx_file_range.start);
        self.file_mmap
            .drop_page_cache(offset, size)
            .map_err(|e| Error::Mmap("madvise dontneed", e))?;
        Ok(munlocked_size)
    }

    /// Frees the pages in `idx_page_range`, returning their file slots to
    /// the free list for reuse.
    ///
    /// Freed slots that were mlock()ed are munlock()ed, with adjacent slots
    /// batched into single unlock calls. Returns the number of pages
    /// munlock()ed.
    ///
    /// # Errors
    ///
    /// Returns [Error::OutOfRange] if the range exceeds the region.
    pub fn free_range(&mut self, idx_page_range: Range<usize>) -> Result<usize> {
        if idx_page_range.end > self.page_states.len() {
            return Err(Error::OutOfRange);
        }
        let mut mlocked_pages = 0;
        let mut mlock_range: Option<Range<usize>> = None;
        for state in &mut self.page_states[idx_page_range] {
            if !state.is_none() {
                let Some(idx_file) = state.idx_file() else {
                    unreachable!("the page is not none.");
                };
                self.file_states.free(idx_file);
                if idx_file < self.cursor_mlock && state.is_present() {
                    mlocked_pages += 1;
                    if let Some(range) = mlock_range.as_mut() {
                        // Extend the pending munlock range when the slot is
                        // adjacent on either side; otherwise flush it and
                        // start a new one. Slots need not be in order since
                        // freed slots are reused from the free list.
                        if idx_file + 1 == range.start {
                            range.start = idx_file;
                        } else if idx_file == range.end {
                            range.end += 1;
                        } else {
                            self.file_mmap
                                .unlock(
                                    pages_to_bytes(range.start),
                                    pages_to_bytes(range.end - range.start),
                                )
                                .map_err(|e| Error::Mmap("munlock", e))?;
                            mlock_range = Some(idx_file..idx_file + 1);
                        }
                    } else {
                        mlock_range = Some(idx_file..idx_file + 1);
                    }
                }
            }
            state.free();
        }
        // Flush the final pending munlock range, if any.
        if let Some(mlock_range) = mlock_range {
            self.file_mmap
                .unlock(
                    pages_to_bytes(mlock_range.start),
                    pages_to_bytes(mlock_range.end - mlock_range.start),
                )
                .map_err(|e| Error::Mmap("munlock", e))?;
        }
        Ok(mlocked_pages)
    }

    /// munlock()s everything locked so far and resets the mlock cursor so
    /// that [SwapFile::lock_and_async_prefetch] starts over from the top.
    pub fn clear_mlock(&mut self) -> Result<()> {
        if self.cursor_mlock > 0 {
            self.file_mmap
                .unlock(0, pages_to_bytes(self.cursor_mlock))
                .map_err(|e| Error::Mmap("munlock", e))?;
        }
        self.cursor_mlock = 0;
        Ok(())
    }

    /// Marks the on-file copy of `idx_page` as present (up to date) again.
    ///
    /// # Errors
    ///
    /// Returns [Error::OutOfRange] if `idx_page` is outside the region, and
    /// [Error::InvalidIndex] if the page has no file data or is already
    /// present.
    pub fn mark_as_present(&mut self, idx_page: usize) -> Result<()> {
        let state = self
            .page_states
            .get_mut(idx_page)
            .ok_or(Error::OutOfRange)?;
        if !state.is_none() && !state.is_present() {
            state.mark_as_present();
            let Some(idx_file) = state.idx_file() else {
                unreachable!("the page is not none.");
            };
            // The newly-present page may live before the current scan lower
            // bound; pull the bound back so first_data_range() can find it.
            self.min_possible_present_idx_file =
                std::cmp::min(idx_file, self.min_possible_present_idx_file);
            Ok(())
        } else {
            Err(Error::InvalidIndex)
        }
    }

    /// Writes `mem_slice` to the file as the content of the pages starting
    /// at `idx_page`.
    ///
    /// Pages not yet on file get fresh slots (possibly reused from the free
    /// list); pages already on file keep their slot and are overwritten.
    ///
    /// # Errors
    ///
    /// Returns [Error::InvalidSize] if `mem_slice` is not a whole number of
    /// pages long, and [Error::OutOfRange] if the page range exceeds the
    /// region.
    pub fn write_to_file(&mut self, idx_page: usize, mem_slice: &[u8]) -> Result<()> {
        if !is_page_aligned(mem_slice.len()) {
            return Err(Error::InvalidSize);
        }
        let num_pages = bytes_to_pages(mem_slice.len());
        if idx_page + num_pages > self.page_states.len() {
            return Err(Error::OutOfRange);
        }
        // Slots may be reused anywhere in the file, so the scan lower bound
        // is no longer valid; reset it conservatively.
        self.min_possible_present_idx_file = 0;
        // First pass: make sure every page has a file slot and is marked
        // present.
        for cur in idx_page..idx_page + num_pages {
            let state = &mut self.page_states[cur];
            if state.is_none() {
                let idx_file = self.file_states.allocate(cur);
                state.update(idx_file);
            } else {
                state.mark_as_present();
            }
        }
        // Second pass: write the data, coalescing runs of consecutive file
        // slots into single write syscalls.
        let mut pending_idx_file = None;
        let mut pending_pages = 0;
        let mut mem_slice = mem_slice;
        for state in self.page_states[idx_page..idx_page + num_pages].iter() {
            let Some(idx_file) = state.idx_file() else {
                unreachable!("pages must be allocated");
            };
            if let Some(pending_idx_file) = pending_idx_file {
                if idx_file == pending_idx_file + pending_pages {
                    pending_pages += 1;
                    continue;
                }
                // The run broke; flush the pending batch.
                let size = pages_to_bytes(pending_pages);
                self.file
                    .write_all_at(&mem_slice[..size], pages_to_bytes(pending_idx_file) as u64)?;
                mem_slice = &mem_slice[size..];
            }
            pending_idx_file = Some(idx_file);
            pending_pages = 1;
        }
        // Flush the last batch.
        if let Some(pending_idx_file) = pending_idx_file {
            let size = pages_to_bytes(pending_pages);
            self.file
                .write_all_at(&mem_slice[..size], pages_to_bytes(pending_idx_file) as u64)?;
            mem_slice = &mem_slice[size..];
        }
        if !mem_slice.is_empty() {
            unreachable!("mem_slice must be all consumed");
        }
        Ok(())
    }

    /// Returns the page-index range of the first run of present pages that
    /// is contiguous both on the file and in page index, at most `max_pages`
    /// long, or `None` when no present page remains.
    ///
    /// Advances the scan lower bound so that repeated calls (interleaved
    /// with [SwapFile::clear_range]) walk forward through the file.
    pub fn first_data_range(&mut self, max_pages: usize) -> Option<Range<usize>> {
        if let Some((idx_file_range, head_idx_page)) = self.file_states.find_present_pages_range(
            self.min_possible_present_idx_file,
            &self.page_states,
            max_pages,
            // Require consecutive page indices so the result maps to one
            // contiguous guest range.
            true,
        ) {
            self.min_possible_present_idx_file = idx_file_range.start;
            let idx_page_range =
                head_idx_page..head_idx_page + idx_file_range.end - idx_file_range.start;
            Some(idx_page_range)
        } else {
            self.min_possible_present_idx_file = self.file_states.len();
            None
        }
    }

    /// Returns a [VolatileSlice] covering the file content of the present
    /// pages in `idx_page_range`.
    ///
    /// # Errors
    ///
    /// Fails unless the range maps to a contiguous run of present file
    /// slots ([Error::InvalidIndex]) inside the region ([Error::OutOfRange]).
    pub fn get_slice(&self, idx_page_range: Range<usize>) -> Result<VolatileSlice> {
        let idx_file_range = self.convert_idx_page_range_to_idx_file(idx_page_range)?;
        match self.file_mmap.get_slice(
            pages_to_bytes(idx_file_range.start),
            pages_to_bytes(idx_file_range.end - idx_file_range.start),
        ) {
            Ok(slice) => Ok(slice),
            Err(VolatileMemoryError::OutOfBounds { .. }) => Err(Error::OutOfRange),
            Err(e) => Err(e.into()),
        }
    }

    /// The number of pages currently marked present on the file.
    pub fn present_pages(&self) -> usize {
        self.page_states
            .iter()
            .map(|state| state.is_present() as usize)
            .sum()
    }

    /// Maps a page-index range to the corresponding file-slot range.
    ///
    /// Every page in the range must be present and stored in consecutive
    /// file slots; otherwise [Error::InvalidIndex] is returned.
    fn convert_idx_page_range_to_idx_file(
        &self,
        idx_page_range: Range<usize>,
    ) -> Result<Range<usize>> {
        let state = self
            .page_states
            .get(idx_page_range.start)
            .ok_or(Error::OutOfRange)?;
        if state.is_none() || !state.is_present() {
            return Err(Error::InvalidIndex);
        }
        let Some(head_idx_file) = state.idx_file() else {
            unreachable!("the page is not none.");
        };
        // Verify the remaining pages follow in consecutive file slots.
        let mut idx_file = head_idx_file;
        for idx in idx_page_range.start + 1..idx_page_range.end {
            let state = self.page_states.get(idx).ok_or(Error::OutOfRange)?;
            idx_file += 1;
            if state.is_none()
                || !state.is_present()
                || state
                    .idx_file()
                    .unwrap_or_else(|| unreachable!("the page is not none."))
                    != idx_file
            {
                return Err(Error::InvalidIndex);
            }
        }
        let idx_file_range =
            head_idx_file..head_idx_file + idx_page_range.end - idx_page_range.start;
        Ok(idx_file_range)
    }
}
// Unit tests exercising SwapFile against a real temporary file.
#[cfg(test)]
mod tests {
    use std::slice;

    use base::pagesize;
    use base::sys::FileDataIterator;

    use super::*;

    #[test]
    fn new_success() {
        let file = tempfile::tempfile().unwrap();
        assert_eq!(SwapFile::new(&file, 200).is_ok(), true);
    }

    #[test]
    fn len() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 200).unwrap();
        assert_eq!(swap_file.page_states.len(), 200);
    }

    #[test]
    fn page_content_default_is_none() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 200).unwrap();
        assert_eq!(swap_file.page_content(0, false).unwrap().is_none(), true);
    }

    #[test]
    fn page_content_returns_content() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        let page = swap_file.page_content(0, false).unwrap().unwrap();
        #[allow(clippy::undocumented_unsafe_blocks)]
        let result = unsafe { slice::from_raw_parts(page.as_ptr(), pagesize()) };
        assert_eq!(result, data);
    }

    #[test]
    fn page_content_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 200).unwrap();
        assert_eq!(swap_file.page_content(199, false).is_ok(), true);
        match swap_file.page_content(200, false) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        }
    }

    // Helper: asserts that the stored content of page `idx` equals `data`.
    fn assert_page_content(swap_file: &SwapFile, idx: usize, data: &[u8]) {
        let page = swap_file.page_content(idx, false).unwrap().unwrap();
        #[allow(clippy::undocumented_unsafe_blocks)]
        let result = unsafe { slice::from_raw_parts(page.as_ptr(), pagesize()) };
        assert_eq!(result, data);
    }

    #[test]
    fn write_to_file_swap_file() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let buf1 = &vec![1; pagesize()];
        let buf2 = &vec![2; 2 * pagesize()];
        swap_file.write_to_file(0, buf1).unwrap();
        swap_file.write_to_file(2, buf2).unwrap();
        assert_page_content(&swap_file, 0, buf1);
        assert_page_content(&swap_file, 2, &buf2[0..pagesize()]);
        assert_page_content(&swap_file, 3, &buf2[pagesize()..2 * pagesize()]);
    }

    #[test]
    fn write_to_file_invalid_size() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        // One byte over a page boundary is not page-aligned.
        let buf = &vec![1; pagesize() + 1];
        match swap_file.write_to_file(0, buf) {
            Err(Error::InvalidSize) => {}
            _ => unreachable!("not invalid size"),
        };
    }

    #[test]
    fn write_to_file_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let buf1 = &vec![1; pagesize()];
        let buf2 = &vec![2; 2 * pagesize()];
        match swap_file.write_to_file(200, buf1) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        match swap_file.write_to_file(199, buf2) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    // Overwriting existing pages must reuse their file slots; only new
    // pages get fresh slots at the end of the file.
    #[test]
    fn write_to_file_overwrite() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file.write_to_file(0, &vec![1; pagesize()]).unwrap();
        swap_file
            .write_to_file(2, &vec![2; 2 * pagesize()])
            .unwrap();
        let mut buf = vec![0; 3 * pagesize()];
        buf[..pagesize()].fill(3);
        buf[pagesize()..2 * pagesize()].fill(4);
        buf[2 * pagesize()..3 * pagesize()].fill(5);
        swap_file.write_to_file(0, &buf).unwrap();
        assert_page_content(&swap_file, 0, &vec![3; pagesize()]);
        assert_page_content(&swap_file, 1, &vec![4; pagesize()]);
        assert_page_content(&swap_file, 2, &vec![5; pagesize()]);
        assert_page_content(&swap_file, 3, &vec![2; pagesize()]);
        assert!(swap_file.page_content(4, false).unwrap().is_none());
        let data = FileDataIterator::new(&file, 0, file.metadata().unwrap().len())
            .collect::<std::result::Result<Vec<_>, _>>();
        assert_eq!(data, Ok(vec![0..4 * pagesize() as u64]));
        buf[..pagesize()].fill(6);
        buf[pagesize()..2 * pagesize()].fill(7);
        buf[2 * pagesize()..3 * pagesize()].fill(8);
        swap_file.write_to_file(2, &buf).unwrap();
        assert_page_content(&swap_file, 0, &vec![3; pagesize()]);
        assert_page_content(&swap_file, 1, &vec![4; pagesize()]);
        assert_page_content(&swap_file, 2, &vec![6; pagesize()]);
        assert_page_content(&swap_file, 3, &vec![7; pagesize()]);
        assert_page_content(&swap_file, 4, &vec![8; pagesize()]);
        assert!(swap_file.page_content(5, false).unwrap().is_none());
        let data = FileDataIterator::new(&file, 0, file.metadata().unwrap().len())
            .collect::<std::result::Result<Vec<_>, _>>();
        assert_eq!(data, Ok(vec![0..5 * pagesize() as u64]));
    }

    // mlock-based tests are gated to x86_64 — presumably because mlock
    // limits/page size are only known-good on the x86_64 CI environment;
    // confirm before relying on this.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn lock_and_start_populate() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file
            .write_to_file(3, &vec![1; 5 * pagesize()])
            .unwrap();
        swap_file.write_to_file(10, &vec![1; pagesize()]).unwrap();
        let mut locked_pages = 0;
        loop {
            let pages = swap_file.lock_and_async_prefetch(2).unwrap();
            if pages == 0 {
                break;
            }
            assert!(pages <= 2);
            locked_pages += pages;
        }
        // 1 + 5 + 1 pages were written in total.
        assert_eq!(locked_pages, 7);
    }

    #[test]
    fn clear_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.clear_range(0..1).unwrap();
        assert!(swap_file.page_content(0, false).unwrap().is_none());
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn clear_range_unlocked_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        // Lock only the first 5 of the 10 written pages.
        assert_eq!(swap_file.lock_and_async_prefetch(5).unwrap(), 5);
        assert_eq!(swap_file.clear_range(1..4).unwrap(), 3);
        // Only 2 of pages 4..7 fall below the mlock cursor.
        assert_eq!(swap_file.clear_range(4..7).unwrap(), 2);
        assert_eq!(swap_file.clear_range(10..11).unwrap(), 0);
    }

    // clear_range() keeps the data on disk; page_content(.., true) can
    // still retrieve it.
    #[test]
    fn clear_range_keep_on_disk() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.clear_range(0..1).unwrap();
        let slice = swap_file.page_content(0, true).unwrap().unwrap();
        #[allow(clippy::undocumented_unsafe_blocks)]
        let slice = unsafe { slice::from_raw_parts(slice.as_ptr(), slice.size()) };
        assert_eq!(slice, data);
    }

    #[test]
    fn clear_range_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file.write_to_file(199, &vec![0; pagesize()]).unwrap();
        match swap_file.clear_range(199..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        assert!(swap_file.clear_range(199..200).is_ok());
        match swap_file.clear_range(200..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    #[test]
    fn free_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.free_range(0..1).unwrap();
        // After freeing, the page is gone even with allow_cleared.
        assert!(swap_file.page_content(0, false).unwrap().is_none());
        assert!(swap_file.page_content(0, true).unwrap().is_none());
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn free_range_unlocked_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        assert_eq!(swap_file.lock_and_async_prefetch(5).unwrap(), 5);
        assert_eq!(swap_file.free_range(0..1).unwrap(), 0);
        assert_eq!(swap_file.free_range(0..2).unwrap(), 1);
        assert_eq!(swap_file.free_range(2..4).unwrap(), 2);
        assert_eq!(swap_file.free_range(3..7).unwrap(), 2);
        assert_eq!(swap_file.free_range(10..11).unwrap(), 0);
    }

    #[test]
    fn free_range_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        assert_eq!(swap_file.free_range(199..200).is_ok(), true);
        match swap_file.free_range(200..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        match swap_file.free_range(199..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    // Freed file slots must be reused by subsequent writes before the file
    // grows.
    #[test]
    fn free_range_and_write() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        let data = &vec![1; 5 * pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.free_range(0..5).unwrap();
        swap_file
            .write_to_file(0, &vec![2; 2 * pagesize()])
            .unwrap();
        swap_file
            .write_to_file(5, &vec![3; 4 * pagesize()])
            .unwrap();
        assert_page_content(&swap_file, 0, &vec![2; pagesize()]);
        assert_page_content(&swap_file, 1, &vec![2; pagesize()]);
        assert!(swap_file.page_content(2, true).unwrap().is_none());
        assert!(swap_file.page_content(3, true).unwrap().is_none());
        assert!(swap_file.page_content(4, true).unwrap().is_none());
        assert_page_content(&swap_file, 5, &vec![3; pagesize()]);
        assert_page_content(&swap_file, 6, &vec![3; pagesize()]);
        assert_page_content(&swap_file, 7, &vec![3; pagesize()]);
        assert_page_content(&swap_file, 8, &vec![3; pagesize()]);
        assert!(swap_file.page_content(9, true).unwrap().is_none());
        // 6 pages of data in total: 5 reused slots + 1 appended.
        let data = FileDataIterator::new(&file, 0, file.metadata().unwrap().len())
            .collect::<std::result::Result<Vec<_>, _>>();
        assert_eq!(data, Ok(vec![0..6 * pagesize() as u64]));
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn clear_mlock() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        assert!(swap_file.clear_mlock().is_ok());
        assert_eq!(swap_file.lock_and_async_prefetch(11).unwrap(), 10);
        // Resetting the cursor allows locking the same pages again.
        assert!(swap_file.clear_mlock().is_ok());
        assert_eq!(swap_file.lock_and_async_prefetch(11).unwrap(), 10);
    }

    #[test]
    fn first_data_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file
            .write_to_file(1, &vec![1; 2 * pagesize()])
            .unwrap();
        swap_file.write_to_file(3, &vec![2; pagesize()]).unwrap();
        assert_eq!(swap_file.first_data_range(200).unwrap(), 1..4);
        assert_eq!(swap_file.first_data_range(2).unwrap(), 1..3);
        assert_eq!(swap_file.first_data_range(1).unwrap(), 1..2);
        swap_file.clear_range(1..3).unwrap();
        assert_eq!(swap_file.first_data_range(2).unwrap(), 3..4);
        swap_file.clear_range(3..4).unwrap();
        assert!(swap_file.first_data_range(2).is_none());
    }

    #[test]
    fn get_slice() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file.write_to_file(2, &vec![2; pagesize()]).unwrap();
        let slice = swap_file.get_slice(1..3).unwrap();
        assert_eq!(slice.size(), 2 * pagesize());
        let mut buf = vec![0u8; pagesize()];
        slice.get_slice(0, pagesize()).unwrap().copy_to(&mut buf);
        assert_eq!(buf, vec![1; pagesize()]);
        let mut buf = vec![0u8; pagesize()];
        slice
            .get_slice(pagesize(), pagesize())
            .unwrap()
            .copy_to(&mut buf);
        assert_eq!(buf, vec![2; pagesize()]);
    }

    #[test]
    fn get_slice_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 200).unwrap();
        match swap_file.get_slice(200..201) {
            Err(Error::OutOfRange) => {}
            other => {
                unreachable!("unexpected result {:?}", other);
            }
        }
    }

    #[test]
    fn present_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 200).unwrap();
        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file.write_to_file(2, &vec![2; pagesize()]).unwrap();
        assert_eq!(swap_file.present_pages(), 2);
    }
}