use base::linux::FileDataIterator;
use base::linux::MemfdSeals;
use base::linux::MemoryMappingUnix;
use base::linux::SharedMemoryLinux;
use base::MappedRegion;
use base::SharedMemory;
use bitflags::bitflags;
use crate::Error;
use crate::FileBackedMappingParameters;
use crate::GuestAddress;
use crate::GuestMemory;
use crate::MemoryRegion;
use crate::Result;
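
/// Policy hints that control how the host mappings backing guest memory are
/// managed.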
bitflags! {
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct MemoryPolicy: u32 {
        const USE_HUGEPAGES = 1;
        const LOCK_GUEST_MEMORY = (1 << 1);
        const USE_PUNCHHOLE_LOCKED = (1 << 2);
    }
}
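
/// Seals the shared memory backing guest RAM so that it can no longer grow,
/// shrink, or accept further seals.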
pub(crate) fn finalize_shm(shm: &mut SharedMemory) -> Result<()> {
    let mut seals = MemfdSeals::new();
    seals.set_shrink_seal();
    seals.set_grow_seal();
    seals.set_seal_seal();
    shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)
}
impl GuestMemory {
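    /// Madvises away the address range in the host that is associated with
    /// the given guest range, releasing the backing pages.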
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let (mapping, offset, _) = self.find_region(addr)?;
        mapping
            .remove_range(offset, count as usize)
            .map_err(|e| Error::MemoryAccess(addr, e))
    }
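
    /// Punches a hole in the region's backing object over the given guest
    /// range, deallocating the underlying storage.
    ///
    /// A usage sketch (the address and length here are hypothetical):
    /// ```ignore
    /// mem.punch_hole_range(GuestAddress(0x10_0000), 4096)?;
    /// ```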
    pub fn punch_hole_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let region = self
            .regions
            .iter()
            .find(|region| region.contains_range(addr, count))
            .ok_or(Error::InvalidGuestRange(addr, count))?;
        base::sys::linux::fallocate(
            &region.shared_obj,
            base::sys::linux::FallocateMode::PunchHole,
            region.obj_offset + addr.offset_from(region.guest_base),
            count,
        )
        .map_err(Error::PunchHole)
    }
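
    /// Applies the given policy hints to every guest memory region.
    ///
    /// A usage sketch, assuming `mem` is a `GuestMemory`:
    /// ```ignore
    /// mem.set_memory_policy(MemoryPolicy::USE_HUGEPAGES | MemoryPolicy::LOCK_GUEST_MEMORY);
    /// ```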
    pub fn set_memory_policy(&mut self, mem_policy: MemoryPolicy) {
        if mem_policy.is_empty() {
            return;
        }
        for region in self.regions.iter() {
            if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
                let ret = region.mapping.use_hugepages();
                if let Err(err) = ret {
                    println!("Failed to enable HUGEPAGE for mapping {}", err);
                }
            }
            if mem_policy.contains(MemoryPolicy::LOCK_GUEST_MEMORY) {
                self.locked = true;
                let ret = region.mapping.lock_all();
                if let Err(err) = ret {
                    println!("Failed to lock memory for mapping {}", err);
                }
            }
            if mem_policy.contains(MemoryPolicy::USE_PUNCHHOLE_LOCKED) {
                self.use_punchhole_locked = true;
            }
        }
    }
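
    /// Advises the kernel not to copy the guest memory mappings into child
    /// processes created by `fork`.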
    pub fn use_dontfork(&self) -> anyhow::Result<()> {
        for region in self.regions.iter() {
            region.mapping.use_dontfork()?;
        }
        Ok(())
    }
}
impl FileBackedMappingParameters {
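    /// Opens the backing file with read access, write access if the mapping
    /// is writable, and `O_SYNC` if synchronous writes were requested.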
    pub fn open(&self) -> std::io::Result<std::fs::File> {
        use std::os::unix::fs::OpenOptionsExt;
        Ok(base::open_file_or_duplicate(
            &self.path,
            std::fs::OpenOptions::new()
                .read(true)
                .write(self.writable)
                .custom_flags(if self.sync { libc::O_SYNC } else { 0 }),
        )?)
    }
}
impl MemoryRegion {
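    /// Returns the ranges of the backing object that contain data, expressed
    /// as offsets relative to the start of this region.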
    pub(crate) fn find_data_ranges(&self) -> anyhow::Result<Vec<std::ops::Range<usize>>> {
        FileDataIterator::new(
            &self.shared_obj,
            self.obj_offset,
            u64::try_from(self.mapping.size()).unwrap(),
        )
        .map(|range| {
            let range = range?;
            Ok(usize::try_from(range.start - self.obj_offset).unwrap()
                ..usize::try_from(range.end - self.obj_offset).unwrap())
        })
        .collect()
    }
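
    /// Zeroes the given range of the region by removing the backing pages;
    /// the guest subsequently reads the range as zero-filled memory.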
    pub(crate) fn zero_range(&self, offset: usize, size: usize) -> anyhow::Result<()> {
        self.mapping.remove_range(offset, size)?;
        Ok(())
    }
}