use std::fmt;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
#[cfg(target_arch = "x86_64")]
use std::time::Instant;
#[cfg(target_arch = "x86_64")]
use base::error;
use base::Event;
use base::EventToken;
use base::WaitContext;
use base::WorkerThread;
#[cfg(target_arch = "x86_64")]
use metrics::log_metric;
#[cfg(target_arch = "x86_64")]
use metrics::MetricEventType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
use super::INTERRUPT_STATUS_CONFIG_CHANGED;
use super::INTERRUPT_STATUS_USED_RING;
use super::VIRTIO_MSI_NO_VECTOR;
#[cfg(target_arch = "x86_64")]
use crate::acpi::PmWakeupEvent;
use crate::irq_event::IrqEdgeEvent;
use crate::irq_event::IrqLevelEvent;
use crate::pci::MsixConfig;
/// State for the PCI transport: the legacy level-triggered interrupt plus the
/// optional shared MSI-X configuration.
struct TransportPci {
    // Level-triggered interrupt event pair (trigger + resample).
    irq_evt_lvl: IrqLevelEvent,
    // Shared MSI-X config; `None` when the device has no MSI-X capability.
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
    // MSI-X vector used for configuration-change interrupts.
    config_msix_vector: u16,
}
/// Per-transport interrupt delivery mechanism.
enum Transport {
    /// Virtio-over-PCI: MSI-X when enabled by the guest, otherwise the legacy
    /// level interrupt plus ISR status bits.
    Pci {
        pci: TransportPci,
    },
    /// Virtio-over-MMIO: a single edge-triggered interrupt plus ISR status bits.
    Mmio {
        irq_evt_edge: IrqEdgeEvent,
    },
    /// Vhost-user: interrupts are delivered by signaling `call_evt`; config
    /// changes are reported through a caller-supplied callback.
    VhostUser {
        call_evt: Event,
        // Invoked by `signal_config_changed`; `Send + Sync` because `Interrupt`
        // is `Clone` and may be shared across threads.
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    },
}
/// Shared state behind an `Interrupt` handle (always wrapped in an `Arc`).
struct InterruptInner {
    // Virtio ISR status bits (INTERRUPT_STATUS_*), read/cleared through the
    // `read_*`/`clear_*` accessors on `Interrupt`.
    interrupt_status: AtomicUsize,
    // How interrupts reach the guest for this device.
    transport: Transport,
    // When true, `update_interrupt_status` requests a signal even if the
    // status bits were already nonzero.
    async_intr_status: bool,
    // Power-management bookkeeping: queues interrupts while suspended.
    pm_state: Mutex<PmState>,
}
impl InterruptInner {
    /// Atomically ORs `interrupt_status_mask` into the ISR status.
    ///
    /// Returns true when the caller should signal the interrupt event: either
    /// the status transitioned from 0 to nonzero, or `async_intr_status` is
    /// set (in which case every update requests a signal). The OR is performed
    /// unconditionally in both cases.
    fn update_interrupt_status(&self, interrupt_status_mask: u32) -> bool {
        let previous = self
            .interrupt_status
            .fetch_or(interrupt_status_mask as usize, Ordering::SeqCst);
        previous == 0 || self.async_intr_status
    }
}
/// Cloneable handle used by a virtio device to raise guest interrupts.
///
/// Clones share the same `InterruptInner` through the `Arc`.
#[derive(Clone)]
pub struct Interrupt {
    inner: Arc<InterruptInner>,
}
impl fmt::Debug for Interrupt {
    /// The inner transport holds events and locks with no useful `Debug`
    /// output, so just emit the type name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Interrupt")
    }
}
/// Serializable snapshot of an `Interrupt`'s restorable state.
///
/// Only the ISR status bits are captured; transport objects are supplied
/// fresh on restore (see `Interrupt::new_from_snapshot`).
#[derive(Serialize, Deserialize)]
pub struct InterruptSnapshot {
    interrupt_status: usize,
}
impl Interrupt {
    /// Delivers an interrupt for `vector` with the given ISR status bits.
    ///
    /// If the device is suspended, the signal is queued in `PmState` (and a PM
    /// wakeup may fire) instead of being delivered; it is replayed later by
    /// `set_suspended`.
    pub fn signal(&self, vector: u16, interrupt_status_mask: u32) {
        // The pm_state guard is a temporary, so the lock is released before
        // the transport is touched below.
        let deferred = self
            .inner
            .pm_state
            .lock()
            .handle_interrupt(vector, interrupt_status_mask);
        if deferred {
            return;
        }
        match &self.inner.transport {
            Transport::Pci { pci } => {
                // Prefer MSI-X when present and enabled by the guest; the
                // MSI-X path never falls back to the legacy interrupt.
                if let Some(msix_config) = &pci.msix_config {
                    let mut cfg = msix_config.lock();
                    if cfg.enabled() {
                        if vector != VIRTIO_MSI_NO_VECTOR {
                            cfg.trigger(vector);
                        }
                        return;
                    }
                }
                // Legacy path: set ISR bits and only pulse the level interrupt
                // on a 0 -> nonzero transition (or in async mode).
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    pci.irq_evt_lvl.trigger().unwrap();
                }
            }
            Transport::Mmio { irq_evt_edge } => {
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    irq_evt_edge.trigger().unwrap();
                }
            }
            Transport::VhostUser { call_evt, .. } => {
                // vhost-user has no ISR bits here; just forward the call event.
                call_evt.signal().unwrap();
            }
        }
    }

    /// Signals a used-ring update on the queue associated with `vector`.
    pub fn signal_used_queue(&self, vector: u16) {
        self.signal(vector, INTERRUPT_STATUS_USED_RING)
    }

    /// Notifies the driver that the device configuration changed.
    pub fn signal_config_changed(&self) {
        match &self.inner.transport {
            Transport::Pci { pci } => {
                self.signal(pci.config_msix_vector, INTERRUPT_STATUS_CONFIG_CHANGED);
            }
            Transport::Mmio { .. } => {
                self.signal(VIRTIO_MSI_NO_VECTOR, INTERRUPT_STATUS_CONFIG_CHANGED);
            }
            Transport::VhostUser {
                signal_config_changed_fn,
                ..
            } => signal_config_changed_fn(),
        }
    }

    /// Resample event of the PCI level interrupt, if this transport has one.
    fn get_resample_evt(&self) -> Option<&Event> {
        if let Transport::Pci { pci } = &self.inner.transport {
            Some(pci.irq_evt_lvl.get_resample())
        } else {
            None
        }
    }

    /// Starts a worker that re-asserts the level interrupt on resample events.
    ///
    /// Returns `None` for transports without a resample event (MMIO and
    /// vhost-user).
    pub fn spawn_resample_thread(&self) -> Option<WorkerThread<()>> {
        // Bail out early for transports with no resample event.
        self.get_resample_evt()?;
        let interrupt = self.clone();
        Some(WorkerThread::start("crosvm_resample", move |kill_evt| {
            interrupt_resample_thread(kill_evt, interrupt)
        }))
    }
}
impl Interrupt {
    /// Creates an `Interrupt` using the PCI transport, with a cleared ISR
    /// status.
    ///
    /// `irq_evt_lvl` carries the legacy level-triggered interrupt,
    /// `msix_config` the optional shared MSI-X state, and `config_msix_vector`
    /// the MSI-X vector used for configuration-change notifications. On
    /// x86_64, `wakeup_event` optionally supplies a PM wakeup event plus the
    /// metric type logged when that wakeup fires.
    pub fn new(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }
    /// Like `new`, but restores the ISR status bits from a previously taken
    /// `InterruptSnapshot`. The transport objects are supplied fresh by the
    /// caller; they are not part of the snapshot.
    pub fn new_from_snapshot(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        snapshot: InterruptSnapshot,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(snapshot.interrupt_status),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }
    /// Creates an `Interrupt` using the MMIO transport (single edge-triggered
    /// interrupt). When `async_intr_status` is set, `update_interrupt_status`
    /// requests a signal even if the ISR bits were already nonzero.
    pub fn new_mmio(irq_evt_edge: IrqEdgeEvent, async_intr_status: bool) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::Mmio { irq_evt_edge },
                async_intr_status,
                // No PM wakeup event for MMIO devices.
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }
    /// Creates an `Interrupt` for a vhost-user device: interrupts are
    /// delivered by signaling `call_evt`, and configuration-change
    /// notifications are delegated to `signal_config_changed_fn`.
    pub fn new_vhost_user(
        call_evt: Event,
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::VhostUser {
                    call_evt,
                    signal_config_changed_fn,
                },
                async_intr_status: false,
                // No PM wakeup event for vhost-user devices.
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }
    /// Test-only: a PCI `Interrupt` with no MSI-X config.
    #[cfg(test)]
    pub fn new_for_test() -> Interrupt {
        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            None,
            VIRTIO_MSI_NO_VECTOR,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }
    /// Test-only: a PCI `Interrupt` with a two-vector MSI-X config attached.
    #[cfg(test)]
    pub fn new_for_test_with_msix() -> Interrupt {
        // The other end of the config tube is dropped immediately; the tests
        // using this constructor evidently don't exercise config messages.
        let (_, unused_config_tube) = base::Tube::pair().unwrap();
        let msix_vectors = 2;
        let msix_cfg = MsixConfig::new(
            msix_vectors,
            unused_config_tube,
            0,
            "test_device".to_owned(),
        );
        // NOTE(review): `config_msix_vector` is set to `msix_vectors` (2),
        // which is outside the valid vector range 0..2 — presumably deliberate
        // for tests; confirm against the tests that rely on it.
        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            Some(Arc::new(Mutex::new(msix_cfg))),
            msix_vectors,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }
    /// Event signaled to raise an interrupt: the transport's trigger event, or
    /// the vhost-user call event.
    pub fn get_interrupt_evt(&self) -> &Event {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => pci.irq_evt_lvl.get_trigger(),
            Transport::Mmio { irq_evt_edge } => irq_evt_edge.get_trigger(),
            Transport::VhostUser { call_evt, .. } => call_evt,
        }
    }
    /// Shared MSI-X config for the PCI transport; `&None` for other transports.
    pub fn get_msix_config(&self) -> &Option<Arc<Mutex<MsixConfig>>> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => &pci.msix_config,
            _ => &None,
        }
    }
    /// Reads the ISR status bits, truncated to the low 8 bits.
    pub fn read_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.load(Ordering::SeqCst) as u8
    }
    /// Atomically reads and clears the ISR status, returning the low 8 bits of
    /// the previous value.
    pub fn read_and_reset_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.swap(0, Ordering::SeqCst) as u8
    }
    /// Clears the ISR status bits set in `mask`, leaving the rest untouched.
    pub fn clear_interrupt_status_bits(&self, mask: u8) {
        self.inner
            .interrupt_status
            .fetch_and(!(mask as usize), Ordering::SeqCst);
    }
    /// Captures the restorable state (the ISR status bits); restore with
    /// `new_from_snapshot`.
    pub fn snapshot(&self) -> InterruptSnapshot {
        InterruptSnapshot {
            interrupt_status: self.inner.interrupt_status.load(Ordering::SeqCst),
        }
    }
    /// Updates the suspended flag, then re-sends any interrupts that were
    /// queued while the device was suspended.
    pub fn set_suspended(&self, suspended: bool) {
        // The pm_state lock is released before `signal` re-acquires it below.
        let retrigger_evts = self.inner.pm_state.lock().set_suspended(suspended);
        for (vector, interrupt_status_mask) in retrigger_evts.into_iter() {
            self.signal(vector, interrupt_status_mask);
        }
    }
    /// Arms (`true`) or disarms (`false`) the PM wakeup for this interrupt.
    /// No-op if the device was created without a wakeup event.
    #[cfg(target_arch = "x86_64")]
    pub fn set_wakeup_event_active(&self, active: bool) {
        self.inner.pm_state.lock().set_wakeup_event_active(active);
    }
}
/// Tracks the PM wakeup event associated with an interrupt (x86_64 only).
#[cfg(target_arch = "x86_64")]
struct WakeupState {
    // Event used to wake the VM when an interrupt arrives while suspended.
    wakeup_event: PmWakeupEvent,
    // Whether wakeup triggering is currently armed.
    wakeup_enabled: bool,
    // When the wakeup was last armed; elapsed time is logged as a metric.
    armed_time: Instant,
    // Metric type logged (with the elapsed ms) when a wakeup fires.
    metrics_event: MetricEventType,
    // Present while a wakeup is in flight; signaling it clears the wakeup.
    wakeup_clear_evt: Option<Event>,
}
#[cfg(target_arch = "x86_64")]
impl WakeupState {
    /// Builds wakeup tracking state, or `None` when no wakeup event was
    /// configured for this interrupt.
    fn new(wakeup_event: Option<(PmWakeupEvent, MetricEventType)>) -> Option<Self> {
        let (wakeup_event, metrics_event) = wakeup_event?;
        Some(Self {
            wakeup_event,
            wakeup_enabled: false,
            armed_time: Instant::now(),
            metrics_event,
            wakeup_clear_evt: None,
        })
    }
    /// Fires the platform wakeup, at most once per arming cycle.
    fn trigger_wakeup(&mut self) {
        // A stored clear event means a wakeup is already in flight; don't
        // trigger (or log) again until it has been cleared.
        if self.wakeup_clear_evt.is_some() {
            return;
        }
        // Log how long after arming the wakeup fired, saturating at i64::MAX.
        let armed_for_ms = self.armed_time.elapsed().as_millis();
        log_metric(
            self.metrics_event.clone(),
            armed_for_ms.try_into().unwrap_or(i64::MAX),
        );
        match self.wakeup_event.trigger_wakeup() {
            Ok(clear_evt) => self.wakeup_clear_evt = clear_evt,
            Err(err) => error!("Wakeup trigger failed {:?}", err),
        }
    }
}
/// Power-management state for an interrupt: while the device is suspended,
/// interrupt signals are queued here instead of being delivered.
struct PmState {
    // True while the device is suspended; toggled via `set_suspended`.
    suspended: bool,
    // (vector, interrupt_status_mask) pairs deferred during suspension,
    // re-sent by `Interrupt::set_suspended`.
    pending_signals: Vec<(u16, u32)>,
    // Optional PM wakeup plumbing; `None` if no wakeup event was configured.
    #[cfg(target_arch = "x86_64")]
    wakeup_state: Option<WakeupState>,
}
impl PmState {
    /// Creates a `PmState`, already wrapped in the `Mutex` that
    /// `InterruptInner` stores it in.
    fn new(
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Mutex<Self> {
        Mutex::new(Self {
            suspended: false,
            pending_signals: Vec::new(),
            #[cfg(target_arch = "x86_64")]
            wakeup_state: WakeupState::new(wakeup_event),
        })
    }
    /// Intercepts an interrupt signal. Returns true (signal deferred) when
    /// suspended, after queuing it and possibly firing a PM wakeup; returns
    /// false when the caller should deliver the interrupt normally.
    fn handle_interrupt(&mut self, vector: u16, mask: u32) -> bool {
        if !self.suspended {
            return false;
        }
        self.pending_signals.push((vector, mask));
        #[cfg(target_arch = "x86_64")]
        if let Some(wakeup_state) = self.wakeup_state.as_mut() {
            if wakeup_state.wakeup_enabled {
                wakeup_state.trigger_wakeup();
            }
        }
        true
    }
    /// Updates the suspended flag and hands back the interrupts queued so far,
    /// leaving the queue empty; the caller re-delivers them.
    fn set_suspended(&mut self, suspended: bool) -> Vec<(u16, u32)> {
        let queued = std::mem::take(&mut self.pending_signals);
        self.suspended = suspended;
        queued
    }
    /// Arms or disarms the wakeup event; no-op when none is configured.
    #[cfg(target_arch = "x86_64")]
    fn set_wakeup_event_active(&mut self, active: bool) {
        let wakeup_state = match self.wakeup_state.as_mut() {
            Some(state) => state,
            None => return,
        };
        wakeup_state.wakeup_enabled = active;
        if active {
            // Restart the latency clock, and fire immediately if interrupts
            // already arrived while suspended.
            wakeup_state.armed_time = Instant::now();
            if !self.pending_signals.is_empty() {
                wakeup_state.trigger_wakeup();
            }
        } else if let Some(clear_evt) = wakeup_state.wakeup_clear_evt.take() {
            // Disarming: release any in-flight wakeup by signaling its clear
            // event.
            if let Err(e) = clear_evt.signal() {
                error!("failed to signal clear event {}", e);
            }
        }
    }
}
/// Worker loop servicing level-interrupt resample requests.
///
/// Each time the resample event fires (presumably after the guest acks the
/// level interrupt — confirm against the irqchip side), the interrupt event is
/// re-signaled if any ISR status bits are still set, keeping the level
/// interrupt asserted. Exits when `kill_evt` is signaled.
fn interrupt_resample_thread(kill_evt: Event, interrupt: Interrupt) {
    #[derive(EventToken)]
    enum Token {
        Resample,
        Kill,
    }
    let interrupt_status = &interrupt.inner.interrupt_status;
    let interrupt_evt = interrupt.get_interrupt_evt();
    let resample_evt = interrupt
        .get_resample_evt()
        .expect("must have resample evt in interrupt_resample_thread");
    let wait_ctx =
        WaitContext::build_with(&[(resample_evt, Token::Resample), (&kill_evt, Token::Kill)])
            .expect("failed to create WaitContext");
    loop {
        for event in wait_ctx.wait().expect("WaitContext::wait() failed") {
            match event.token {
                Token::Kill => return,
                Token::Resample => {
                    // Consume the resample notification; the read value is
                    // irrelevant and errors are deliberately ignored.
                    let _ = resample_evt.wait();
                    // Re-assert the interrupt if any ISR bits remain set.
                    if interrupt_status.load(Ordering::SeqCst) != 0 {
                        interrupt_evt.signal().unwrap();
                    }
                }
            }
        }
    }
}