audio_streams_conformance_test/
performance_data.rs1use std::fmt;
6use std::time::Duration;
7
8use serde::Serialize;
9
10use crate::args::Args;
11use crate::error::*;
12
// Divisor converting nanosecond counts into the time unit printed in the report.
// NOTE(review): 1_000_000 ns is one *millisecond*, not one microsecond, and the
// report labels the converted values "ms" — this constant (and `to_micros`,
// which divides by it) should be renamed to say "millis". Kept as-is here
// because `to_micros` references this name.
const NANOS_PER_MICROS: f32 = 1_000_000.0;
14
/// Final summary of a conformance-test run. Printable for humans via
/// `Display` and machine-serializable via serde.
#[derive(Debug, Serialize)]
pub struct PerformanceReport {
    /// Arguments the test was invoked with.
    args: Args,
    /// Copied from `PerformanceData::cold_start` — presumably the delay before
    /// the stream first consumed a buffer; confirm against the producer.
    cold_start_latency: Duration,
    /// Number of buffer-consumption records collected.
    record_count: usize,
    /// Frame rate estimated by linear regression over (timestamp, frames).
    rate: EstimatedRate,
    /// Shortest interval between consecutive records.
    min_time: Duration,
    /// Longest interval between consecutive records.
    max_time: Duration,
    /// Mean interval between consecutive records.
    avg_time: Duration,
    /// Standard deviation of the intervals between consecutive records.
    stddev_time: Duration,
    /// Number of steps whose consumed-frame delta != `args.buffer_frames`.
    mismatched_frame_count: u32,
}
31
impl fmt::Display for PerformanceReport {
    // The mismatch warning is deliberately sent to stderr from inside `fmt` so
    // it is not mixed into the report text written to `f`; hence the allow.
    #[allow(clippy::print_in_format_impl)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.mismatched_frame_count != 0 {
            // NOTE(review): `eprint!` emits no trailing newline, so following
            // stderr output is glued onto this message — `eprintln!` may have
            // been intended; verify before changing the output format.
            eprint!(
                "[Error] {} consumed buffers size != {} frames",
                self.mismatched_frame_count, self.args.buffer_frames
            );
        }
        write!(
            f,
            r#"{}
Cold start latency: {:?}
Records count: {}
[Step] min: {:.2} ms, max: {:.2} ms, average: {:.2} ms, standard deviation: {:.2} ms.
{}
"#,
            self.args,
            self.cold_start_latency,
            self.record_count,
            // `to_micros` actually yields milliseconds (ns / 1e6), matching
            // the "ms" labels in the format string — the helper is misnamed.
            to_micros(self.min_time),
            to_micros(self.max_time),
            to_micros(self.avg_time),
            to_micros(self.stddev_time),
            self.rate,
        )
    }
}
60
/// One sample of stream progress: the consumed-frame counter (`frames`)
/// observed at timestamp `ts`.
#[derive(Debug, Default)]
pub struct BufferConsumptionRecord {
    /// Timestamp at which the sample was taken.
    pub ts: Duration,
    /// Consumed-frame counter at `ts`. `PerformanceData::gen_report` diffs
    /// consecutive values, so this is presumably cumulative — confirm with
    /// the code that records samples.
    pub frames: usize,
}

impl BufferConsumptionRecord {
    /// Builds a record for `frames` frames observed at time `ts`.
    pub fn new(frames: usize, ts: Duration) -> Self {
        BufferConsumptionRecord { frames, ts }
    }
}
75
/// Result of the linear-regression rate estimate over the recorded samples.
#[derive(Debug, Serialize, PartialEq)]
pub struct EstimatedRate {
    /// Estimated slope of frames over time, in frames per second.
    rate: f64,
    /// Standard error of the estimate.
    error: f64,
}
83
84impl EstimatedRate {
85 fn new(rate: f64, error: f64) -> Self {
86 Self { rate, error }
87 }
88}
89
impl fmt::Display for EstimatedRate {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // NOTE(review): the format string ends with a trailing space; kept
        // byte-identical since the report output embeds this text verbatim.
        write!(
            f,
            "[Linear Regression] rate: {:.2} frames/s, standard error: {:.2} ",
            self.rate, self.error
        )
    }
}
99
/// Raw measurements collected during a test run; summarized into a
/// `PerformanceReport` by `gen_report`.
#[derive(Debug, Default)]
pub struct PerformanceData {
    /// Reported as "cold start latency" — presumably the time from stream
    /// start until the first buffer was consumed; set by the caller, confirm
    /// against the recording code.
    pub cold_start: Duration,
    /// Buffer-consumption samples, assumed to be in chronological order
    /// (consecutive entries are diffed in `gen_report`).
    pub records: Vec<BufferConsumptionRecord>,
}
105
106fn to_micros(t: Duration) -> f32 {
107 t.as_nanos() as f32 / NANOS_PER_MICROS
108}
109
/// Ordinary least-squares fit of `y = a + b*x`, returning the slope `b`
/// together with the standard error of the estimate:
///
///   err = sqrt((n·Σy² − (Σy)² − b²·(n·Σx² − (Σx)²)) / (n·(n − 2)))
///
/// # Errors
/// * `Error::MismatchedSamples` if `x` and `y` differ in length.
/// * `Error::NotEnoughSamples` if fewer than 3 samples are given (the error
///   term divides by `n − 2`).
fn linear_regression(x: &[f64], y: &[f64]) -> Result<EstimatedRate> {
    if x.len() != y.len() {
        return Err(Error::MismatchedSamples);
    }

    if x.len() <= 2 {
        return Err(Error::NotEnoughSamples);
    }

    let x_sum: f64 = x.iter().sum();
    let x_average = x_sum / x.len() as f64;
    let x_square_sum: f64 = x.iter().map(|&xi| xi * xi).sum();
    let x_y_sum: f64 = x.iter().zip(y.iter()).map(|(&xi, &yi)| xi * yi).sum();

    let y_sum: f64 = y.iter().sum();

    let y_square_sum: f64 = y.iter().map(|yi| yi * yi).sum();
    // Slope: b = (Σxy − x̄·Σy) / (Σx² − x̄·Σx).
    // NOTE(review): if every x is identical the denominator is 0 and `b`
    // becomes NaN/±inf instead of an error — confirm that callers always pass
    // distinct timestamps.
    let b = (x_y_sum - x_average * y_sum) / (x_square_sum - x_average * x_sum);
    let n = y.len() as f64;
    // The unit tests below assert bit-exact results, so the evaluation order
    // of this floating-point expression must not be rearranged.
    let err: f64 =
        ((n * y_square_sum - y_sum * y_sum - b * b * (n * x_square_sum - x_sum * x_sum))
            / (n * (n - 2.0)))
            .sqrt();

    Ok(EstimatedRate::new(b, err))
}
142
143impl PerformanceData {
144 pub fn print_records(&self) {
145 println!("TS\t\tTS_DIFF\t\tPLAYED");
146 let mut previous_ts = 0.0;
147 for record in &self.records {
148 println!(
149 "{:.6}\t{:.6}\t{}",
150 record.ts.as_secs_f64(),
151 record.ts.as_secs_f64() - previous_ts,
152 record.frames
153 );
154 previous_ts = record.ts.as_secs_f64();
155 }
156 }
157 pub fn gen_report(&self, args: Args) -> Result<PerformanceReport> {
158 let time_records: Vec<f64> = self
159 .records
160 .iter()
161 .map(|record| record.ts.as_secs_f64())
162 .collect();
163
164 let frames: Vec<f64> = self
165 .records
166 .iter()
167 .map(|record| record.frames as f64)
168 .collect();
169
170 let mut steps = Vec::new();
171 let mut mismatched_frame_count = 0;
172 for i in 1..frames.len() {
173 let time_diff = self.records[i].ts - self.records[i - 1].ts;
174 steps.push(time_diff);
175
176 let frame_diff = self.records[i].frames - self.records[i - 1].frames;
177 if frame_diff != args.buffer_frames {
178 mismatched_frame_count += 1;
179 }
180 }
181 let avg_time = steps
182 .iter()
183 .sum::<Duration>()
184 .checked_div(steps.len() as u32)
185 .ok_or(Error::NotEnoughSamples)?;
186 let stddev_time = (steps
187 .iter()
188 .map(|x| {
189 (x.as_nanos().abs_diff(avg_time.as_nanos())
190 * x.as_nanos().abs_diff(avg_time.as_nanos())) as f64
191 })
192 .sum::<f64>()
193 / steps.len() as f64)
194 .sqrt();
195
196 let rate = linear_regression(&time_records, &frames)?;
197 let min_time = steps.iter().min().unwrap().to_owned();
198 let max_time = steps.iter().max().unwrap().to_owned();
199
200 Ok(PerformanceReport {
201 args,
202 cold_start_latency: self.cold_start,
203 record_count: self.records.len(),
204 rate,
205 min_time,
206 max_time,
207 avg_time,
208 stddev_time: Duration::from_nanos(stddev_time as u64),
209 mismatched_frame_count,
210 })
211 }
212}
213
#[cfg(test)]
mod tests {
    use super::*;

    // A perfect y = x line must fit with slope 1.0 and zero standard error.
    #[test]
    fn test1() {
        let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let ys = xs.clone();
        let estimate = linear_regression(&xs, &ys).expect("test1 should pass");
        assert_eq!(estimate, EstimatedRate::new(1.0, 0.0));
    }

    // Noisy data: slope and standard error against precomputed exact values.
    #[test]
    fn test2() {
        let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
        let estimate = linear_regression(&xs, &ys).expect("test2 should pass");
        assert_eq!(estimate, EstimatedRate::new(0.6, 0.8944271909999159));
    }
}