use crate::runtime::{Config, MetricsBatch, WorkerMetrics};

use std::time::{Duration, Instant};

/// Per-worker statistics. This is used for both tuning the scheduler and
/// reporting runtime-level metrics/stats.
pub(crate) struct Stats {
    /// The metrics batch used to report runtime-level metrics/stats to the
    /// user.
    batch: MetricsBatch,

    /// Instant at which work last resumed (continued after park).
    ///
    /// This duplicates the value stored in `MetricsBatch`. We will unify
    /// `Stats` and `MetricsBatch` when we stabilize metrics.
    processing_scheduled_tasks_started_at: Instant,

    /// Number of tasks polled in the batch of scheduled tasks.
    tasks_polled_in_batch: usize,

    /// Exponentially-weighted moving average of the time spent polling a
    /// scheduled task.
    ///
    /// Tracked in nanoseconds, stored as an `f64`, since that is what we use
    /// for the EWMA calculations.
    task_poll_time_ewma: f64,
}
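
// For reference, the EWMA update applied below has the standard form
// `new = ALPHA * sample + (1.0 - ALPHA) * old`, where a larger `ALPHA`
// makes the average respond faster to recent samples.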

/// How heavily to weigh each individual poll time; the value is plucked from thin air.
const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1;

/// Ideally, we wouldn't go above this; the value is plucked from thin air.
const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64;

/// Max value for the global queue interval. This is roughly 2x the previous default.
const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127;

/// The previous default value for the global queue interval.
const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61;

impl Stats {
    pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats {
        // Seed the value with what we hope to see.
        let task_poll_time_ewma =
            TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64;
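
        // With the defaults above, the seed works out to 200_000ns / 61 ≈
        // 3_279ns, i.e. tuning starts from the assumption that polling one
        // task takes a little over 3µs.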

        Stats {
            batch: MetricsBatch::new(worker_metrics),
            processing_scheduled_tasks_started_at: Instant::now(),
            tasks_polled_in_batch: 0,
            task_poll_time_ewma,
        }
    }

    pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 {
        // If an interval is explicitly set, don't tune.
        if let Some(configured) = config.global_queue_interval {
            return configured;
        }

        // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here.
        let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32;
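
        // For example, if the observed EWMA poll time is 10_000ns, this
        // yields 200_000 / 10_000 = 20, so workers with slow-polling tasks
        // check the global queue more often. Very fast polls would yield a
        // large interval, which the clamp below caps.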

        // If we are using self-tuning, we don't want to return less than 2 as
        // that would result in the global queue always getting checked first.
        tasks_per_interval.clamp(2, MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL)
    }

    pub(crate) fn submit(&mut self, to: &WorkerMetrics) {
        self.batch.submit(to, self.task_poll_time_ewma as u64);
    }

    pub(crate) fn about_to_park(&mut self) {
        self.batch.about_to_park();
    }

    pub(crate) fn unparked(&mut self) {
        self.batch.unparked();
    }

    pub(crate) fn inc_local_schedule_count(&mut self) {
        self.batch.inc_local_schedule_count();
    }

    pub(crate) fn start_processing_scheduled_tasks(&mut self) {
        self.batch.start_processing_scheduled_tasks();

        self.processing_scheduled_tasks_started_at = Instant::now();
        self.tasks_polled_in_batch = 0;
    }

    pub(crate) fn end_processing_scheduled_tasks(&mut self) {
        self.batch.end_processing_scheduled_tasks();

        // Update the EWMA task poll time
        if self.tasks_polled_in_batch > 0 {
            let now = Instant::now();

            // If we "overflow" this conversion, we have bigger problems than
            // slightly off stats.
            let elapsed = (now - self.processing_scheduled_tasks_started_at).as_nanos() as f64;
            let num_polls = self.tasks_polled_in_batch as f64;

            // Calculate the mean poll duration for a single task in the batch
            let mean_poll_duration = elapsed / num_polls;

            // Compute the alpha weighted by the number of tasks polled this batch.
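            // Applying the per-sample update `num_polls` times with the same
            // sample would leave the old average weighted by
            // `(1 - ALPHA)^num_polls`, so the batch-sized alpha is
            // `1 - (1 - ALPHA)^num_polls`.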
            let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls);

            // Now compute the new weighted average task poll time.
            self.task_poll_time_ewma = weighted_alpha * mean_poll_duration
                + (1.0 - weighted_alpha) * self.task_poll_time_ewma;
        }
    }

    pub(crate) fn start_poll(&mut self) {
        self.batch.start_poll();

        self.tasks_polled_in_batch += 1;
    }

    pub(crate) fn end_poll(&mut self) {
        self.batch.end_poll();
    }

    pub(crate) fn incr_steal_count(&mut self, by: u16) {
        self.batch.incr_steal_count(by);
    }

    pub(crate) fn incr_steal_operations(&mut self) {
        self.batch.incr_steal_operations();
    }

    pub(crate) fn incr_overflow_count(&mut self) {
        self.batch.incr_overflow_count();
    }
}