relay_metrics/aggregator/mod.rs

//! Core functionality of metrics aggregation.

use std::time::{Duration, SystemTime};

use hashbrown::HashMap;
use relay_base_schema::metrics::MetricNamespace;
use relay_base_schema::project::ProjectKey;
use relay_common::time::UnixTimestamp;

use crate::statsd::{MetricCounters, MetricGauges};
use crate::Bucket;

mod config;
mod inner;
mod stats;

pub use self::config::*;
use self::inner::{BucketData, BucketKey};

/// Default number of partitions per second when no partition count is configured.
const DEFAULT_PARTITIONS_PER_SECOND: u32 = 64;

/// Any error that may occur during aggregation.
#[derive(Debug, thiserror::Error, PartialEq)]
pub enum AggregateMetricsError {
    /// Internal error: Attempted to merge two metric buckets of different types.
    #[error("found incompatible metric types")]
    InvalidTypes,
    /// A metric bucket is too large for the global bytes limit.
    #[error("total metrics limit exceeded")]
    TotalLimitExceeded,
    /// A metric bucket is too large for the per-project bytes limit.
    #[error("project metrics limit exceeded")]
    ProjectLimitExceeded,
    /// The timestamp is outside the maximum allowed time range.
    #[error("the timestamp '{0}' is outside the maximum allowed time range")]
    InvalidTimestamp(UnixTimestamp),
}

/// A collector of [`Bucket`] submissions.
///
/// # Aggregation
///
/// Each metric is dispatched into a [`Bucket`] depending on its project key (DSN), name, type,
/// unit, tags and timestamp. The bucket timestamp is rounded to the precision declared by the
/// `bucket_interval` field on the [`AggregatorConfig`] configuration.
///
/// Each bucket stores the accumulated value of submitted metrics:
///
/// - `Counter`: Sum of values.
/// - `Distribution`: A list of values.
/// - `Set`: A unique set of hashed values.
/// - `Gauge`: A summary of the reported values, see [`GaugeValue`](crate::GaugeValue).
///
/// # Conflicts
///
/// Metrics are uniquely identified by the combination of their name, type and unit. It is allowed
/// to send metrics of different types and units under the same name. For example, sending a metric
/// once as set and once as distribution will result in two actual metrics being recorded.
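///
/// # Example
///
/// A minimal usage sketch; `AggregatorConfig::default()`, `project_key`, and `bucket` are
/// assumptions standing in for values constructed by the caller:
///
/// ```ignore
/// let config = AggregatorConfig::default();
/// let mut aggregator = Aggregator::named("example".to_owned(), &config);
///
/// // Submissions with the same project key, name, tags, and (rounded)
/// // timestamp are merged into a single bucket.
/// aggregator.merge(project_key, bucket)?;
/// assert!(!aggregator.is_empty());
/// ```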
#[derive(Debug)]
pub struct Aggregator {
    name: String,
    inner: inner::Inner,
}

impl Aggregator {
    /// Creates a new named [`Self`].
    pub fn named(name: String, config: &AggregatorConfig) -> Self {
        let num_partitions = match config.flush_batching {
            FlushBatching::Project | FlushBatching::Bucket | FlushBatching::Partition => {
                config.flush_partitions
            }
            FlushBatching::None => Some(0),
        }
        .unwrap_or(DEFAULT_PARTITIONS_PER_SECOND * config.bucket_interval.max(1));

        Self {
            name,
            inner: inner::Inner::new(inner::Config {
                start: UnixTimestamp::now(),
                bucket_interval: config.bucket_interval,
                num_time_slots: config.aggregator_size,
                num_partitions,
                delay: config.initial_delay,
                max_total_bucket_bytes: config.max_total_bucket_bytes,
                max_project_key_bucket_bytes: config.max_project_key_bucket_bytes,
                max_secs_in_past: Some(config.max_secs_in_past),
                max_secs_in_future: Some(config.max_secs_in_future),
                partition_by: config.flush_batching,
            }),
        }
    }

    /// Returns the name of the aggregator.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Returns `true` if the aggregator contains any metric buckets.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Merge a bucket into this aggregator.
    pub fn merge(
        &mut self,
        project_key: ProjectKey,
        bucket: Bucket,
    ) -> Result<(), AggregateMetricsError> {
        let key = BucketKey {
            project_key,
            timestamp: bucket.timestamp,
            metric_name: bucket.name,
            tags: bucket.tags,
            extracted_from_indexed: bucket.metadata.extracted_from_indexed,
        };

        let value = BucketData {
            value: bucket.value,
            metadata: bucket.metadata,
        };

        self.inner.merge(key, value)
    }

    /// Attempts to flush the next batch from the aggregator.
    ///
    /// If it is too early to flush the next batch, the error contains the duration to wait
    /// before the next flush attempt. After a successful flush, retry immediately until an
    /// error with the next flush time is returned; this makes sure time is eventually
    /// synchronized.
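    ///
    /// # Example
    ///
    /// A sketch of the intended driver loop; `send_upstream` is an assumed sink for flushed
    /// partitions:
    ///
    /// ```ignore
    /// loop {
    ///     match aggregator.try_flush_next(SystemTime::now()) {
    ///         // A partition was ready: forward it and retry immediately.
    ///         Ok(partition) => send_upstream(partition),
    ///         // Too early: wait out the returned duration before retrying.
    ///         Err(wait) => std::thread::sleep(wait),
    ///     }
    /// }
    /// ```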
    pub fn try_flush_next(&mut self, now: SystemTime) -> Result<Partition, Duration> {
        let next_flush = SystemTime::UNIX_EPOCH + self.inner.next_flush_at();

        if let Err(err) = now.duration_since(next_flush) {
            // The flush time is in the future, return the amount of time to wait before the next flush.
            return Err(err.duration());
        }

        // Emit global stats before flushing so the gauges capture the maximum size.
        emit_stats(&self.name, self.inner.stats());

        let partition = self.inner.flush_next();
        emit_flush_partition_stats(&self.name, partition.stats);

        Ok(Partition {
            partition_key: partition.partition_key,
            buckets: partition.buckets,
            bucket_interval: self.inner.bucket_interval(),
        })
    }

    /// Returns the remaining time until the next partition is ready to be flushed using
    /// [`Self::try_flush_next`].
    pub fn next_flush_at(&mut self, now: SystemTime) -> Duration {
        let next_flush = SystemTime::UNIX_EPOCH + self.inner.next_flush_at();

        match now.duration_since(next_flush) {
            Ok(_) => Duration::ZERO,
            Err(err) => err.duration(),
        }
    }

    /// Consumes the aggregator and returns all contained partitions.
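    ///
    /// A sketch of draining the aggregator, e.g. on shutdown (`send_upstream` is an assumed
    /// sink):
    ///
    /// ```ignore
    /// for partition in aggregator.into_partitions() {
    ///     send_upstream(partition);
    /// }
    /// ```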
    pub fn into_partitions(self) -> impl Iterator<Item = Partition> {
        let bucket_interval = self.inner.bucket_interval();

        emit_stats(&self.name, self.inner.stats());

        self.inner.into_partitions().map(move |p| Partition {
            partition_key: p.partition_key,
            buckets: p.buckets,
            bucket_interval,
        })
    }
}

/// A flushed partition from [`Aggregator::try_flush_next`].
///
/// The partition contains the partition key and all flushed buckets.
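///
/// # Example
///
/// Draining a flushed partition into per-project buckets; `partition` is assumed to come
/// from [`Aggregator::try_flush_next`]:
///
/// ```ignore
/// for (project_key, bucket) in partition {
///     println!("{project_key} -> {}", bucket.name);
/// }
/// ```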
pub struct Partition {
    /// The partition key.
    pub partition_key: u32,
    buckets: HashMap<BucketKey, BucketData>,
    bucket_interval: u64,
}

impl IntoIterator for Partition {
    type Item = (ProjectKey, Bucket);
    type IntoIter = PartitionIter;

    fn into_iter(self) -> Self::IntoIter {
        PartitionIter {
            inner: self.buckets.into_iter(),
            bucket_interval: self.bucket_interval,
        }
    }
}

/// Iterator yielded from [`Partition::into_iter`].
pub struct PartitionIter {
    inner: hashbrown::hash_map::IntoIter<BucketKey, BucketData>,
    bucket_interval: u64,
}

impl Iterator for PartitionIter {
    type Item = (ProjectKey, Bucket);

    fn next(&mut self) -> Option<Self::Item> {
        let (key, data) = self.inner.next()?;

        Some((
            key.project_key,
            Bucket {
                timestamp: key.timestamp,
                width: self.bucket_interval,
                name: key.metric_name,
                tags: key.tags,
                value: data.value,
                metadata: data.metadata,
            },
        ))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}

impl std::iter::ExactSizeIterator for PartitionIter {
    fn len(&self) -> usize {
        self.inner.len()
    }
}

impl std::iter::FusedIterator for PartitionIter {}

fn emit_stats(name: &str, stats: inner::Stats) {
    for namespace in MetricNamespace::all() {
        relay_statsd::metric!(
            gauge(MetricGauges::Buckets) = *stats.count_by_namespace.get(namespace),
            namespace = namespace.as_str(),
            aggregator = name
        );
        relay_statsd::metric!(
            gauge(MetricGauges::BucketsCost) = *stats.cost_by_namespace.get(namespace),
            namespace = namespace.as_str(),
            aggregator = name
        );
    }
}

fn emit_flush_partition_stats(name: &str, stats: inner::PartitionStats) {
    relay_statsd::metric!(counter(MetricCounters::FlushCount) += 1, aggregator = name);

    for namespace in MetricNamespace::all() {
        relay_statsd::metric!(
            counter(MetricCounters::MergeMiss) += *stats.count_by_namespace.get(namespace),
            namespace = namespace.as_str(),
            aggregator = name,
        );
        relay_statsd::metric!(
            counter(MetricCounters::MergeHit) += *stats.merges_by_namespace.get(namespace),
            namespace = namespace.as_str(),
            aggregator = name,
        );
        relay_statsd::metric!(
            counter(MetricCounters::FlushCost) += *stats.cost_by_namespace.get(namespace),
            namespace = namespace.as_str(),
            aggregator = name,
        );
    }
}