relay_server/services/metrics/router.rs

//! Routing logic for metrics. Metrics from different namespaces may be routed to different aggregators,
//! with their own limits, bucket intervals, etc.

use relay_config::aggregator::Condition;
use relay_config::{AggregatorServiceConfig, ScopedAggregatorConfig};
use relay_metrics::MetricNamespace;
use relay_system::{Addr, NoResponse, Recipient, Service, ServiceRunner};

use crate::services::metrics::{
    Aggregator, AggregatorHandle, AggregatorService, FlushBuckets, MergeBuckets,
};
use crate::services::projects::cache::ProjectCacheHandle;
use crate::statsd::RelayTimers;
use crate::utils;

/// Service that routes metrics & metric buckets to the appropriate aggregator.
///
/// Each aggregator gets its own configuration.
/// Metrics are routed to the first aggregator whose [`Condition`] matches.
/// If no condition matches, the metric/bucket is routed to the default aggregator.
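///
/// # Example
///
/// A minimal wiring sketch. The configuration values, `receiver`, and
/// `project_cache` below are placeholders; how they are obtained depends on
/// the surrounding service setup and is elided here:
///
/// ```ignore
/// let router = RouterService::new(
///     default_config,   // `AggregatorServiceConfig` for the default aggregator
///     scoped_configs,   // `Vec<ScopedAggregatorConfig>`, matched in order
///     None,             // no `FlushBuckets` receiver
///     project_cache,    // `ProjectCacheHandle`
/// );
/// let handle = router.handle();
/// ```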
pub struct RouterService {
    default: AggregatorService,
    secondary: Vec<(AggregatorService, Condition)>,
}

impl RouterService {
    /// Create a new router service.
    pub fn new(
        default_config: AggregatorServiceConfig,
        secondary_configs: Vec<ScopedAggregatorConfig>,
        receiver: Option<Recipient<FlushBuckets, NoResponse>>,
        project_cache: ProjectCacheHandle,
    ) -> Self {
        let mut secondary = Vec::new();

        for c in secondary_configs {
            let service =
                AggregatorService::named(c.name, c.config, receiver.clone(), project_cache.clone());
            secondary.push((service, c.condition));
        }

        let default = AggregatorService::new(default_config, receiver, project_cache);
        Self { default, secondary }
    }

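    /// Returns a [`RouterHandle`] for the default and all secondary aggregators.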
    pub fn handle(&self) -> RouterHandle {
        let mut handles = vec![self.default.handle()];
        for (aggregator, _) in &self.secondary {
            handles.push(aggregator.handle());
        }

        RouterHandle(handles)
    }
}

impl Service for RouterService {
    type Interface = Aggregator;

    async fn run(self, mut rx: relay_system::Receiver<Self::Interface>) {
        let mut router = StartedRouter::start_in(self, &mut ServiceRunner::new());
        relay_log::info!("metrics router started");

        // Note that currently this loop never exits and will run until the tokio
        // runtime shuts down. This is about to change with the refactoring of the
        // shutdown process.
        loop {
            tokio::select! {
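                // `biased` makes the select poll branches in declaration order
                // instead of randomly, so inbox messages are always checked first.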
                biased;

                Some(message) = rx.recv() => {
                    router.handle_message(message)
                },

                else => break,
            }
        }
        relay_log::info!("metrics router stopped");
    }
}

/// Helper struct that holds the [`Addr`]s of started aggregators.
struct StartedRouter {
    default: Addr<Aggregator>,
    secondary: Vec<(Addr<Aggregator>, Vec<MetricNamespace>)>,
}

impl StartedRouter {
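    /// Spawns the default and all secondary aggregators on the given runner and
    /// records, for each secondary aggregator, the namespaces its condition matches.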
    fn start_in(router: RouterService, runner: &mut ServiceRunner) -> Self {
        let RouterService { default, secondary } = router;

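        // Pre-compute the namespaces each condition matches, so that routing a
        // bucket becomes a simple membership check instead of re-evaluating the
        // condition for every bucket.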
        let secondary = secondary
            .into_iter()
            .map(|(aggregator, condition)| {
                let namespaces: Vec<_> = MetricNamespace::all()
                    .into_iter()
                    .filter(|&namespace| condition.matches(Some(namespace)))
                    .collect();

                (runner.start(aggregator), namespaces)
            })
            .collect();

        Self {
            default: runner.start(default),
            secondary,
        }
    }

    fn handle_message(&mut self, message: Aggregator) {
        let ty = message.variant();
        relay_statsd::metric!(
            timer(RelayTimers::MetricRouterServiceDuration),
            message = ty,
            {
                match message {
                    Aggregator::MergeBuckets(msg) => self.handle_merge_buckets(msg),
                }
            }
        )
    }

    fn handle_merge_buckets(&mut self, message: MergeBuckets) {
        let MergeBuckets {
            project_key,
            mut buckets,
        } = message;

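        // Try each secondary aggregator in order: peel off the buckets whose
        // namespace it handles and forward them, keeping the rest for the next
        // aggregator.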
        for (aggregator, namespaces) in &self.secondary {
            let matching;
            (buckets, matching) = utils::split_off(buckets, |bucket| {
                bucket
                    .name
                    .try_namespace()
                    .map(|namespace| namespaces.contains(&namespace))
                    .unwrap_or(false)
            });

            if !matching.is_empty() {
                aggregator.send(MergeBuckets::new(project_key, matching));
            }
        }

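        // Anything that did not match a secondary aggregator falls through to
        // the default aggregator.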
        if !buckets.is_empty() {
            self.default.send(MergeBuckets::new(project_key, buckets));
        }
    }
}

/// Provides sync access to the state of the [`RouterService`].
#[derive(Clone, Debug)]
pub struct RouterHandle(Vec<AggregatorHandle>);

impl RouterHandle {
    /// Returns `true` if all the aggregators can still accept metrics.
    pub fn can_accept_metrics(&self) -> bool {
        self.0.iter().all(|ah| ah.can_accept_metrics())
    }
}
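
#[cfg(test)]
mod tests {
    use super::*;

    /// A minimal sketch: with no aggregator handles, `can_accept_metrics` is
    /// vacuously `true`, since `Iterator::all` returns `true` on an empty iterator.
    #[test]
    fn empty_router_handle_accepts_metrics() {
        let handle = RouterHandle(vec![]);
        assert!(handle.can_accept_metrics());
    }
}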