// relay_config/config.rs

1use std::collections::{BTreeMap, HashMap};
2use std::error::Error;
3use std::io::Write;
4use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
5use std::num::NonZeroU8;
6use std::path::{Path, PathBuf};
7use std::str::FromStr;
8use std::time::Duration;
9use std::{env, fmt, fs, io};
10
11use anyhow::Context;
12use relay_auth::{PublicKey, RelayId, SecretKey, generate_key_pair, generate_relay_id};
13use relay_common::Dsn;
14use relay_kafka::{
15    ConfigError as KafkaConfigError, KafkaConfigParam, KafkaTopic, KafkaTopicConfig,
16    TopicAssignments,
17};
18use relay_metrics::MetricNamespace;
19use serde::de::{DeserializeOwned, Unexpected, Visitor};
20use serde::{Deserialize, Deserializer, Serialize, Serializer};
21use uuid::Uuid;
22
23use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
24use crate::byte_size::ByteSize;
25use crate::upstream::UpstreamDescriptor;
26use crate::{RedisConfig, RedisConfigs, RedisConfigsRef, build_redis_configs};
27
// Presumably the default grace period for network outages, in seconds
// (matches the `u64` seconds convention of `outage_grace_period`) — TODO confirm at use site.
const DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD: u64 = 10;

// Comment header prepended to every YAML config file written by `ConfigObject::save`.
static CONFIG_YAML_HEADER: &str = r###"# Please see the relevant documentation.
# Performance tuning: https://docs.sentry.io/product/relay/operating-guidelines/
# All config options: https://docs.sentry.io/product/relay/options/
"###;
34
/// Indicates config related errors.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[non_exhaustive]
pub enum ConfigErrorKind {
    /// Failed to open the file.
    CouldNotOpenFile,
    /// Failed to save a file.
    CouldNotWriteFile,
    /// Parsing YAML failed.
    BadYaml,
    /// Parsing JSON failed.
    BadJson,
    /// Invalid config value
    InvalidValue,
    /// The user attempted to run Relay with processing enabled, but uses a binary that was
    /// compiled without the processing feature.
    ProcessingNotAvailable,
}

impl fmt::Display for ConfigErrorKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the static description first, then emit it in a single call.
        let description = match self {
            Self::CouldNotOpenFile => "could not open config file",
            Self::CouldNotWriteFile => "could not write config file",
            Self::BadYaml => "could not parse yaml config file",
            Self::BadJson => "could not parse json config file",
            Self::InvalidValue => "invalid config value",
            Self::ProcessingNotAvailable => {
                "was not compiled with processing, cannot enable processing"
            }
        };
        f.write_str(description)
    }
}
69
/// Defines the source of a config error
///
/// `Default` is derived instead of hand-written: the manual `impl Default`
/// returning `Self::None` is exactly what `#[derive(Default)]` with a
/// `#[default]` variant produces (clippy: `derivable_impls`).
#[derive(Debug, Default)]
enum ConfigErrorSource {
    /// An error occurring independently.
    #[default]
    None,
    /// An error originating from a configuration file.
    File(PathBuf),
    /// An error originating in a field override (an env var, or a CLI parameter).
    FieldOverride(String),
}

impl fmt::Display for ConfigErrorSource {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // No source: contribute nothing to the rendered error message.
            ConfigErrorSource::None => Ok(()),
            ConfigErrorSource::File(file_name) => {
                write!(f, " (file {})", file_name.display())
            }
            ConfigErrorSource::FieldOverride(name) => write!(f, " (field {name})"),
        }
    }
}
98
99/// Indicates config related errors.
100#[derive(Debug)]
101pub struct ConfigError {
102    source: ConfigErrorSource,
103    kind: ConfigErrorKind,
104}
105
106impl ConfigError {
107    #[inline]
108    fn new(kind: ConfigErrorKind) -> Self {
109        Self {
110            source: ConfigErrorSource::None,
111            kind,
112        }
113    }
114
115    #[inline]
116    fn field(field: &'static str) -> Self {
117        Self {
118            source: ConfigErrorSource::FieldOverride(field.to_owned()),
119            kind: ConfigErrorKind::InvalidValue,
120        }
121    }
122
123    #[inline]
124    fn file(kind: ConfigErrorKind, p: impl AsRef<Path>) -> Self {
125        Self {
126            source: ConfigErrorSource::File(p.as_ref().to_path_buf()),
127            kind,
128        }
129    }
130
131    /// Returns the error kind of the error.
132    pub fn kind(&self) -> ConfigErrorKind {
133        self.kind
134    }
135}
136
137impl fmt::Display for ConfigError {
138    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
139        write!(f, "{}{}", self.kind(), self.source)
140    }
141}
142
143impl Error for ConfigError {}
144
/// On-disk serialization formats supported for configuration files.
enum ConfigFormat {
    Yaml,
    Json,
}

impl ConfigFormat {
    /// Returns the file extension associated with this format.
    pub fn extension(&self) -> &'static str {
        match self {
            Self::Yaml => "yml",
            Self::Json => "json",
        }
    }
}
158
/// A configuration object that can be loaded from, and saved to, a file on disk.
///
/// Implementors choose the serialization format and base filename; `path`,
/// `load`, and `save` are provided in terms of those.
trait ConfigObject: DeserializeOwned + Serialize {
    /// The format in which to serialize this configuration.
    fn format() -> ConfigFormat;

    /// The basename of the config file.
    fn name() -> &'static str;

    /// The full filename of the config file, including the file extension.
    fn path(base: &Path) -> PathBuf {
        base.join(format!("{}.{}", Self::name(), Self::format().extension()))
    }

    /// Loads the config file from a file within the given directory location.
    ///
    /// Deserialization runs through `serde_vars` with an environment source,
    /// so values in the file can be substituted from environment variables.
    fn load(base: &Path) -> anyhow::Result<Self> {
        let path = Self::path(base);

        // Buffer the file so the deserializers do not issue tiny reads.
        let f = fs::File::open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, &path))?;
        let f = io::BufReader::new(f);

        let mut source = serde_vars::EnvSource::default();
        match Self::format() {
            ConfigFormat::Yaml => {
                serde_vars::deserialize(serde_yaml::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadYaml, &path))
            }
            ConfigFormat::Json => {
                serde_vars::deserialize(&mut serde_json::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadJson, &path))
            }
        }
    }

    /// Writes the configuration to a file within the given directory location.
    ///
    /// The file is created if it does not exist and truncated otherwise. On
    /// Unix, the file is created with mode `0o600` (owner read/write only).
    fn save(&self, base: &Path) -> anyhow::Result<()> {
        let path = Self::path(base);
        let mut options = fs::OpenOptions::new();
        options.write(true).truncate(true).create(true);

        // Remove all non-user permissions for the newly created file
        #[cfg(unix)]
        {
            use std::os::unix::fs::OpenOptionsExt;
            options.mode(0o600);
        }

        let mut f = options
            .open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?;

        match Self::format() {
            ConfigFormat::Yaml => {
                // YAML configs start with a documentation header before the body.
                f.write_all(CONFIG_YAML_HEADER.as_bytes())?;
                serde_yaml::to_writer(&mut f, self)
                    .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?
            }
            ConfigFormat::Json => serde_json::to_writer_pretty(&mut f, self)
                .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?,
        }

        // Trailing newline is cosmetic; a failure to write it is deliberately ignored.
        f.write_all(b"\n").ok();

        Ok(())
    }
}
224
/// Structure used to hold information about configuration overrides via
/// CLI parameters or environment variables.
///
/// All fields are raw, unparsed strings; `None` means "no override provided".
#[derive(Debug, Default)]
pub struct OverridableConfig {
    /// The operation mode of this relay.
    pub mode: Option<String>,
    /// The instance type of this relay.
    pub instance: Option<String>,
    /// The log level of this relay.
    pub log_level: Option<String>,
    /// The log format of this relay.
    pub log_format: Option<String>,
    /// The upstream relay or sentry instance.
    pub upstream: Option<String>,
    /// Alternate upstream provided through a Sentry DSN. Key and project will be ignored.
    pub upstream_dsn: Option<String>,
    /// The host the relay should bind to (network interface).
    pub host: Option<String>,
    /// The port to bind for the unencrypted relay HTTP server.
    pub port: Option<String>,
    /// `"true"` if processing is enabled, `"false"` otherwise.
    pub processing: Option<String>,
    /// The kafka `bootstrap.servers` configuration string.
    pub kafka_url: Option<String>,
    /// The redis server url.
    pub redis_url: Option<String>,
    /// The globally unique ID of the relay.
    pub id: Option<String>,
    /// The secret key of the relay.
    pub secret_key: Option<String>,
    /// The public key of the relay.
    pub public_key: Option<String>,
    /// Outcome source.
    pub outcome_source: Option<String>,
    /// Shutdown timeout.
    pub shutdown_timeout: Option<String>,
    /// Server name reported in the Sentry SDK.
    pub server_name: Option<String>,
}
264
265/// The relay credentials
266#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
267pub struct Credentials {
268    /// The secret key of the relay
269    pub secret_key: SecretKey,
270    /// The public key of the relay
271    pub public_key: PublicKey,
272    /// The globally unique ID of the relay.
273    pub id: RelayId,
274}
275
276impl Credentials {
277    /// Generates new random credentials.
278    pub fn generate() -> Self {
279        relay_log::info!("generating new relay credentials");
280        let (sk, pk) = generate_key_pair();
281        Self {
282            secret_key: sk,
283            public_key: pk,
284            id: generate_relay_id(),
285        }
286    }
287
288    /// Serializes this configuration to JSON.
289    pub fn to_json_string(&self) -> anyhow::Result<String> {
290        serde_json::to_string(self)
291            .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
292    }
293}
294
295impl ConfigObject for Credentials {
296    fn format() -> ConfigFormat {
297        ConfigFormat::Json
298    }
299    fn name() -> &'static str {
300        "credentials"
301    }
302}
303
304/// Information on a downstream Relay.
305#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
306#[serde(rename_all = "camelCase")]
307pub struct RelayInfo {
308    /// The public key that this Relay uses to authenticate and sign requests.
309    pub public_key: PublicKey,
310
311    /// Marks an internal relay that has privileged access to more project configuration.
312    #[serde(default)]
313    pub internal: bool,
314}
315
316impl RelayInfo {
317    /// Creates a new RelayInfo
318    pub fn new(public_key: PublicKey) -> Self {
319        Self {
320            public_key,
321            internal: false,
322        }
323    }
324}
325
326/// The operation mode of a relay.
327#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
328#[serde(rename_all = "camelCase")]
329pub enum RelayMode {
330    /// This relay acts as a proxy for all requests and events.
331    ///
332    /// Events are normalized and rate limits from the upstream are enforced, but the relay will not
333    /// fetch project configurations from the upstream or perform PII stripping. All events are
334    /// accepted unless overridden on the file system.
335    Proxy,
336
337    /// This relay is configured statically in the file system.
338    ///
339    /// Events are only accepted for projects configured statically in the file system. All other
340    /// events are rejected. If configured, PII stripping is also performed on those events.
341    Static,
342
343    /// Project configurations are managed by the upstream.
344    ///
345    /// Project configurations are always fetched from the upstream, unless they are statically
346    /// overridden in the file system. This relay must be allowed in the upstream Sentry. This is
347    /// only possible, if the upstream is Sentry directly, or another managed Relay.
348    Managed,
349
350    /// Events are held in memory for inspection only.
351    ///
352    /// This mode is used for testing sentry SDKs.
353    Capture,
354}
355
356impl fmt::Display for RelayMode {
357    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
358        match self {
359            RelayMode::Proxy => write!(f, "proxy"),
360            RelayMode::Static => write!(f, "static"),
361            RelayMode::Managed => write!(f, "managed"),
362            RelayMode::Capture => write!(f, "capture"),
363        }
364    }
365}
366
367/// The instance type of Relay.
368#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
369#[serde(rename_all = "camelCase")]
370pub enum RelayInstance {
371    /// This Relay is run as a default instance.
372    Default,
373
374    /// This Relay is run as a canary instance where experiments can be run.
375    Canary,
376}
377
378impl RelayInstance {
379    /// Returns `true` if the [`RelayInstance`] is of type [`RelayInstance::Canary`].
380    pub fn is_canary(&self) -> bool {
381        matches!(self, RelayInstance::Canary)
382    }
383}
384
385impl fmt::Display for RelayInstance {
386    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
387        match self {
388            RelayInstance::Default => write!(f, "default"),
389            RelayInstance::Canary => write!(f, "canary"),
390        }
391    }
392}
393
394impl FromStr for RelayInstance {
395    type Err = fmt::Error;
396
397    fn from_str(s: &str) -> Result<Self, Self::Err> {
398        match s {
399            "canary" => Ok(RelayInstance::Canary),
400            _ => Ok(RelayInstance::Default),
401        }
402    }
403}
404
/// Error returned when parsing an invalid [`RelayMode`].
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ParseRelayModeError;

impl fmt::Display for ParseRelayModeError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Relay mode must be one of: managed, static, proxy, capture")
    }
}

impl Error for ParseRelayModeError {}
419
420impl FromStr for RelayMode {
421    type Err = ParseRelayModeError;
422
423    fn from_str(s: &str) -> Result<Self, Self::Err> {
424        match s {
425            "proxy" => Ok(RelayMode::Proxy),
426            "static" => Ok(RelayMode::Static),
427            "managed" => Ok(RelayMode::Managed),
428            "capture" => Ok(RelayMode::Capture),
429            _ => Err(ParseRelayModeError),
430        }
431    }
432}
433
/// Returns `true` if this value is equal to `Default::default()`.
///
/// Used by `#[serde(skip_serializing_if = "is_default")]` attributes.
fn is_default<T: Default + PartialEq>(t: &T) -> bool {
    t == &T::default()
}
438
/// Checks if we are running in docker.
///
/// Detection is best-effort: either the `/.dockerenv` marker file exists, or
/// this process' cgroup file mentions a `/docker` path.
fn is_docker() -> bool {
    fs::metadata("/.dockerenv").is_ok()
        || fs::read_to_string("/proc/self/cgroup").is_ok_and(|s| s.contains("/docker"))
}
447
448/// Default value for the "bind" configuration.
449fn default_host() -> IpAddr {
450    if is_docker() {
451        // Docker images rely on this service being exposed
452        "0.0.0.0".parse().unwrap()
453    } else {
454        "127.0.0.1".parse().unwrap()
455    }
456}
457
458/// Controls responses from the readiness health check endpoint based on authentication.
459///
460/// Independent of the the readiness condition, shutdown always switches Relay into unready state.
461#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
462#[serde(rename_all = "lowercase")]
463pub enum ReadinessCondition {
464    /// (default) Relay is ready when authenticated and connected to the upstream.
465    ///
466    /// Before authentication has succeeded and during network outages, Relay responds as not ready.
467    /// Relay reauthenticates based on the `http.auth_interval` parameter. During reauthentication,
468    /// Relay remains ready until authentication fails.
469    ///
470    /// Authentication is only required for Relays in managed mode. Other Relays will only check for
471    /// network outages.
472    Authenticated,
473    /// Relay reports readiness regardless of the authentication and networking state.
474    Always,
475}
476
477impl Default for ReadinessCondition {
478    fn default() -> Self {
479        Self::Authenticated
480    }
481}
482
/// Relay specific configuration values.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Relay {
    /// The operation mode of this relay.
    ///
    /// Defaults to [`RelayMode::Managed`].
    pub mode: RelayMode,
    /// The instance type of this relay.
    ///
    /// Defaults to [`RelayInstance::Default`].
    pub instance: RelayInstance,
    /// The upstream relay or sentry instance.
    ///
    /// Defaults to `https://sentry.io/`.
    pub upstream: UpstreamDescriptor<'static>,
    /// The host the relay should bind to (network interface).
    ///
    /// Defaults to `0.0.0.0` when running inside Docker and `127.0.0.1` otherwise.
    pub host: IpAddr,
    /// The port to bind for the unencrypted relay HTTP server.
    ///
    /// Defaults to `3000`.
    pub port: u16,
    /// Optional port to bind for the encrypted relay HTTPS server.
    #[serde(skip_serializing)]
    pub tls_port: Option<u16>,
    /// The path to the identity (DER-encoded PKCS12) to use for TLS.
    #[serde(skip_serializing)]
    pub tls_identity_path: Option<PathBuf>,
    /// Password for the PKCS12 archive.
    #[serde(skip_serializing)]
    pub tls_identity_password: Option<String>,
    /// Always override project IDs from the URL and DSN with the identifier used at the upstream.
    ///
    /// Enable this setting for Relays used to redirect traffic to a migrated Sentry instance.
    /// Validation of project identifiers can be safely skipped in these cases.
    #[serde(skip_serializing_if = "is_default")]
    pub override_project_ids: bool,
}
513
514impl Default for Relay {
515    fn default() -> Self {
516        Relay {
517            mode: RelayMode::Managed,
518            instance: RelayInstance::Default,
519            upstream: "https://sentry.io/".parse().unwrap(),
520            host: default_host(),
521            port: 3000,
522            tls_port: None,
523            tls_identity_path: None,
524            tls_identity_password: None,
525            override_project_ids: false,
526        }
527    }
528}
529
/// Control the metrics.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Metrics {
    /// Hostname and port of the statsd server.
    ///
    /// Defaults to `None`.
    pub statsd: Option<String>,
    /// Common prefix that should be added to all metrics.
    ///
    /// Defaults to `"sentry.relay"`.
    pub prefix: String,
    /// Default tags to apply to all metrics.
    pub default_tags: BTreeMap<String, String>,
    /// Tag name to report the hostname to for each metric. Defaults to not sending such a tag.
    pub hostname_tag: Option<String>,
    /// Global sample rate for all emitted metrics between `0.0` and `1.0`.
    ///
    /// For example, a value of `0.3` means that only 30% of the emitted metrics will be sent.
    /// Defaults to `1.0` (100%).
    pub sample_rate: f32,
    /// Interval for periodic metrics emitted from Relay.
    ///
    /// Setting it to `0` seconds disables the periodic metrics.
    /// Defaults to 5 seconds.
    pub periodic_secs: u64,
    /// Whether local metric aggregation using statsdproxy should be enabled.
    ///
    /// Defaults to `true`.
    pub aggregate: bool,
    /// Allows emission of metrics with high cardinality tags.
    ///
    /// High cardinality tags are dynamic values attached to metrics,
    /// such as project IDs. When enabled, these tags will be included
    /// in the emitted metrics. When disabled, the tags will be omitted.
    ///
    /// Defaults to `false`.
    pub allow_high_cardinality_tags: bool,
}
569
570impl Default for Metrics {
571    fn default() -> Self {
572        Metrics {
573            statsd: None,
574            prefix: "sentry.relay".into(),
575            default_tags: BTreeMap::new(),
576            hostname_tag: None,
577            sample_rate: 1.0,
578            periodic_secs: 5,
579            aggregate: true,
580            allow_high_cardinality_tags: false,
581        }
582    }
583}
584
/// Controls processing of Sentry metrics and metric metadata.
#[derive(Serialize, Deserialize, Debug, Default)]
#[serde(default)]
pub struct SentryMetrics {
    /// Whether metric stats are collected and emitted.
    ///
    /// Metric stats are always collected and emitted when processing
    /// is enabled.
    ///
    /// This option is required when running multiple trusted Relays in a chain
    /// and the metric stats should be collected and forwarded from
    /// the first Relay in the chain.
    ///
    /// Defaults to `false`.
    pub metric_stats_enabled: bool,
}
601
/// Controls various limits
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Limits {
    /// How many requests can be sent concurrently from Relay to the upstream before Relay starts
    /// buffering.
    pub max_concurrent_requests: usize,
    /// How many queries can be sent concurrently from Relay to the upstream before Relay starts
    /// buffering.
    ///
    /// The concurrency of queries is additionally constrained by `max_concurrent_requests`.
    pub max_concurrent_queries: usize,
    /// The maximum payload size for events.
    pub max_event_size: ByteSize,
    /// The maximum size for each attachment.
    pub max_attachment_size: ByteSize,
    /// The maximum combined size for all attachments in an envelope or request.
    pub max_attachments_size: ByteSize,
    /// The maximum combined size for all client reports in an envelope or request.
    pub max_client_reports_size: ByteSize,
    /// The maximum payload size for a monitor check-in.
    pub max_check_in_size: ByteSize,
    /// The maximum payload size for an entire envelope. Individual limits still apply.
    pub max_envelope_size: ByteSize,
    /// The maximum number of session items per envelope.
    pub max_session_count: usize,
    /// The maximum number of standalone span items per envelope.
    pub max_span_count: usize,
    /// The maximum number of log items per envelope.
    pub max_log_count: usize,
    /// The maximum payload size for general API requests.
    pub max_api_payload_size: ByteSize,
    /// The maximum payload size for file uploads and chunks.
    pub max_api_file_upload_size: ByteSize,
    /// The maximum payload size for chunks
    pub max_api_chunk_upload_size: ByteSize,
    /// The maximum payload size for a profile
    pub max_profile_size: ByteSize,
    /// The maximum payload size for a log.
    pub max_log_size: ByteSize,
    /// The maximum payload size for a span.
    pub max_span_size: ByteSize,
    /// The maximum payload size for an item container.
    pub max_container_size: ByteSize,
    /// The maximum payload size for a statsd metric.
    pub max_statsd_size: ByteSize,
    /// The maximum payload size for metric buckets.
    pub max_metric_buckets_size: ByteSize,
    /// The maximum payload size for a compressed replay.
    pub max_replay_compressed_size: ByteSize,
    /// The maximum payload size for an uncompressed replay.
    #[serde(alias = "max_replay_size")]
    max_replay_uncompressed_size: ByteSize,
    /// The maximum size for a replay recording Kafka message.
    pub max_replay_message_size: ByteSize,
    /// The maximum number of threads to spawn for CPU and web work, each.
    ///
    /// The total number of threads spawned will roughly be `2 * max_thread_count`. Defaults to
    /// the number of logical CPU cores on the host.
    pub max_thread_count: usize,
    /// Controls the maximum concurrency of each worker thread.
    ///
    /// Increasing the concurrency can lead to a better utilization of worker threads by
    /// increasing the amount of I/O done concurrently.
    ///
    /// Currently has no effect and defaults to `1`.
    pub max_pool_concurrency: usize,
    /// The maximum number of seconds a query is allowed to take across retries. Individual requests
    /// have lower timeouts. Defaults to 30 seconds.
    pub query_timeout: u64,
    /// The maximum number of seconds to wait for pending envelopes after receiving a shutdown
    /// signal.
    pub shutdown_timeout: u64,
    /// Server keep-alive timeout in seconds.
    ///
    /// By default, keep-alive is set to 5 seconds.
    pub keepalive_timeout: u64,
    /// Server idle timeout in seconds.
    ///
    /// The idle timeout limits the amount of time a connection is kept open without activity.
    /// Setting this too short may abort connections before Relay is able to send a response.
    ///
    /// By default there is no idle timeout.
    pub idle_timeout: Option<u64>,
    /// Sets the maximum number of concurrent connections.
    ///
    /// Upon reaching the limit, the server will stop accepting connections.
    ///
    /// By default there is no limit.
    pub max_connections: Option<usize>,
    /// The TCP listen backlog.
    ///
    /// Configures the TCP listen backlog for the listening socket of Relay.
    /// See [`man listen(2)`](https://man7.org/linux/man-pages/man2/listen.2.html)
    /// for a more detailed description of the listen backlog.
    ///
    /// Defaults to `1024`, a value [google has been using for a long time](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4).
    pub tcp_listen_backlog: u32,
}
701
702impl Default for Limits {
703    fn default() -> Self {
704        Limits {
705            max_concurrent_requests: 100,
706            max_concurrent_queries: 5,
707            max_event_size: ByteSize::mebibytes(1),
708            max_attachment_size: ByteSize::mebibytes(100),
709            max_attachments_size: ByteSize::mebibytes(100),
710            max_client_reports_size: ByteSize::kibibytes(4),
711            max_check_in_size: ByteSize::kibibytes(100),
712            max_envelope_size: ByteSize::mebibytes(100),
713            max_session_count: 100,
714            max_span_count: 1000,
715            max_log_count: 1000,
716            max_api_payload_size: ByteSize::mebibytes(20),
717            max_api_file_upload_size: ByteSize::mebibytes(40),
718            max_api_chunk_upload_size: ByteSize::mebibytes(100),
719            max_profile_size: ByteSize::mebibytes(50),
720            max_log_size: ByteSize::mebibytes(1),
721            max_span_size: ByteSize::mebibytes(1),
722            max_container_size: ByteSize::mebibytes(3),
723            max_statsd_size: ByteSize::mebibytes(1),
724            max_metric_buckets_size: ByteSize::mebibytes(1),
725            max_replay_compressed_size: ByteSize::mebibytes(10),
726            max_replay_uncompressed_size: ByteSize::mebibytes(100),
727            max_replay_message_size: ByteSize::mebibytes(15),
728            max_thread_count: num_cpus::get(),
729            max_pool_concurrency: 1,
730            query_timeout: 30,
731            shutdown_timeout: 10,
732            keepalive_timeout: 5,
733            idle_timeout: None,
734            max_connections: None,
735            tcp_listen_backlog: 1024,
736        }
737    }
738}
739
/// Controls traffic steering.
#[derive(Debug, Default, Deserialize, Serialize)]
#[serde(default)]
pub struct Routing {
    /// Accept and forward unknown Envelope items to the upstream.
    ///
    /// Forwarding unknown items should be enabled in most cases to allow proxying traffic for newer
    /// SDK versions. The upstream in Sentry makes the final decision on which items are valid. If
    /// this is disabled, just the unknown items are removed from Envelopes, and the rest is
    /// processed as usual.
    ///
    /// Defaults to `true` for all Relay modes other than processing mode. In processing mode, this
    /// is disabled by default since the item cannot be handled.
    ///
    /// `None` means the mode-dependent default described above is applied at the usage site.
    pub accept_unknown_items: Option<bool>,
}
755
/// Http content encoding for both incoming and outgoing web requests.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum HttpEncoding {
    /// Identity function with no compression.
    ///
    /// This is the default encoding and does not require the presence of the `content-encoding`
    /// HTTP header.
    #[default]
    Identity,
    /// Compression using a [zlib](https://en.wikipedia.org/wiki/Zlib) structure with
    /// [deflate](https://en.wikipedia.org/wiki/DEFLATE) encoding.
    ///
    /// These structures are defined in [RFC 1950](https://datatracker.ietf.org/doc/html/rfc1950)
    /// and [RFC 1951](https://datatracker.ietf.org/doc/html/rfc1951).
    Deflate,
    /// A format using the [Lempel-Ziv coding](https://en.wikipedia.org/wiki/LZ77_and_LZ78#LZ77)
    /// (LZ77), with a 32-bit CRC.
    ///
    /// This is the original format of the UNIX gzip program. The HTTP/1.1 standard also recommends
    /// that the servers supporting this content-encoding should recognize `x-gzip` as an alias, for
    /// compatibility purposes.
    Gzip,
    /// A format using the [Brotli](https://en.wikipedia.org/wiki/Brotli) algorithm.
    Br,
    /// A format using the [Zstd](https://en.wikipedia.org/wiki/Zstd) compression algorithm.
    Zstd,
}
784
785impl HttpEncoding {
786    /// Parses a [`HttpEncoding`] from its `content-encoding` header value.
787    pub fn parse(str: &str) -> Self {
788        let str = str.trim();
789        if str.eq_ignore_ascii_case("zstd") {
790            Self::Zstd
791        } else if str.eq_ignore_ascii_case("br") {
792            Self::Br
793        } else if str.eq_ignore_ascii_case("gzip") || str.eq_ignore_ascii_case("x-gzip") {
794            Self::Gzip
795        } else if str.eq_ignore_ascii_case("deflate") {
796            Self::Deflate
797        } else {
798            Self::Identity
799        }
800    }
801
802    /// Returns the value for the `content-encoding` HTTP header.
803    ///
804    /// Returns `None` for [`Identity`](Self::Identity), and `Some` for other encodings.
805    pub fn name(&self) -> Option<&'static str> {
806        match self {
807            Self::Identity => None,
808            Self::Deflate => Some("deflate"),
809            Self::Gzip => Some("gzip"),
810            Self::Br => Some("br"),
811            Self::Zstd => Some("zstd"),
812        }
813    }
814}
815
/// Controls authentication and HTTP communication with the upstream.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Http {
    /// Timeout for upstream requests in seconds.
    ///
    /// This timeout covers the time from sending the request until receiving response headers.
    /// Neither the connection process and handshakes, nor reading the response body is covered in
    /// this timeout.
    ///
    /// Defaults to `5` seconds.
    pub timeout: u32,
    /// Timeout for establishing connections with the upstream in seconds.
    ///
    /// This includes SSL handshakes. Relay reuses connections when the upstream supports connection
    /// keep-alive. Connections are retained for a maximum 75 seconds, or 15 seconds of inactivity.
    ///
    /// Defaults to `3` seconds.
    pub connection_timeout: u32,
    /// Maximum interval between failed request retries in seconds.
    ///
    /// Defaults to `60` (1 minute).
    pub max_retry_interval: u32,
    /// The custom HTTP Host header to send to the upstream.
    pub host_header: Option<String>,
    /// The interval in seconds at which Relay attempts to reauthenticate with the upstream server.
    ///
    /// Re-authentication happens even when Relay is idle. If authentication fails, Relay reverts
    /// back into startup mode and tries to establish a connection. During this time, incoming
    /// envelopes will be buffered.
    ///
    /// Defaults to `600` (10 minutes).
    pub auth_interval: Option<u64>,
    /// The maximum time of experiencing uninterrupted network failures until Relay considers that
    /// it has encountered a network outage in seconds.
    ///
    /// During a network outage relay will try to reconnect and will buffer all upstream messages
    /// until it manages to reconnect.
    ///
    /// Defaults to `10` seconds.
    pub outage_grace_period: u64,
    /// The time Relay waits before retrying an upstream request, in seconds.
    ///
    /// This time is only used before going into a network outage mode.
    ///
    /// Defaults to `1` second.
    pub retry_delay: u64,
    /// The interval in seconds for continued failed project fetches at which Relay will error.
    ///
    /// A successful fetch resets this interval. Relay does nothing during long
    /// times without emitting requests.
    ///
    /// Defaults to `90` seconds.
    pub project_failure_interval: u64,
    /// Content encoding to apply to upstream store requests.
    ///
    /// By default, Relay applies `zstd` content encoding to compress upstream requests. Compression
    /// can be disabled to reduce CPU consumption, but at the expense of increased network traffic.
    ///
    /// This setting applies to all store requests of SDK data, including events, transactions,
    /// envelopes and sessions. At the moment, this does not apply to Relay's internal queries.
    ///
    /// Available options are:
    ///
    ///  - `identity`: Disables compression.
    ///  - `deflate`: Compression using a zlib header with deflate encoding.
    ///  - `gzip`: Compression using gzip.
    ///  - `br`: Compression using the brotli algorithm.
    ///  - `zstd` (default): Compression using the zstd algorithm.
    pub encoding: HttpEncoding,
    /// Submit metrics globally through a shared endpoint.
    ///
    /// As opposed to regular envelopes which are sent to an endpoint inferred from the project's
    /// DSN, this submits metrics to the global endpoint with Relay authentication.
    ///
    /// This option does not have any effect on processing mode.
    ///
    /// Defaults to `false`.
    pub global_metrics: bool,
}
882
883impl Default for Http {
884    fn default() -> Self {
885        Http {
886            timeout: 5,
887            connection_timeout: 3,
888            max_retry_interval: 60, // 1 minute
889            host_header: None,
890            auth_interval: Some(600), // 10 minutes
891            outage_grace_period: DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD,
892            retry_delay: default_retry_delay(),
893            project_failure_interval: default_project_failure_interval(),
894            encoding: HttpEncoding::Zstd,
895            global_metrics: false,
896        }
897    }
898}
899
/// Default delay before retrying a request to an unavailable upstream: 1 second.
fn default_retry_delay() -> u64 {
    1
}
904
/// Default interval of continued project fetch failures after which Relay errors: 90 seconds.
fn default_project_failure_interval() -> u64 {
    90
}
909
910/// Default for max disk size, 500 MB.
911fn spool_envelopes_max_disk_size() -> ByteSize {
912    ByteSize::mebibytes(500)
913}
914
915/// Default number of encoded envelope bytes to cache before writing to disk.
916fn spool_envelopes_batch_size_bytes() -> ByteSize {
917    ByteSize::kibibytes(10)
918}
919
/// Default maximum time an envelope may spend in the buffer: 24 hours, in seconds.
fn spool_envelopes_max_envelope_delay_secs() -> u64 {
    60 * 60 * 24
}
923
/// Default refresh interval for disk-usage monitoring: 100 milliseconds.
fn spool_disk_usage_refresh_frequency_ms() -> u64 {
    100
}
928
/// Default capacity of the bounded queue used to apply backpressure: 500 envelopes.
fn spool_max_backpressure_envelopes() -> usize {
    500
}
933
/// Default relative memory threshold above which unspooling stops: 90%.
fn spool_max_backpressure_memory_percent() -> f32 {
    0.9
}
938
/// Default number of buffer partitions: a single partition.
fn spool_envelopes_partitions() -> NonZeroU8 {
    NonZeroU8::new(1).expect("1 is a valid non-zero value")
}
943
/// Persistent buffering configuration for incoming envelopes.
#[derive(Debug, Serialize, Deserialize)]
pub struct EnvelopeSpool {
    /// The path of the SQLite database file(s) which persist the data.
    ///
    /// Based on the number of partitions, more database files will be created within the same path.
    ///
    /// If not set (the default), the envelopes will be buffered in memory.
    pub path: Option<PathBuf>,
    /// The maximum size of the buffer to keep, in bytes.
    ///
    /// When the on-disk buffer reaches this size, new envelopes will be dropped.
    ///
    /// Defaults to 500MB.
    #[serde(default = "spool_envelopes_max_disk_size")]
    pub max_disk_size: ByteSize,
    /// Size of the batch of compressed envelopes that are spooled to disk at once.
    ///
    /// Note that this is the size after which spooling will be triggered but it does not guarantee
    /// that exactly this size will be spooled, it can be greater or equal.
    ///
    /// Defaults to 10 KiB.
    #[serde(default = "spool_envelopes_batch_size_bytes")]
    pub batch_size_bytes: ByteSize,
    /// Maximum time between receiving the envelope and processing it.
    ///
    /// When envelopes spend too much time in the buffer (e.g. because their project cannot be loaded),
    /// they are dropped.
    ///
    /// Defaults to 24h.
    #[serde(default = "spool_envelopes_max_envelope_delay_secs")]
    pub max_envelope_delay_secs: u64,
    /// The refresh frequency in ms of how frequently disk usage is updated by querying SQLite
    /// internal page stats.
    ///
    /// Defaults to 100ms.
    #[serde(default = "spool_disk_usage_refresh_frequency_ms")]
    pub disk_usage_refresh_frequency_ms: u64,
    /// The amount of envelopes that the envelope buffer can push to its output queue.
    ///
    /// Defaults to 500.
    #[serde(default = "spool_max_backpressure_envelopes")]
    pub max_backpressure_envelopes: usize,
    /// The relative memory usage above which the buffer service will stop dequeueing envelopes.
    ///
    /// Only applies when [`Self::path`] is set.
    ///
    /// This value should be lower than [`Health::max_memory_percent`] to prevent flip-flopping.
    ///
    /// Warning: This threshold can cause the buffer service to deadlock when the buffer consumes
    /// excessive memory (as influenced by [`Self::batch_size_bytes`]).
    ///
    /// This scenario arises when the buffer stops spooling due to reaching the
    /// [`Self::max_backpressure_memory_percent`] limit, but the batch threshold for spooling
    /// ([`Self::batch_size_bytes`]) is never reached. As a result, no data is spooled, memory usage
    /// continues to grow, and the system becomes deadlocked.
    ///
    /// ### Example
    /// Suppose the system has 1GB of available memory and is configured to spool only after
    /// accumulating 10GB worth of envelopes. If Relay consumes 900MB of memory, it will stop
    /// unspooling due to reaching the [`Self::max_backpressure_memory_percent`] threshold.
    ///
    /// However, because the buffer hasn't accumulated the 10GB needed to trigger spooling,
    /// no data will be offloaded. Memory usage keeps increasing until it hits the
    /// [`Health::max_memory_percent`] threshold, e.g., at 950MB. At this point:
    ///
    /// - No more envelopes are accepted.
    /// - The buffer remains stuck, as unspooling won’t resume until memory drops below 900MB which
    ///   will not happen.
    /// - A deadlock occurs, with the system unable to recover without manual intervention.
    ///
    /// Defaults to 90% (5% less than max memory).
    #[serde(default = "spool_max_backpressure_memory_percent")]
    pub max_backpressure_memory_percent: f32,
    /// Number of partitions of the buffer.
    ///
    /// A partition is a separate instance of the buffer which has its own isolated queue, stacks
    /// and other resources.
    ///
    /// Defaults to 1.
    #[serde(default = "spool_envelopes_partitions")]
    pub partitions: NonZeroU8,
}
1027
1028impl Default for EnvelopeSpool {
1029    fn default() -> Self {
1030        Self {
1031            path: None,
1032            max_disk_size: spool_envelopes_max_disk_size(),
1033            batch_size_bytes: spool_envelopes_batch_size_bytes(),
1034            max_envelope_delay_secs: spool_envelopes_max_envelope_delay_secs(),
1035            disk_usage_refresh_frequency_ms: spool_disk_usage_refresh_frequency_ms(),
1036            max_backpressure_envelopes: spool_max_backpressure_envelopes(),
1037            max_backpressure_memory_percent: spool_max_backpressure_memory_percent(),
1038            partitions: spool_envelopes_partitions(),
1039        }
1040    }
1041}
1042
/// Persistent buffering configuration.
///
/// Wraps the `spool.envelopes` section of the config file.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Spool {
    /// Configuration for envelope spooling.
    #[serde(default)]
    pub envelopes: EnvelopeSpool,
}
1050
/// Controls internal caching behavior.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cache {
    /// The full project state will be requested by this Relay if set to `true`.
    ///
    /// Defaults to `false`.
    pub project_request_full_config: bool,
    /// The cache timeout for project configurations in seconds.
    ///
    /// Defaults to `300` (5 minutes).
    pub project_expiry: u32,
    /// Continue using project state this many seconds after cache expiry while a new state is
    /// being fetched. This is added on top of `project_expiry`.
    ///
    /// Default is 2 minutes.
    pub project_grace_period: u32,
    /// Refresh a project after the specified seconds.
    ///
    /// The time must be between expiry time and the grace period.
    ///
    /// By default there are no refreshes enabled.
    pub project_refresh_interval: Option<u32>,
    /// The cache timeout for downstream relay info (public keys) in seconds.
    ///
    /// Defaults to `3600` (1 hour).
    pub relay_expiry: u32,
    /// Unused cache timeout for envelopes.
    ///
    /// The envelope buffer is instead controlled by `envelope_buffer_size`, which controls the
    /// maximum number of envelopes in the buffer. A time based configuration may be re-introduced
    /// at a later point.
    #[serde(alias = "event_expiry")]
    envelope_expiry: u32,
    /// The maximum amount of envelopes to queue before dropping them.
    ///
    /// Defaults to `1000`.
    #[serde(alias = "event_buffer_size")]
    envelope_buffer_size: u32,
    /// The cache timeout for non-existing entries.
    ///
    /// Defaults to `60` (1 minute).
    pub miss_expiry: u32,
    /// The buffer timeout for batched project config queries before sending them upstream in ms.
    ///
    /// Defaults to `100` (100ms).
    pub batch_interval: u32,
    /// The buffer timeout for batched queries of downstream relays in ms. Defaults to 100ms.
    pub downstream_relays_batch_interval: u32,
    /// The maximum number of project configs to fetch from Sentry at once. Defaults to 500.
    ///
    /// `cache.batch_interval` controls how quickly batches are sent, this controls the batch size.
    pub batch_size: usize,
    /// Interval for watching local cache override files in seconds.
    ///
    /// Defaults to `10` seconds.
    pub file_interval: u32,
    /// Interval for fetching new global configs from the upstream, in seconds.
    ///
    /// Defaults to `10` seconds.
    pub global_config_fetch_interval: u32,
}
1097
1098impl Default for Cache {
1099    fn default() -> Self {
1100        Cache {
1101            project_request_full_config: false,
1102            project_expiry: 300,       // 5 minutes
1103            project_grace_period: 120, // 2 minutes
1104            project_refresh_interval: None,
1105            relay_expiry: 3600,   // 1 hour
1106            envelope_expiry: 600, // 10 minutes
1107            envelope_buffer_size: 1000,
1108            miss_expiry: 60,                       // 1 minute
1109            batch_interval: 100,                   // 100ms
1110            downstream_relays_batch_interval: 100, // 100ms
1111            batch_size: 500,
1112            file_interval: 10,                // 10 seconds
1113            global_config_fetch_interval: 10, // 10 seconds
1114        }
1115    }
1116}
1117
/// Default maximum future timestamp tolerance for ingested events: 1 minute, in seconds.
fn default_max_secs_in_future() -> u32 {
    60
}
1121
/// Default maximum age of ingested sessions: 5 days, in seconds.
fn default_max_session_secs_in_past() -> u32 {
    5 * 24 * 3600
}
1125
1126fn default_chunk_size() -> ByteSize {
1127    ByteSize::mebibytes(1)
1128}
1129
/// Default Redis key prefix for cached project configs.
fn default_projectconfig_cache_prefix() -> String {
    String::from("relayconfig")
}
1133
/// Default maximum rate limit reported to clients: 300 seconds (5 minutes).
#[allow(clippy::unnecessary_wraps)]
fn default_max_rate_limit() -> Option<u32> {
    Some(300)
}
1138
/// Controls Sentry-internal event processing.
#[derive(Serialize, Deserialize, Debug)]
pub struct Processing {
    /// True if the Relay should do processing. Defaults to `false`.
    pub enabled: bool,
    /// GeoIp DB file source.
    ///
    /// Unset by default.
    #[serde(default)]
    pub geoip_path: Option<PathBuf>,
    /// Maximum future timestamp of ingested events.
    ///
    /// Defaults to `60` seconds (1 minute).
    #[serde(default = "default_max_secs_in_future")]
    pub max_secs_in_future: u32,
    /// Maximum age of ingested sessions. Older sessions will be dropped.
    ///
    /// Defaults to 5 days, in seconds.
    #[serde(default = "default_max_session_secs_in_past")]
    pub max_session_secs_in_past: u32,
    /// Kafka producer configurations.
    pub kafka_config: Vec<KafkaConfigParam>,
    /// Configure what span format to produce.
    #[serde(default)]
    pub span_producers: SpanProducers,
    /// Additional kafka producer configurations.
    ///
    /// The `kafka_config` is the default producer configuration used for all topics. A secondary
    /// kafka config can be referenced in `topics:` like this:
    ///
    /// ```yaml
    /// secondary_kafka_configs:
    ///   mycustomcluster:
    ///     - name: 'bootstrap.servers'
    ///       value: 'sentry_kafka_metrics:9093'
    ///
    /// topics:
    ///   transactions: ingest-transactions
    ///   metrics:
    ///     name: ingest-metrics
    ///     config: mycustomcluster
    /// ```
    ///
    /// Then metrics will be produced to an entirely different Kafka cluster.
    #[serde(default)]
    pub secondary_kafka_configs: BTreeMap<String, Vec<KafkaConfigParam>>,
    /// Kafka topic names.
    #[serde(default)]
    pub topics: TopicAssignments,
    /// Whether to validate the supplied topics by calling Kafka's metadata endpoints.
    ///
    /// Defaults to `false`.
    #[serde(default)]
    pub kafka_validate_topics: bool,
    /// Redis hosts to connect to for storing state for rate limits.
    #[serde(default)]
    pub redis: Option<RedisConfigs>,
    /// Maximum chunk size of attachments for Kafka.
    ///
    /// Defaults to 1 MiB.
    #[serde(default = "default_chunk_size")]
    pub attachment_chunk_size: ByteSize,
    /// Prefix to use when looking up project configs in Redis. Defaults to "relayconfig".
    #[serde(default = "default_projectconfig_cache_prefix")]
    pub projectconfig_cache_prefix: String,
    /// Maximum rate limit to report to clients.
    ///
    /// Defaults to `300` seconds (5 minutes).
    #[serde(default = "default_max_rate_limit")]
    pub max_rate_limit: Option<u32>,
}
1198
1199impl Default for Processing {
1200    /// Constructs a disabled processing configuration.
1201    fn default() -> Self {
1202        Self {
1203            enabled: false,
1204            geoip_path: None,
1205            max_secs_in_future: default_max_secs_in_future(),
1206            max_session_secs_in_past: default_max_session_secs_in_past(),
1207            kafka_config: Vec::new(),
1208            secondary_kafka_configs: BTreeMap::new(),
1209            topics: TopicAssignments::default(),
1210            kafka_validate_topics: false,
1211            redis: None,
1212            attachment_chunk_size: default_chunk_size(),
1213            projectconfig_cache_prefix: default_projectconfig_cache_prefix(),
1214            max_rate_limit: default_max_rate_limit(),
1215            span_producers: Default::default(),
1216        }
1217    }
1218}
1219
/// Configuration for span producers.
#[derive(Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct SpanProducers {
    /// Send JSON spans to `ingest-spans`.
    ///
    /// Defaults to `true`.
    pub produce_json: bool,
    /// Send Protobuf (TraceItem) to `snuba-items`.
    ///
    /// Defaults to `false`.
    pub produce_protobuf: bool,
}
1229
1230impl Default for SpanProducers {
1231    fn default() -> Self {
1232        Self {
1233            produce_json: true,
1234            produce_protobuf: false,
1235        }
1236    }
1237}
1238
/// Configuration for normalization in this Relay.
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Normalization {
    /// Level of normalization for Relay to apply to incoming data.
    ///
    /// Defaults to [`NormalizationLevel::Default`].
    #[serde(default)]
    pub level: NormalizationLevel,
}
1247
/// Configuration for the level of normalization this Relay should do.
///
/// Serialized in lowercase (`default`, `full`).
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum NormalizationLevel {
    /// Runs normalization, excluding steps that break future compatibility.
    ///
    /// Processing Relays run [`NormalizationLevel::Full`] if this option is set.
    #[default]
    Default,
    /// Run full normalization.
    ///
    /// It includes steps that break future compatibility and should only run in
    /// the last layer of relays.
    Full,
}
1263
/// Configuration values for the outcome aggregator.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct OutcomeAggregatorConfig {
    /// Defines the width of the buckets into which outcomes are aggregated, in seconds.
    ///
    /// Defaults to `60` (1 minute).
    pub bucket_interval: u64,
    /// Defines how often all buckets are flushed, in seconds.
    ///
    /// Defaults to `120` (2 minutes).
    pub flush_interval: u64,
}
1273
1274impl Default for OutcomeAggregatorConfig {
1275    fn default() -> Self {
1276        Self {
1277            bucket_interval: 60,
1278            flush_interval: 120,
1279        }
1280    }
1281}
1282
/// Determines how to emit outcomes.
///
/// For compatibility reasons, this deserializes from the booleans `true` and
/// `false` as well as the string `"as_client_reports"`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EmitOutcomes {
    /// Do not emit any outcomes.
    None,
    /// Emit outcomes as client reports.
    AsClientReports,
    /// Emit outcomes as regular outcomes.
    AsOutcomes,
}
1295
1296impl EmitOutcomes {
1297    /// Returns true of outcomes are emitted via http, kafka, or client reports.
1298    pub fn any(&self) -> bool {
1299        !matches!(self, EmitOutcomes::None)
1300    }
1301}
1302
1303impl Serialize for EmitOutcomes {
1304    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
1305    where
1306        S: Serializer,
1307    {
1308        // For compatibility, serialize None and AsOutcomes as booleans.
1309        match self {
1310            Self::None => serializer.serialize_bool(false),
1311            Self::AsClientReports => serializer.serialize_str("as_client_reports"),
1312            Self::AsOutcomes => serializer.serialize_bool(true),
1313        }
1314    }
1315}
1316
1317struct EmitOutcomesVisitor;
1318
1319impl Visitor<'_> for EmitOutcomesVisitor {
1320    type Value = EmitOutcomes;
1321
1322    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
1323        formatter.write_str("true, false, or 'as_client_reports'")
1324    }
1325
1326    fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
1327    where
1328        E: serde::de::Error,
1329    {
1330        Ok(if v {
1331            EmitOutcomes::AsOutcomes
1332        } else {
1333            EmitOutcomes::None
1334        })
1335    }
1336
1337    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
1338    where
1339        E: serde::de::Error,
1340    {
1341        if v == "as_client_reports" {
1342            Ok(EmitOutcomes::AsClientReports)
1343        } else {
1344            Err(E::invalid_value(Unexpected::Str(v), &"as_client_reports"))
1345        }
1346    }
1347}
1348
impl<'de> Deserialize<'de> for EmitOutcomes {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `deserialize_any` lets the visitor accept both booleans and strings,
        // mirroring the `Serialize` implementation above.
        deserializer.deserialize_any(EmitOutcomesVisitor)
    }
}
1357
/// Outcome generation specific configuration values.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Outcomes {
    /// Controls whether outcomes will be emitted when processing is disabled.
    /// Processing relays always emit outcomes (for backwards compatibility).
    /// Can take the following values: false, "as_client_reports", true.
    ///
    /// Defaults to "as_client_reports".
    pub emit_outcomes: EmitOutcomes,
    /// Controls whether client reported outcomes should be emitted.
    ///
    /// Defaults to `true`.
    pub emit_client_outcomes: bool,
    /// The maximum number of outcomes that are batched before being sent
    /// via http to the upstream (only applies to non processing relays).
    ///
    /// Defaults to `1000`.
    pub batch_size: usize,
    /// The maximum time interval (in milliseconds) that an outcome may be batched
    /// via http to the upstream (only applies to non processing relays).
    ///
    /// Defaults to `500` milliseconds.
    pub batch_interval: u64,
    /// Defines the source string registered in the outcomes originating from
    /// this Relay (typically something like the region or the layer).
    pub source: Option<String>,
    /// Configures the outcome aggregator.
    pub aggregator: OutcomeAggregatorConfig,
}
1380
1381impl Default for Outcomes {
1382    fn default() -> Self {
1383        Outcomes {
1384            emit_outcomes: EmitOutcomes::AsClientReports,
1385            emit_client_outcomes: true,
1386            batch_size: 1000,
1387            batch_interval: 500,
1388            source: None,
1389            aggregator: OutcomeAggregatorConfig::default(),
1390        }
1391    }
1392}
1393
/// Minimal version of a config for dumping out.
///
/// Contains only the `relay` section; used to write an initial `config.yml`.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct MinimalConfig {
    /// The relay part of the config.
    pub relay: Relay,
}
1400
1401impl MinimalConfig {
1402    /// Saves the config in the given config folder as config.yml
1403    pub fn save_in_folder<P: AsRef<Path>>(&self, p: P) -> anyhow::Result<()> {
1404        let path = p.as_ref();
1405        if fs::metadata(path).is_err() {
1406            fs::create_dir_all(path)
1407                .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, path))?;
1408        }
1409        self.save(path)
1410    }
1411}
1412
impl ConfigObject for MinimalConfig {
    fn format() -> ConfigFormat {
        // Minimal configs are always written as YAML.
        ConfigFormat::Yaml
    }

    fn name() -> &'static str {
        // Base file name; combined with the format this yields `config.yml`.
        "config"
    }
}
1422
/// Alternative serialization of RelayInfo for config file using snake case.
mod config_relay_info {
    use serde::ser::SerializeMap;

    use super::*;

    // Uses snake_case as opposed to camelCase.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    struct RelayInfoConfig {
        public_key: PublicKey,
        // A missing `internal` key in the config means `false`.
        #[serde(default)]
        internal: bool,
    }

    impl From<RelayInfoConfig> for RelayInfo {
        fn from(v: RelayInfoConfig) -> Self {
            RelayInfo {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    impl From<RelayInfo> for RelayInfoConfig {
        fn from(v: RelayInfo) -> Self {
            RelayInfoConfig {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    /// Deserializes a map of relay infos from the snake_case config representation.
    pub(super) fn deserialize<'de, D>(des: D) -> Result<HashMap<RelayId, RelayInfo>, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Deserialize into the snake_case wrapper first, then convert each value.
        let map = HashMap::<RelayId, RelayInfoConfig>::deserialize(des)?;
        Ok(map.into_iter().map(|(k, v)| (k, v.into())).collect())
    }

    /// Serializes a map of relay infos into the snake_case config representation.
    pub(super) fn serialize<S>(elm: &HashMap<RelayId, RelayInfo>, ser: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = ser.serialize_map(Some(elm.len()))?;

        // Convert each entry to the snake_case wrapper before serializing.
        for (k, v) in elm {
            map.serialize_entry(k, &RelayInfoConfig::from(v.clone()))?;
        }

        map.end()
    }
}
1476
/// Authentication options.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct AuthConfig {
    /// Controls responses from the readiness health check endpoint based on authentication.
    ///
    /// Omitted from serialization when it equals the default.
    #[serde(default, skip_serializing_if = "is_default")]
    pub ready: ReadinessCondition,

    /// Statically authenticated downstream relays.
    ///
    /// Serialized in snake_case via the `config_relay_info` module.
    #[serde(default, with = "config_relay_info")]
    pub static_relays: HashMap<RelayId, RelayInfo>,
}
1488
/// GeoIp database configuration options.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct GeoIpConfig {
    /// The path to GeoIP database.
    ///
    /// Unset by default.
    pub path: Option<PathBuf>,
}
1495
/// Cardinality Limiter configuration options.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct CardinalityLimiter {
    /// Cache vacuum interval in seconds for the in memory cache.
    ///
    /// The cache will scan for expired values based on this interval.
    ///
    /// Defaults to 180 seconds, 3 minutes.
    pub cache_vacuum_interval: u64,
}
1507
1508impl Default for CardinalityLimiter {
1509    fn default() -> Self {
1510        Self {
1511            cache_vacuum_interval: 180,
1512        }
1513    }
1514}
1515
/// Settings to control Relay's health checks.
///
/// After breaching one of the configured thresholds, Relay will
/// return an `unhealthy` status from its health endpoint.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Health {
    /// Interval to refresh internal health checks.
    ///
    /// Shorter intervals will decrease the time it takes the health check endpoint to report
    /// issues, but can also increase sporadic unhealthy responses.
    ///
    /// Defaults to `3000` (3 seconds).
    pub refresh_interval_ms: u64,
    /// Maximum memory watermark in bytes.
    ///
    /// By default, there is no absolute limit set and the watermark
    /// is only controlled by setting [`Self::max_memory_percent`].
    pub max_memory_bytes: Option<ByteSize>,
    /// Maximum memory watermark as a percentage of maximum system memory.
    ///
    /// Defaults to `0.95` (95%).
    pub max_memory_percent: f32,
    /// Health check probe timeout in milliseconds.
    ///
    /// Any probe exceeding the timeout will be considered failed.
    /// This limits the max execution time of Relay health checks.
    ///
    /// Defaults to 900 milliseconds.
    pub probe_timeout_ms: u64,
    /// The refresh frequency of memory stats which are used to poll memory
    /// usage of Relay.
    ///
    /// The implementation of memory stats guarantees that the refresh will happen at
    /// least every `x` ms since memory readings are lazy and are updated only if needed.
    ///
    /// Defaults to `100` milliseconds.
    pub memory_stat_refresh_frequency_ms: u64,
}
1553
1554impl Default for Health {
1555    fn default() -> Self {
1556        Self {
1557            refresh_interval_ms: 3000,
1558            max_memory_bytes: None,
1559            max_memory_percent: 0.95,
1560            probe_timeout_ms: 900,
1561            memory_stat_refresh_frequency_ms: 100,
1562        }
1563    }
1564}
1565
/// COGS configuration.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cogs {
    /// Maximum amount of COGS measurements allowed to backlog.
    ///
    /// Any additional COGS measurements recorded will be dropped.
    ///
    /// Defaults to `10_000`.
    pub max_queue_size: u64,
    /// Relay COGS resource id.
    ///
    /// All Relay related COGS measurements are emitted with this resource id.
    ///
    /// Defaults to `relay_service`.
    pub relay_resource_id: String,
}
1583
1584impl Default for Cogs {
1585    fn default() -> Self {
1586        Self {
1587            max_queue_size: 10_000,
1588            relay_resource_id: "relay_service".to_owned(),
1589        }
1590    }
1591}
1592
/// All parsed configuration values, one field per top-level section of the
/// config file. Every section is optional and falls back to its default.
#[derive(Serialize, Deserialize, Debug, Default)]
struct ConfigValues {
    /// The `relay` section.
    #[serde(default)]
    relay: Relay,
    /// The `http` section (upstream communication).
    #[serde(default)]
    http: Http,
    /// The `cache` section.
    #[serde(default)]
    cache: Cache,
    /// The `spool` section (envelope buffering).
    #[serde(default)]
    spool: Spool,
    /// The `limits` section.
    #[serde(default)]
    limits: Limits,
    /// The `logging` section.
    #[serde(default)]
    logging: relay_log::LogConfig,
    /// The `routing` section.
    #[serde(default)]
    routing: Routing,
    /// The `metrics` section (statsd emission).
    #[serde(default)]
    metrics: Metrics,
    /// The `sentry_metrics` section.
    #[serde(default)]
    sentry_metrics: SentryMetrics,
    /// The `sentry` section (internal error reporting).
    #[serde(default)]
    sentry: relay_log::SentryConfig,
    /// The `processing` section.
    #[serde(default)]
    processing: Processing,
    /// The `outcomes` section.
    #[serde(default)]
    outcomes: Outcomes,
    /// The `aggregator` section (metrics aggregation).
    #[serde(default)]
    aggregator: AggregatorServiceConfig,
    /// The `secondary_aggregators` section.
    #[serde(default)]
    secondary_aggregators: Vec<ScopedAggregatorConfig>,
    /// The `auth` section.
    #[serde(default)]
    auth: AuthConfig,
    /// The `geoip` section.
    #[serde(default)]
    geoip: GeoIpConfig,
    /// The `normalization` section.
    #[serde(default)]
    normalization: Normalization,
    /// The `cardinality_limiter` section.
    #[serde(default)]
    cardinality_limiter: CardinalityLimiter,
    /// The `health` section.
    #[serde(default)]
    health: Health,
    /// The `cogs` section.
    #[serde(default)]
    cogs: Cogs,
}
1636
impl ConfigObject for ConfigValues {
    fn format() -> ConfigFormat {
        // The main configuration file is YAML.
        ConfigFormat::Yaml
    }

    fn name() -> &'static str {
        // Base file name of the main configuration file.
        "config"
    }
}
1646
/// Config struct.
///
/// Bundles the parsed configuration values with optional credentials and the
/// folder the configuration was loaded from.
pub struct Config {
    /// Parsed values of all configuration sections.
    values: ConfigValues,
    /// Credentials, only present when a credentials file was found (see
    /// `Config::from_path`); `None` for configs created from JSON.
    credentials: Option<Credentials>,
    /// The config folder; empty for configs created via `from_json_value`.
    path: PathBuf,
}
1653
impl fmt::Debug for Config {
    // Hand-written `Debug`: only `path` and `values` are printed. The
    // `credentials` field is omitted — presumably to keep secrets out of debug
    // output; confirm before adding it here.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Config")
            .field("path", &self.path)
            .field("values", &self.values)
            .finish()
    }
}
1662
1663impl Config {
1664    /// Loads a config from a given config folder.
1665    pub fn from_path<P: AsRef<Path>>(path: P) -> anyhow::Result<Config> {
1666        let path = env::current_dir()
1667            .map(|x| x.join(path.as_ref()))
1668            .unwrap_or_else(|_| path.as_ref().to_path_buf());
1669
1670        let config = Config {
1671            values: ConfigValues::load(&path)?,
1672            credentials: if Credentials::path(&path).exists() {
1673                Some(Credentials::load(&path)?)
1674            } else {
1675                None
1676            },
1677            path: path.clone(),
1678        };
1679
1680        if cfg!(not(feature = "processing")) && config.processing_enabled() {
1681            return Err(ConfigError::file(ConfigErrorKind::ProcessingNotAvailable, &path).into());
1682        }
1683
1684        Ok(config)
1685    }
1686
    /// Creates a config from a JSON value.
    ///
    /// This is mostly useful for tests.
    pub fn from_json_value(value: serde_json::Value) -> anyhow::Result<Config> {
        Ok(Config {
            values: serde_json::from_value(value)
                .with_context(|| ConfigError::new(ConfigErrorKind::BadJson))?,
            // In-memory configs have no credentials and no on-disk location.
            credentials: None,
            path: PathBuf::new(),
        })
    }
1698
    /// Override configuration with values coming from other sources (e.g. env variables or
    /// command line parameters)
    ///
    /// Overrides are applied in-place on top of the already loaded values. All
    /// override values arrive as strings and are parsed here; parse failures
    /// surface as [`ConfigError::field`] errors naming the offending field.
    pub fn apply_override(
        &mut self,
        mut overrides: OverridableConfig,
    ) -> anyhow::Result<&mut Self> {
        // --- relay section overrides ---
        let relay = &mut self.values.relay;

        if let Some(mode) = overrides.mode {
            relay.mode = mode
                .parse::<RelayMode>()
                .with_context(|| ConfigError::field("mode"))?;
        }

        if let Some(deployment) = overrides.instance {
            relay.instance = deployment
                .parse::<RelayInstance>()
                // NOTE(review): the error names the field "deployment" while
                // the override field is `instance` — confirm which name is
                // user-facing before changing.
                .with_context(|| ConfigError::field("deployment"))?;
        }

        if let Some(log_level) = overrides.log_level {
            self.values.logging.level = log_level.parse()?;
        }

        if let Some(log_format) = overrides.log_format {
            self.values.logging.format = log_format.parse()?;
        }

        // A full upstream URL takes precedence over a DSN-derived upstream.
        if let Some(upstream) = overrides.upstream {
            relay.upstream = upstream
                .parse::<UpstreamDescriptor>()
                .with_context(|| ConfigError::field("upstream"))?;
        } else if let Some(upstream_dsn) = overrides.upstream_dsn {
            relay.upstream = upstream_dsn
                .parse::<Dsn>()
                .map(|dsn| UpstreamDescriptor::from_dsn(&dsn).into_owned())
                .with_context(|| ConfigError::field("upstream_dsn"))?;
        }

        if let Some(host) = overrides.host {
            relay.host = host
                .parse::<IpAddr>()
                .with_context(|| ConfigError::field("host"))?;
        }

        if let Some(port) = overrides.port {
            relay.port = port
                .as_str()
                .parse()
                .with_context(|| ConfigError::field("port"))?;
        }

        // --- processing overrides ---
        let processing = &mut self.values.processing;
        if let Some(enabled) = overrides.processing {
            // Accepts "true"/"1" and "false"/"0"/"" (case-insensitive);
            // anything else is rejected.
            match enabled.to_lowercase().as_str() {
                "true" | "1" => processing.enabled = true,
                "false" | "0" | "" => processing.enabled = false,
                _ => return Err(ConfigError::field("processing").into()),
            }
        }

        if let Some(redis) = overrides.redis_url {
            processing.redis = Some(RedisConfigs::Unified(RedisConfig::single(redis)))
        }

        if let Some(kafka_url) = overrides.kafka_url {
            // Update the existing `bootstrap.servers` parameter if present,
            // otherwise append a new one.
            let existing = processing
                .kafka_config
                .iter_mut()
                .find(|e| e.name == "bootstrap.servers");

            if let Some(config_param) = existing {
                config_param.value = kafka_url;
            } else {
                processing.kafka_config.push(KafkaConfigParam {
                    name: "bootstrap.servers".to_owned(),
                    value: kafka_url,
                })
            }
        }
        // --- credentials overrides ---
        // Parse each credential field up front so parse errors are reported
        // before any partial state is applied.
        let id = if let Some(id) = overrides.id {
            let id = Uuid::parse_str(&id).with_context(|| ConfigError::field("id"))?;
            Some(id)
        } else {
            None
        };
        let public_key = if let Some(public_key) = overrides.public_key {
            let public_key = public_key
                .parse::<PublicKey>()
                .with_context(|| ConfigError::field("public_key"))?;
            Some(public_key)
        } else {
            None
        };

        let secret_key = if let Some(secret_key) = overrides.secret_key {
            let secret_key = secret_key
                .parse::<SecretKey>()
                .with_context(|| ConfigError::field("secret_key"))?;
            Some(secret_key)
        } else {
            None
        };
        let outcomes = &mut self.values.outcomes;
        if overrides.outcome_source.is_some() {
            outcomes.source = overrides.outcome_source.take();
        }

        if let Some(credentials) = &mut self.credentials {
            // We have existing credentials; override individual entries.
            if let Some(id) = id {
                credentials.id = id;
            }
            if let Some(public_key) = public_key {
                credentials.public_key = public_key;
            }
            if let Some(secret_key) = secret_key {
                credentials.secret_key = secret_key
            }
        } else {
            // No existing credentials; we may only create the full set at once.
            match (id, public_key, secret_key) {
                (Some(id), Some(public_key), Some(secret_key)) => {
                    self.credentials = Some(Credentials {
                        secret_key,
                        public_key,
                        id,
                    })
                }
                (None, None, None) => {
                    // nothing provided, we'll just leave the credentials None, maybe we
                    // don't need them in the current command or we'll override them later
                }
                _ => {
                    return Err(ConfigError::field("incomplete credentials").into());
                }
            }
        }

        let limits = &mut self.values.limits;
        if let Some(shutdown_timeout) = overrides.shutdown_timeout {
            // NOTE(review): an unparsable shutdown timeout is silently
            // ignored rather than reported — unlike every other field above.
            // Confirm whether this is intentional.
            if let Ok(shutdown_timeout) = shutdown_timeout.parse::<u64>() {
                limits.shutdown_timeout = shutdown_timeout;
            }
        }

        if let Some(server_name) = overrides.server_name {
            self.values.sentry.server_name = Some(server_name.into());
        }

        Ok(self)
    }
1852
1853    /// Checks if the config is already initialized.
1854    pub fn config_exists<P: AsRef<Path>>(path: P) -> bool {
1855        fs::metadata(ConfigValues::path(path.as_ref())).is_ok()
1856    }
1857
    /// Returns the path of the config folder this config was loaded from.
    pub fn path(&self) -> &Path {
        &self.path
    }

    /// Dumps out a YAML string of the values.
    pub fn to_yaml_string(&self) -> anyhow::Result<String> {
        serde_yaml::to_string(&self.values)
            .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
    }

    /// Regenerates the relay credentials.
    ///
    /// When `save` is `true` the new credentials are also written back to the
    /// credentials file in the config folder; otherwise they only replace the
    /// in-memory credentials.
    pub fn regenerate_credentials(&mut self, save: bool) -> anyhow::Result<()> {
        let creds = Credentials::generate();
        if save {
            creds.save(&self.path)?;
        }
        self.credentials = Some(creds);
        Ok(())
    }

    /// Returns the current credentials, if any are set.
    pub fn credentials(&self) -> Option<&Credentials> {
        self.credentials.as_ref()
    }
1885
    /// Set new credentials.
    ///
    /// This also writes the credentials back to the file: `Some` saves the new
    /// credentials, `None` removes the credentials file if it exists. Returns
    /// `Ok(false)` without touching disk when the credentials are unchanged,
    /// and `Ok(true)` when they were replaced.
    pub fn replace_credentials(
        &mut self,
        credentials: Option<Credentials>,
    ) -> anyhow::Result<bool> {
        // No-op if nothing changes; avoids needless disk writes.
        if self.credentials == credentials {
            return Ok(false);
        }

        match credentials {
            Some(ref creds) => {
                creds.save(&self.path)?;
            }
            None => {
                // Clearing credentials: delete the file, but only if present.
                let path = Credentials::path(&self.path);
                if fs::metadata(&path).is_ok() {
                    fs::remove_file(&path).with_context(|| {
                        ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path)
                    })?;
                }
            }
        }

        self.credentials = credentials;
        Ok(true)
    }
1914
    /// Returns `true` if the config is ready to use.
    ///
    /// "Ready" here means credentials are present.
    pub fn has_credentials(&self) -> bool {
        self.credentials.is_some()
    }

    /// Returns the secret key if set.
    pub fn secret_key(&self) -> Option<&SecretKey> {
        self.credentials.as_ref().map(|x| &x.secret_key)
    }

    /// Returns the public key if set.
    pub fn public_key(&self) -> Option<&PublicKey> {
        self.credentials.as_ref().map(|x| &x.public_key)
    }

    /// Returns the relay ID.
    pub fn relay_id(&self) -> Option<&RelayId> {
        self.credentials.as_ref().map(|x| &x.id)
    }
1934
    /// Returns the relay mode.
    pub fn relay_mode(&self) -> RelayMode {
        self.values.relay.mode
    }

    /// Returns the instance type of relay.
    pub fn relay_instance(&self) -> RelayInstance {
        self.values.relay.instance
    }

    /// Returns the upstream target as descriptor.
    pub fn upstream_descriptor(&self) -> &UpstreamDescriptor<'_> {
        &self.values.relay.upstream
    }

    /// Returns the custom HTTP "Host" header.
    pub fn http_host_header(&self) -> Option<&str> {
        self.values.http.host_header.as_deref()
    }

    /// Returns the listen address.
    pub fn listen_addr(&self) -> SocketAddr {
        (self.values.relay.host, self.values.relay.port).into()
    }

    /// Returns the TLS listen address.
    ///
    /// `None` unless a TLS identity bundle is configured.
    pub fn tls_listen_addr(&self) -> Option<SocketAddr> {
        if self.values.relay.tls_identity_path.is_some() {
            // 3443 is the default TLS port when none is configured.
            let port = self.values.relay.tls_port.unwrap_or(3443);
            Some((self.values.relay.host, port).into())
        } else {
            None
        }
    }

    /// Returns the path to the identity bundle
    pub fn tls_identity_path(&self) -> Option<&Path> {
        self.values.relay.tls_identity_path.as_deref()
    }

    /// Returns the password for the identity bundle
    pub fn tls_identity_password(&self) -> Option<&str> {
        self.values.relay.tls_identity_password.as_deref()
    }

    /// Returns `true` when project IDs should be overridden rather than validated.
    ///
    /// Defaults to `false`, which requires project ID validation.
    pub fn override_project_ids(&self) -> bool {
        self.values.relay.override_project_ids
    }

    /// Returns `true` if Relay requires authentication for readiness.
    ///
    /// See [`ReadinessCondition`] for more information.
    pub fn requires_auth(&self) -> bool {
        match self.values.auth.ready {
            // Only managed relays authenticate with the upstream.
            ReadinessCondition::Authenticated => self.relay_mode() == RelayMode::Managed,
            ReadinessCondition::Always => false,
        }
    }
1996
1997    /// Returns the interval at which Realy should try to re-authenticate with the upstream.
1998    ///
1999    /// Always disabled in processing mode.
2000    pub fn http_auth_interval(&self) -> Option<Duration> {
2001        if self.processing_enabled() {
2002            return None;
2003        }
2004
2005        match self.values.http.auth_interval {
2006            None | Some(0) => None,
2007            Some(secs) => Some(Duration::from_secs(secs)),
2008        }
2009    }
2010
    /// The maximum time of experiencing uninterrupted network failures until Relay considers that
    /// it has encountered a network outage.
    pub fn http_outage_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.http.outage_grace_period)
    }

    /// Time Relay waits before retrying an upstream request.
    ///
    /// Before going into a network outage, Relay may fail to make upstream
    /// requests. This is the time Relay waits before retrying the same request.
    pub fn http_retry_delay(&self) -> Duration {
        Duration::from_secs(self.values.http.retry_delay)
    }

    /// Time of continued project request failures before Relay emits an error.
    pub fn http_project_failure_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.project_failure_interval)
    }

    /// Content encoding of upstream requests.
    pub fn http_encoding(&self) -> HttpEncoding {
        self.values.http.encoding
    }

    /// Returns whether metrics should be sent globally through a shared endpoint.
    pub fn http_global_metrics(&self) -> bool {
        self.values.http.global_metrics
    }

    /// Returns whether this Relay should emit outcomes.
    ///
    /// This is `true` either if `outcomes.emit_outcomes` is explicitly enabled, or if this Relay is
    /// in processing mode.
    pub fn emit_outcomes(&self) -> EmitOutcomes {
        // Processing relays always emit real outcomes, regardless of config.
        if self.processing_enabled() {
            return EmitOutcomes::AsOutcomes;
        }
        self.values.outcomes.emit_outcomes
    }

    /// Returns whether this Relay should emit client outcomes
    ///
    /// Relays that do not emit client outcomes will forward client received outcomes
    /// directly to the next relay in the chain as client report envelope.  This is only done
    /// if this relay emits outcomes at all. A relay that will not emit outcomes
    /// will forward the envelope unchanged.
    ///
    /// This flag can be explicitly disabled on processing relays as well to prevent the
    /// emitting of client outcomes to the kafka topic.
    pub fn emit_client_outcomes(&self) -> bool {
        self.values.outcomes.emit_client_outcomes
    }

    /// Returns the maximum number of outcomes that are batched before being sent
    pub fn outcome_batch_size(&self) -> usize {
        self.values.outcomes.batch_size
    }

    /// Returns the maximum interval that an outcome may be batched
    pub fn outcome_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.outcomes.batch_interval)
    }

    /// The originating source of the outcome
    pub fn outcome_source(&self) -> Option<&str> {
        self.values.outcomes.source.as_deref()
    }

    /// Returns the width of the buckets into which outcomes are aggregated, in seconds.
    pub fn outcome_aggregator(&self) -> &OutcomeAggregatorConfig {
        &self.values.outcomes.aggregator
    }

    /// Returns logging configuration.
    pub fn logging(&self) -> &relay_log::LogConfig {
        &self.values.logging
    }

    /// Returns the Sentry error-reporting configuration for Relay itself.
    pub fn sentry(&self) -> &relay_log::SentryConfig {
        &self.values.sentry
    }
2093
2094    /// Returns the socket addresses for statsd.
2095    ///
2096    /// If stats is disabled an empty vector is returned.
2097    pub fn statsd_addrs(&self) -> anyhow::Result<Vec<SocketAddr>> {
2098        if let Some(ref addr) = self.values.metrics.statsd {
2099            let addrs = addr
2100                .as_str()
2101                .to_socket_addrs()
2102                .with_context(|| ConfigError::file(ConfigErrorKind::InvalidValue, &self.path))?
2103                .collect();
2104            Ok(addrs)
2105        } else {
2106            Ok(vec![])
2107        }
2108    }
2109
    /// Returns the prefix for statsd metrics.
    pub fn metrics_prefix(&self) -> &str {
        &self.values.metrics.prefix
    }

    /// Returns the default tags for statsd metrics.
    pub fn metrics_default_tags(&self) -> &BTreeMap<String, String> {
        &self.values.metrics.default_tags
    }

    /// Returns the name of the hostname tag that should be attached to each outgoing metric.
    pub fn metrics_hostname_tag(&self) -> Option<&str> {
        self.values.metrics.hostname_tag.as_deref()
    }

    /// Returns the global sample rate for all metrics.
    pub fn metrics_sample_rate(&self) -> f32 {
        self.values.metrics.sample_rate
    }

    /// Returns whether local metric aggregation should be enabled.
    pub fn metrics_aggregate(&self) -> bool {
        self.values.metrics.aggregate
    }

    /// Returns whether high cardinality tags should be removed before sending metrics.
    pub fn metrics_allow_high_cardinality_tags(&self) -> bool {
        self.values.metrics.allow_high_cardinality_tags
    }

    /// Returns the interval for periodic metrics emitted from Relay.
    ///
    /// `None` if periodic metrics are disabled (interval configured as `0`).
    pub fn metrics_periodic_interval(&self) -> Option<Duration> {
        match self.values.metrics.periodic_secs {
            0 => None,
            secs => Some(Duration::from_secs(secs)),
        }
    }
2149
    /// Returns the default timeout for all upstream HTTP requests.
    pub fn http_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.timeout.into())
    }

    /// Returns the connection timeout for all upstream HTTP requests.
    pub fn http_connection_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.connection_timeout.into())
    }

    /// Returns the failed upstream request retry interval.
    pub fn http_max_retry_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.max_retry_interval.into())
    }

    /// Returns the expiry timeout for cached projects.
    pub fn project_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_expiry.into())
    }

    /// Returns `true` if the full project state should be requested from upstream.
    pub fn request_full_project_config(&self) -> bool {
        self.values.cache.project_request_full_config
    }

    /// Returns the expiry timeout for cached relay infos (public keys).
    pub fn relay_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.relay_expiry.into())
    }

    /// Returns the maximum number of buffered envelopes
    pub fn envelope_buffer_size(&self) -> usize {
        // Saturates to usize::MAX if the configured value does not fit.
        self.values
            .cache
            .envelope_buffer_size
            .try_into()
            .unwrap_or(usize::MAX)
    }

    /// Returns the expiry timeout for cached misses before trying to refetch.
    pub fn cache_miss_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.miss_expiry.into())
    }

    /// Returns the grace period for project caches.
    pub fn project_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_grace_period.into())
    }

    /// Returns the refresh interval for a project.
    ///
    /// Validates the refresh time to be between the grace period and expiry.
    // NOTE(review): no validation happens in this getter; presumably the
    // range check occurs during config loading — confirm.
    pub fn project_refresh_interval(&self) -> Option<Duration> {
        self.values
            .cache
            .project_refresh_interval
            .map(Into::into)
            .map(Duration::from_secs)
    }

    /// Returns the duration in which batchable project config queries are
    /// collected before sending them in a single request.
    pub fn query_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.batch_interval.into())
    }

    /// Returns the duration in which downstream relays are requested from upstream.
    pub fn downstream_relays_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.downstream_relays_batch_interval.into())
    }

    /// Returns the interval in seconds in which local project configurations should be reloaded.
    pub fn local_cache_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.file_interval.into())
    }

    /// Returns the interval in seconds in which fresh global configs should be
    /// fetched from upstream.
    pub fn global_config_fetch_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.global_config_fetch_interval.into())
    }
2231
2232    /// Returns the path of the buffer file if the `cache.persistent_envelope_buffer.path` is configured.
2233    ///
2234    /// In case a partition with id > 0 is supplied, the filename of the envelopes path will be
2235    /// suffixed with `.{partition_id}`.
2236    pub fn spool_envelopes_path(&self, partition_id: u8) -> Option<PathBuf> {
2237        let mut path = self
2238            .values
2239            .spool
2240            .envelopes
2241            .path
2242            .as_ref()
2243            .map(|path| path.to_owned())?;
2244
2245        if partition_id == 0 {
2246            return Some(path);
2247        }
2248
2249        let file_name = path.file_name().and_then(|f| f.to_str())?;
2250        let new_file_name = format!("{file_name}.{partition_id}");
2251        path.set_file_name(new_file_name);
2252
2253        Some(path)
2254    }
2255
    /// The maximum size of the buffer, in bytes.
    pub fn spool_envelopes_max_disk_size(&self) -> usize {
        self.values.spool.envelopes.max_disk_size.as_bytes()
    }

    /// Number of encoded envelope bytes that need to be accumulated before
    /// flushing one batch to disk.
    pub fn spool_envelopes_batch_size_bytes(&self) -> usize {
        self.values.spool.envelopes.batch_size_bytes.as_bytes()
    }

    /// Returns the time after which we drop envelopes as a [`Duration`] object.
    pub fn spool_envelopes_max_age(&self) -> Duration {
        Duration::from_secs(self.values.spool.envelopes.max_envelope_delay_secs)
    }

    /// Returns the refresh frequency for disk usage monitoring as a [`Duration`] object.
    pub fn spool_disk_usage_refresh_frequency_ms(&self) -> Duration {
        Duration::from_millis(self.values.spool.envelopes.disk_usage_refresh_frequency_ms)
    }

    /// Returns the maximum number of envelopes that can be put in the bounded buffer.
    pub fn spool_max_backpressure_envelopes(&self) -> usize {
        self.values.spool.envelopes.max_backpressure_envelopes
    }

    /// Returns the relative memory usage up to which the disk buffer will unspool envelopes.
    pub fn spool_max_backpressure_memory_percent(&self) -> f32 {
        self.values.spool.envelopes.max_backpressure_memory_percent
    }

    /// Returns the number of partitions for the buffer.
    pub fn spool_partitions(&self) -> NonZeroU8 {
        self.values.spool.envelopes.partitions
    }
2291
    /// Returns the maximum size of an event payload in bytes.
    pub fn max_event_size(&self) -> usize {
        self.values.limits.max_event_size.as_bytes()
    }

    /// Returns the maximum size of each attachment.
    pub fn max_attachment_size(&self) -> usize {
        self.values.limits.max_attachment_size.as_bytes()
    }

    /// Returns the maximum combined size of attachments or payloads containing attachments
    /// (minidump, unreal, standalone attachments) in bytes.
    pub fn max_attachments_size(&self) -> usize {
        self.values.limits.max_attachments_size.as_bytes()
    }

    /// Returns the maximum combined size of client reports in bytes.
    pub fn max_client_reports_size(&self) -> usize {
        self.values.limits.max_client_reports_size.as_bytes()
    }

    /// Returns the maximum payload size of a monitor check-in in bytes.
    pub fn max_check_in_size(&self) -> usize {
        self.values.limits.max_check_in_size.as_bytes()
    }

    /// Returns the maximum payload size of a log in bytes.
    pub fn max_log_size(&self) -> usize {
        self.values.limits.max_log_size.as_bytes()
    }

    /// Returns the maximum payload size of a span in bytes.
    pub fn max_span_size(&self) -> usize {
        self.values.limits.max_span_size.as_bytes()
    }

    /// Returns the maximum payload size of an item container in bytes.
    pub fn max_container_size(&self) -> usize {
        self.values.limits.max_container_size.as_bytes()
    }

    /// Returns the maximum size of an envelope payload in bytes.
    ///
    /// Individual item size limits still apply.
    pub fn max_envelope_size(&self) -> usize {
        self.values.limits.max_envelope_size.as_bytes()
    }

    /// Returns the maximum number of sessions per envelope.
    pub fn max_session_count(&self) -> usize {
        self.values.limits.max_session_count
    }

    /// Returns the maximum number of standalone spans per envelope.
    pub fn max_span_count(&self) -> usize {
        self.values.limits.max_span_count
    }

    /// Returns the maximum number of logs per envelope.
    pub fn max_log_count(&self) -> usize {
        self.values.limits.max_log_count
    }

    /// Returns the maximum payload size of a statsd metric in bytes.
    pub fn max_statsd_size(&self) -> usize {
        self.values.limits.max_statsd_size.as_bytes()
    }

    /// Returns the maximum payload size of metric buckets in bytes.
    pub fn max_metric_buckets_size(&self) -> usize {
        self.values.limits.max_metric_buckets_size.as_bytes()
    }

    /// Whether metric stats are collected and emitted.
    ///
    /// Metric stats are always collected and emitted when processing
    /// is enabled.
    pub fn metric_stats_enabled(&self) -> bool {
        self.values.sentry_metrics.metric_stats_enabled || self.values.processing.enabled
    }
2372
    /// Returns the maximum payload size for general API requests.
    pub fn max_api_payload_size(&self) -> usize {
        self.values.limits.max_api_payload_size.as_bytes()
    }

    /// Returns the maximum payload size for file uploads and chunks.
    pub fn max_api_file_upload_size(&self) -> usize {
        self.values.limits.max_api_file_upload_size.as_bytes()
    }

    /// Returns the maximum payload size for chunks
    pub fn max_api_chunk_upload_size(&self) -> usize {
        self.values.limits.max_api_chunk_upload_size.as_bytes()
    }

    /// Returns the maximum payload size for a profile
    pub fn max_profile_size(&self) -> usize {
        self.values.limits.max_profile_size.as_bytes()
    }

    /// Returns the maximum payload size for a compressed replay.
    pub fn max_replay_compressed_size(&self) -> usize {
        self.values.limits.max_replay_compressed_size.as_bytes()
    }

    /// Returns the maximum payload size for an uncompressed replay.
    pub fn max_replay_uncompressed_size(&self) -> usize {
        self.values.limits.max_replay_uncompressed_size.as_bytes()
    }

    /// Returns the maximum message size for an uncompressed replay.
    ///
    /// This is greater than max_replay_compressed_size because
    /// it can include additional metadata about the replay in
    /// addition to the recording.
    pub fn max_replay_message_size(&self) -> usize {
        self.values.limits.max_replay_message_size.as_bytes()
    }

    /// Returns the maximum number of active requests
    pub fn max_concurrent_requests(&self) -> usize {
        self.values.limits.max_concurrent_requests
    }

    /// Returns the maximum number of active queries
    pub fn max_concurrent_queries(&self) -> usize {
        self.values.limits.max_concurrent_queries
    }

    /// The maximum number of seconds a query is allowed to take across retries.
    pub fn query_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.query_timeout)
    }

    /// The maximum number of seconds to wait for pending envelopes after receiving a shutdown
    /// signal.
    pub fn shutdown_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.shutdown_timeout)
    }

    /// Returns the server keep-alive timeout in seconds.
    ///
    /// By default keep alive is set to 5 seconds.
    pub fn keepalive_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.keepalive_timeout)
    }

    /// Returns the server idle timeout in seconds.
    pub fn idle_timeout(&self) -> Option<Duration> {
        self.values.limits.idle_timeout.map(Duration::from_secs)
    }

    /// Returns the maximum connections.
    pub fn max_connections(&self) -> Option<usize> {
        self.values.limits.max_connections
    }

    /// TCP listen backlog to configure on Relay's listening socket.
    pub fn tcp_listen_backlog(&self) -> u32 {
        self.values.limits.tcp_listen_backlog
    }

    /// Returns the number of cores to use for thread pools.
    pub fn cpu_concurrency(&self) -> usize {
        self.values.limits.max_thread_count
    }

    /// Returns the number of tasks that can run concurrently in the worker pool.
    pub fn pool_concurrency(&self) -> usize {
        self.values.limits.max_pool_concurrency
    }

    /// Returns the maximum size of a project config query.
    pub fn query_batch_size(&self) -> usize {
        self.values.cache.batch_size
    }

    /// Get filename for static project config.
    pub fn project_configs_path(&self) -> PathBuf {
        self.path.join("projects")
    }

    /// True if the Relay should do processing.
    pub fn processing_enabled(&self) -> bool {
        self.values.processing.enabled
    }
2479
    /// Level of normalization for Relay to apply to incoming data.
    pub fn normalization_level(&self) -> NormalizationLevel {
        self.values.normalization.level
    }

    /// The path to the GeoIp database required for event processing.
    ///
    /// The dedicated `geoip.path` setting takes precedence over the legacy
    /// `processing.geoip_path` setting.
    pub fn geoip_path(&self) -> Option<&Path> {
        self.values
            .geoip
            .path
            .as_deref()
            .or(self.values.processing.geoip_path.as_deref())
    }

    /// Maximum future timestamp of ingested data.
    ///
    /// Events past this timestamp will be adjusted to `now()`. Sessions will be dropped.
    pub fn max_secs_in_future(&self) -> i64 {
        self.values.processing.max_secs_in_future.into()
    }

    /// Maximum age of ingested sessions. Older sessions will be dropped.
    pub fn max_session_secs_in_past(&self) -> i64 {
        self.values.processing.max_session_secs_in_past.into()
    }

    /// Configuration name and list of Kafka configuration parameters for a given topic.
    pub fn kafka_configs(
        &self,
        topic: KafkaTopic,
    ) -> Result<KafkaTopicConfig<'_>, KafkaConfigError> {
        self.values.processing.topics.get(topic).kafka_configs(
            &self.values.processing.kafka_config,
            &self.values.processing.secondary_kafka_configs,
        )
    }

    /// Whether to validate the topics against Kafka.
    pub fn kafka_validate_topics(&self) -> bool {
        self.values.processing.kafka_validate_topics
    }

    /// All unused but configured topic assignments.
    pub fn unused_topic_assignments(&self) -> &relay_kafka::Unused {
        &self.values.processing.topics.unused
    }
2526
2527    /// Redis servers to connect to for project configs, cardinality limits,
2528    /// rate limiting, and metrics metadata.
2529    pub fn redis(&self) -> Option<RedisConfigsRef> {
2530        let redis_configs = self.values.processing.redis.as_ref()?;
2531
2532        Some(build_redis_configs(
2533            redis_configs,
2534            self.cpu_concurrency() as u32,
2535        ))
2536    }
2537
2538    /// Chunk size of attachments in bytes.
2539    pub fn attachment_chunk_size(&self) -> usize {
2540        self.values.processing.attachment_chunk_size.as_bytes()
2541    }
2542
2543    /// Maximum metrics batch size in bytes.
2544    pub fn metrics_max_batch_size_bytes(&self) -> usize {
2545        self.values.aggregator.max_flush_bytes
2546    }
2547
2548    /// Default prefix to use when looking up project configs in Redis. This is only done when
2549    /// Relay is in processing mode.
2550    pub fn projectconfig_cache_prefix(&self) -> &str {
2551        &self.values.processing.projectconfig_cache_prefix
2552    }
2553
2554    /// Maximum rate limit to report to clients in seconds.
2555    pub fn max_rate_limit(&self) -> Option<u64> {
2556        self.values.processing.max_rate_limit.map(u32::into)
2557    }
2558
2559    /// Cache vacuum interval for the cardinality limiter in memory cache.
2560    ///
2561    /// The cache will scan for expired values based on this interval.
2562    pub fn cardinality_limiter_cache_vacuum_interval(&self) -> Duration {
2563        Duration::from_secs(self.values.cardinality_limiter.cache_vacuum_interval)
2564    }
2565
2566    /// Interval to refresh internal health checks.
2567    pub fn health_refresh_interval(&self) -> Duration {
2568        Duration::from_millis(self.values.health.refresh_interval_ms)
2569    }
2570
2571    /// Maximum memory watermark in bytes.
2572    pub fn health_max_memory_watermark_bytes(&self) -> u64 {
2573        self.values
2574            .health
2575            .max_memory_bytes
2576            .as_ref()
2577            .map_or(u64::MAX, |b| b.as_bytes() as u64)
2578    }
2579
2580    /// Maximum memory watermark as a percentage of maximum system memory.
2581    pub fn health_max_memory_watermark_percent(&self) -> f32 {
2582        self.values.health.max_memory_percent
2583    }
2584
2585    /// Health check probe timeout.
2586    pub fn health_probe_timeout(&self) -> Duration {
2587        Duration::from_millis(self.values.health.probe_timeout_ms)
2588    }
2589
2590    /// Refresh frequency for polling new memory stats.
2591    pub fn memory_stat_refresh_frequency_ms(&self) -> u64 {
2592        self.values.health.memory_stat_refresh_frequency_ms
2593    }
2594
2595    /// Maximum amount of COGS measurements buffered in memory.
2596    pub fn cogs_max_queue_size(&self) -> u64 {
2597        self.values.cogs.max_queue_size
2598    }
2599
2600    /// Resource ID to use for Relay COGS measurements.
2601    pub fn cogs_relay_resource_id(&self) -> &str {
2602        &self.values.cogs.relay_resource_id
2603    }
2604
2605    /// Returns configuration for the default metrics aggregator.
2606    pub fn default_aggregator_config(&self) -> &AggregatorServiceConfig {
2607        &self.values.aggregator
2608    }
2609
2610    /// Returns configuration for non-default metrics aggregator.
2611    pub fn secondary_aggregator_configs(&self) -> &Vec<ScopedAggregatorConfig> {
2612        &self.values.secondary_aggregators
2613    }
2614
2615    /// Returns aggregator config for a given metrics namespace.
2616    pub fn aggregator_config_for(&self, namespace: MetricNamespace) -> &AggregatorServiceConfig {
2617        for entry in &self.values.secondary_aggregators {
2618            if entry.condition.matches(Some(namespace)) {
2619                return &entry.config;
2620            }
2621        }
2622        &self.values.aggregator
2623    }
2624
2625    /// Return the statically configured Relays.
2626    pub fn static_relays(&self) -> &HashMap<RelayId, RelayInfo> {
2627        &self.values.auth.static_relays
2628    }
2629
2630    /// Returns `true` if unknown items should be accepted and forwarded.
2631    pub fn accept_unknown_items(&self) -> bool {
2632        let forward = self.values.routing.accept_unknown_items;
2633        forward.unwrap_or_else(|| !self.processing_enabled())
2634    }
2635
2636    /// Returns `true` if we should produce TraceItem spans on `snuba-items`.
2637    pub fn produce_protobuf_spans(&self) -> bool {
2638        self.values.processing.span_producers.produce_protobuf
2639    }
2640
2641    /// Returns `true` if we should produce JSON spans on `ingest-spans`.
2642    pub fn produce_json_spans(&self) -> bool {
2643        self.values.processing.span_producers.produce_json
2644    }
2645}
2646
2647impl Default for Config {
2648    fn default() -> Self {
2649        Self {
2650            values: ConfigValues::default(),
2651            credentials: None,
2652            path: PathBuf::new(),
2653        }
2654    }
2655}
2656
#[cfg(test)]
mod tests {

    use super::*;

    /// Regression test for renaming the envelope buffer flags.
    #[test]
    fn test_event_buffer_size() {
        let yaml = r###"
cache:
    event_buffer_size: 1000000
    event_expiry: 1800
"###;

        // The legacy `event_*` keys must still deserialize into the renamed
        // `envelope_*` fields.
        let values = serde_yaml::from_str::<ConfigValues>(yaml).unwrap();
        assert_eq!(values.cache.envelope_buffer_size, 1_000_000);
        assert_eq!(values.cache.envelope_expiry, 1800);
    }

    #[test]
    fn test_emit_outcomes() {
        // Each case must round-trip: JSON -> EmitOutcomes -> same JSON.
        let cases = [
            ("true", EmitOutcomes::AsOutcomes),
            ("false", EmitOutcomes::None),
            ("\"as_client_reports\"", EmitOutcomes::AsClientReports),
        ];
        for (serialized, deserialized) in &cases {
            let value: EmitOutcomes = serde_json::from_str(serialized).unwrap();
            assert_eq!(value, *deserialized);
            assert_eq!(serde_json::to_string(&value).unwrap(), *serialized);
        }
    }

    #[test]
    fn test_emit_outcomes_invalid() {
        let result = serde_json::from_str::<EmitOutcomes>("asdf");
        assert!(result.is_err());
    }
}
2693}