1use std::collections::{BTreeMap, HashMap};
2use std::error::Error;
3use std::io::Write;
4use std::net::{IpAddr, SocketAddr};
5use std::num::{NonZeroU8, NonZeroU16};
6use std::path::{Path, PathBuf};
7use std::str::FromStr;
8use std::time::Duration;
9use std::{env, fmt, fs, io};
10
11use anyhow::Context;
12use relay_auth::{PublicKey, RelayId, SecretKey, generate_key_pair, generate_relay_id};
13use relay_common::Dsn;
14use relay_kafka::{
15 ConfigError as KafkaConfigError, KafkaConfigParam, KafkaTopic, KafkaTopicConfig,
16 TopicAssignments,
17};
18use relay_metrics::MetricNamespace;
19use serde::de::{DeserializeOwned, Unexpected, Visitor};
20use serde::{Deserialize, Deserializer, Serialize, Serializer};
21use uuid::Uuid;
22
23use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
24use crate::byte_size::ByteSize;
25use crate::upstream::UpstreamDescriptor;
26use crate::{RedisConfig, RedisConfigs, RedisConfigsRef, build_redis_configs};
27
/// Grace period in seconds before a detected network outage is acted upon.
const DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD: u64 = 10;

/// Comment header written to the top of every generated YAML config file.
static CONFIG_YAML_HEADER: &str = r###"# Please see the relevant documentation.
# Performance tuning: https://docs.sentry.io/product/relay/operating-guidelines/
# All config options: https://docs.sentry.io/product/relay/options/
"###;
34
/// The category of a failure that can occur while loading or writing config.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[non_exhaustive]
pub enum ConfigErrorKind {
    /// The config file could not be opened for reading.
    CouldNotOpenFile,
    /// The config file could not be created or written.
    CouldNotWriteFile,
    /// The YAML config file could not be parsed.
    BadYaml,
    /// The JSON config file could not be parsed.
    BadJson,
    /// A configuration value is malformed or out of range.
    InvalidValue,
    /// Processing was enabled, but this binary was built without the feature.
    ProcessingNotAvailable,
}
53
54impl fmt::Display for ConfigErrorKind {
55 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
56 match self {
57 Self::CouldNotOpenFile => write!(f, "could not open config file"),
58 Self::CouldNotWriteFile => write!(f, "could not write config file"),
59 Self::BadYaml => write!(f, "could not parse yaml config file"),
60 Self::BadJson => write!(f, "could not parse json config file"),
61 Self::InvalidValue => write!(f, "invalid config value"),
62 Self::ProcessingNotAvailable => write!(
63 f,
64 "was not compiled with processing, cannot enable processing"
65 ),
66 }
67 }
68}
69
/// Identifies where an invalid configuration value originated, if known.
#[derive(Debug, Default)]
enum ConfigErrorSource {
    /// No source location is attached to the error.
    #[default]
    None,
    /// The error originates from the given config file.
    File(PathBuf),
    /// The error originates from an overridden config field.
    FieldOverride(String),
}
81
82impl fmt::Display for ConfigErrorSource {
83 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
84 match self {
85 ConfigErrorSource::None => Ok(()),
86 ConfigErrorSource::File(file_name) => {
87 write!(f, " (file {})", file_name.display())
88 }
89 ConfigErrorSource::FieldOverride(name) => write!(f, " (field {name})"),
90 }
91 }
92}
93
/// Error produced while loading, validating, or persisting the configuration.
#[derive(Debug)]
pub struct ConfigError {
    // Where the error originated (file, field override, or unknown).
    source: ConfigErrorSource,
    // The broad failure category.
    kind: ConfigErrorKind,
}
100
101impl ConfigError {
102 #[inline]
103 fn new(kind: ConfigErrorKind) -> Self {
104 Self {
105 source: ConfigErrorSource::None,
106 kind,
107 }
108 }
109
110 #[inline]
111 fn field(field: &'static str) -> Self {
112 Self {
113 source: ConfigErrorSource::FieldOverride(field.to_owned()),
114 kind: ConfigErrorKind::InvalidValue,
115 }
116 }
117
118 #[inline]
119 fn file(kind: ConfigErrorKind, p: impl AsRef<Path>) -> Self {
120 Self {
121 source: ConfigErrorSource::File(p.as_ref().to_path_buf()),
122 kind,
123 }
124 }
125
126 pub fn kind(&self) -> ConfigErrorKind {
128 self.kind
129 }
130}
131
132impl fmt::Display for ConfigError {
133 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
134 write!(f, "{}{}", self.kind(), self.source)
135 }
136}
137
138impl Error for ConfigError {}
139
/// On-disk serialization formats supported for config objects.
enum ConfigFormat {
    /// YAML, used for the main config file.
    Yaml,
    /// JSON, used for the credentials file.
    Json,
}
144
145impl ConfigFormat {
146 pub fn extension(&self) -> &'static str {
147 match self {
148 ConfigFormat::Yaml => "yml",
149 ConfigFormat::Json => "json",
150 }
151 }
152}
153
/// A named configuration object that can be loaded from and saved to a file
/// inside a base directory.
trait ConfigObject: DeserializeOwned + Serialize {
    /// The file format (YAML or JSON) this object is persisted in.
    fn format() -> ConfigFormat;

    /// The file stem (without extension) of this object's file.
    fn name() -> &'static str;

    /// Full path of this object's file inside the `base` directory.
    fn path(base: &Path) -> PathBuf {
        base.join(format!("{}.{}", Self::name(), Self::format().extension()))
    }

    /// Loads the object from its file under `base`.
    ///
    /// During deserialization, values of the form `${file:...}` are
    /// substituted from files (relative to `base`) and `${...}` from
    /// environment variables, via `serde_vars`.
    fn load(base: &Path) -> anyhow::Result<Self> {
        let path = Self::path(base);

        let f = fs::File::open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, &path))?;
        let f = io::BufReader::new(f);

        let mut source = {
            let file = serde_vars::FileSource::default()
                .with_variable_prefix("${file:")
                .with_variable_suffix("}")
                .with_base_path(base);
            let env = serde_vars::EnvSource::default()
                .with_variable_prefix("${")
                .with_variable_suffix("}");
            (file, env)
        };
        match Self::format() {
            ConfigFormat::Yaml => {
                serde_vars::deserialize(serde_yaml::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadYaml, &path))
            }
            ConfigFormat::Json => {
                serde_vars::deserialize(&mut serde_json::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadJson, &path))
            }
        }
    }

    /// Saves the object to its file under `base`, truncating any existing
    /// file. On Unix the file is created with mode 0600, since config files
    /// can contain secrets.
    fn save(&self, base: &Path) -> anyhow::Result<()> {
        let path = Self::path(base);
        let mut options = fs::OpenOptions::new();
        options.write(true).truncate(true).create(true);

        #[cfg(unix)]
        {
            use std::os::unix::fs::OpenOptionsExt;
            options.mode(0o600);
        }

        let mut f = options
            .open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?;

        match Self::format() {
            ConfigFormat::Yaml => {
                // YAML files get the documentation header prepended.
                f.write_all(CONFIG_YAML_HEADER.as_bytes())?;
                serde_yaml::to_writer(&mut f, self)
                    .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?
            }
            ConfigFormat::Json => serde_json::to_writer_pretty(&mut f, self)
                .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?,
        }

        // Trailing newline is cosmetic; errors here are deliberately ignored.
        f.write_all(b"\n").ok();

        Ok(())
    }
}
228
/// Command-line / environment overrides applied on top of the loaded config.
///
/// Each field is optional; `None` leaves the corresponding config value
/// untouched. All values arrive as strings and are parsed when applied.
#[derive(Debug, Default)]
pub struct OverridableConfig {
    /// Relay operation mode override ("managed" or "proxy").
    pub mode: Option<String>,
    /// Relay instance type override ("default" or "canary").
    pub instance: Option<String>,
    /// Log level override.
    pub log_level: Option<String>,
    /// Log format override.
    pub log_format: Option<String>,
    /// Upstream URL override.
    pub upstream: Option<String>,
    /// Upstream DSN override (alternative way to specify the upstream).
    pub upstream_dsn: Option<String>,
    /// Listen host override.
    pub host: Option<String>,
    /// Listen port override.
    pub port: Option<String>,
    /// Processing-enabled override — presumably "true"/"false"; confirm at apply site.
    pub processing: Option<String>,
    /// Kafka broker URL override.
    pub kafka_url: Option<String>,
    /// Redis server URL override.
    pub redis_url: Option<String>,
    /// Relay id override for the credentials.
    pub id: Option<String>,
    /// Secret key override for the credentials.
    pub secret_key: Option<String>,
    /// Public key override for the credentials.
    pub public_key: Option<String>,
    /// Outcome source override.
    pub outcome_source: Option<String>,
    /// Shutdown timeout override.
    pub shutdown_timeout: Option<String>,
    /// Server name reported in events (override).
    pub server_name: Option<String>,
}
268
/// This relay's own credentials: its key pair plus its unique id.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct Credentials {
    /// The secret key of the relay.
    pub secret_key: SecretKey,
    /// The public key of the relay.
    pub public_key: PublicKey,
    /// The unique identifier of the relay.
    pub id: RelayId,
}
279
280impl Credentials {
281 pub fn generate() -> Self {
283 relay_log::info!("generating new relay credentials");
284 let (sk, pk) = generate_key_pair();
285 Self {
286 secret_key: sk,
287 public_key: pk,
288 id: generate_relay_id(),
289 }
290 }
291
292 pub fn to_json_string(&self) -> anyhow::Result<String> {
294 serde_json::to_string(self)
295 .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
296 }
297}
298
// Credentials are persisted as `credentials.json` next to the config file.
impl ConfigObject for Credentials {
    fn format() -> ConfigFormat {
        ConfigFormat::Json
    }
    fn name() -> &'static str {
        "credentials"
    }
}
307
/// Information about another relay, keyed by its public key.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RelayInfo {
    /// The relay's public key.
    pub public_key: PublicKey,

    /// Whether the relay is an internal relay. Defaults to `false`.
    #[serde(default)]
    pub internal: bool,
}
319
320impl RelayInfo {
321 pub fn new(public_key: PublicKey) -> Self {
323 Self {
324 public_key,
325 internal: false,
326 }
327 }
328}
329
/// The operation mode of a relay.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum RelayMode {
    /// The `proxy` operation mode.
    Proxy,

    /// The `managed` operation mode.
    Managed,
}
348
349impl<'de> Deserialize<'de> for RelayMode {
350 fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
351 where
352 D: Deserializer<'de>,
353 {
354 let s = String::deserialize(deserializer)?;
355 match s.as_str() {
356 "proxy" => Ok(RelayMode::Proxy),
357 "managed" => Ok(RelayMode::Managed),
358 "static" => Err(serde::de::Error::custom(
359 "Relay mode 'static' has been removed. Please use 'managed' or 'proxy' instead.",
360 )),
361 other => Err(serde::de::Error::unknown_variant(
362 other,
363 &["proxy", "managed"],
364 )),
365 }
366 }
367}
368
369impl fmt::Display for RelayMode {
370 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
371 match self {
372 RelayMode::Proxy => write!(f, "proxy"),
373 RelayMode::Managed => write!(f, "managed"),
374 }
375 }
376}
377
/// The instance type of a relay deployment.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum RelayInstance {
    /// The default instance type.
    Default,

    /// A canary instance type.
    Canary,
}
388
389impl RelayInstance {
390 pub fn is_canary(&self) -> bool {
392 matches!(self, RelayInstance::Canary)
393 }
394}
395
396impl fmt::Display for RelayInstance {
397 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
398 match self {
399 RelayInstance::Default => write!(f, "default"),
400 RelayInstance::Canary => write!(f, "canary"),
401 }
402 }
403}
404
405impl FromStr for RelayInstance {
406 type Err = fmt::Error;
407
408 fn from_str(s: &str) -> Result<Self, Self::Err> {
409 match s {
410 "canary" => Ok(RelayInstance::Canary),
411 _ => Ok(RelayInstance::Default),
412 }
413 }
414}
415
/// Error returned when parsing an invalid [`RelayMode`].
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ParseRelayModeError;

impl fmt::Display for ParseRelayModeError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Relay mode must be one of: managed or proxy")
    }
}

impl Error for ParseRelayModeError {}
427
428impl FromStr for RelayMode {
429 type Err = ParseRelayModeError;
430
431 fn from_str(s: &str) -> Result<Self, Self::Err> {
432 match s {
433 "proxy" => Ok(RelayMode::Proxy),
434 "managed" => Ok(RelayMode::Managed),
435 _ => Err(ParseRelayModeError),
436 }
437 }
438}
439
/// Returns `true` if `t` equals its type's [`Default`] value.
///
/// Used by `skip_serializing_if` attributes in this module.
fn is_default<T: Default + PartialEq>(t: &T) -> bool {
    t == &T::default()
}
444
/// Best-effort check for whether Relay is running inside a Docker container.
fn is_docker() -> bool {
    // Either the `/.dockerenv` marker file exists, or the process cgroup
    // file mentions a docker path.
    fs::metadata("/.dockerenv").is_ok()
        || fs::read_to_string("/proc/self/cgroup").is_ok_and(|s| s.contains("/docker"))
}
453
454fn default_host() -> IpAddr {
456 if is_docker() {
457 "0.0.0.0".parse().unwrap()
459 } else {
460 "127.0.0.1".parse().unwrap()
461 }
462}
463
/// Condition under which the relay reports itself as ready.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
#[derive(Default)]
pub enum ReadinessCondition {
    /// Ready only once authenticated with the upstream (the default).
    #[default]
    Authenticated,
    /// Always report as ready.
    Always,
}
484
/// General relay settings (the `relay:` section of the config file).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Relay {
    /// The operation mode of this relay.
    pub mode: RelayMode,
    /// The instance type of this relay.
    pub instance: RelayInstance,
    /// The upstream relay or sentry instance.
    pub upstream: UpstreamDescriptor,
    /// An alternative upstream advertised instead of `upstream`, if set.
    pub advertised_upstream: Option<UpstreamDescriptor>,
    /// The host the relay should bind to.
    pub host: IpAddr,
    /// The port to bind for the unencrypted HTTP server.
    pub port: u16,
    /// Optional separate host for internal endpoints.
    pub internal_host: Option<IpAddr>,
    /// Optional separate port for internal endpoints.
    pub internal_port: Option<u16>,
    /// Optional TLS port; never written back when serializing.
    #[serde(skip_serializing)]
    pub tls_port: Option<u16>,
    /// Path to the TLS identity file; never written back when serializing.
    #[serde(skip_serializing)]
    pub tls_identity_path: Option<PathBuf>,
    /// Password for the TLS identity; never written back when serializing.
    #[serde(skip_serializing)]
    pub tls_identity_password: Option<String>,
    // NOTE(review): name suggests project IDs from the request path win over
    // payload contents — confirm against the endpoint that reads this flag.
    #[serde(skip_serializing_if = "is_default")]
    pub override_project_ids: bool,
}
542
543impl Default for Relay {
544 fn default() -> Self {
545 Relay {
546 mode: RelayMode::Managed,
547 instance: RelayInstance::Default,
548 upstream: "https://sentry.io/".parse().unwrap(),
549 advertised_upstream: None,
550 host: default_host(),
551 port: 3000,
552 internal_host: None,
553 internal_port: None,
554 tls_port: None,
555 tls_identity_path: None,
556 tls_identity_password: None,
557 override_project_ids: false,
558 }
559 }
560}
561
/// Statsd metrics settings (the `metrics:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Metrics {
    /// Address of the statsd server to emit metrics to, if any.
    pub statsd: Option<String>,
    /// Optional buffer size for outgoing statsd data.
    pub statsd_buffer_size: Option<usize>,
    /// Prefix prepended to all emitted metric names.
    pub prefix: String,
    /// Tags attached to every metric.
    pub default_tags: BTreeMap<String, String>,
    /// If set, the tag name under which the hostname is reported.
    pub hostname_tag: Option<String>,
    /// Interval in seconds for periodic metrics.
    pub periodic_secs: u64,
}
588
589impl Default for Metrics {
590 fn default() -> Self {
591 Metrics {
592 statsd: None,
593 statsd_buffer_size: None,
594 prefix: "sentry.relay".into(),
595 default_tags: BTreeMap::new(),
596 hostname_tag: None,
597 periodic_secs: 5,
598 }
599 }
600}
601
/// Request and payload size/concurrency limits (the `limits:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Limits {
    /// Maximum number of concurrent HTTP requests handled.
    pub max_concurrent_requests: usize,
    /// Maximum number of concurrent upstream queries.
    pub max_concurrent_queries: usize,
    /// Maximum size of an event payload.
    pub max_event_size: ByteSize,
    /// Maximum size of a single attachment.
    pub max_attachment_size: ByteSize,
    /// Maximum size of an upload.
    pub max_upload_size: ByteSize,
    /// Maximum combined size of all attachments in an envelope.
    pub max_attachments_size: ByteSize,
    /// Maximum size of a client report payload.
    pub max_client_reports_size: ByteSize,
    /// Maximum size of a monitor check-in payload.
    pub max_check_in_size: ByteSize,
    /// Maximum size of a full envelope.
    pub max_envelope_size: ByteSize,
    /// Maximum number of sessions per envelope.
    pub max_session_count: usize,
    /// Maximum payload size accepted on API endpoints.
    pub max_api_payload_size: ByteSize,
    /// Maximum file size for API file uploads.
    pub max_api_file_upload_size: ByteSize,
    /// Maximum chunk size for API chunked uploads.
    pub max_api_chunk_upload_size: ByteSize,
    /// Maximum size of a profile payload.
    pub max_profile_size: ByteSize,
    /// Maximum size of a trace metric payload.
    pub max_trace_metric_size: ByteSize,
    /// Maximum size of a log payload.
    pub max_log_size: ByteSize,
    /// Maximum size of a span payload.
    pub max_span_size: ByteSize,
    /// Maximum size of a container item payload.
    pub max_container_size: ByteSize,
    /// Maximum size of a statsd payload.
    pub max_statsd_size: ByteSize,
    /// Maximum size of a metric buckets payload.
    pub max_metric_buckets_size: ByteSize,
    /// Maximum size of a compressed replay payload.
    pub max_replay_compressed_size: ByteSize,
    // Maximum size of an uncompressed replay payload (legacy alias:
    // `max_replay_size`).
    #[serde(alias = "max_replay_size")]
    max_replay_uncompressed_size: ByteSize,
    /// Maximum size of a replay message.
    pub max_replay_message_size: ByteSize,
    /// Maximum size of a removed attribute key.
    pub max_removed_attribute_key_size: ByteSize,
    /// Maximum number of threads for worker pools.
    pub max_thread_count: usize,
    /// Maximum concurrency per pool worker.
    pub max_pool_concurrency: usize,
    /// Timeout in seconds for upstream queries.
    pub query_timeout: u64,
    /// Timeout in seconds for graceful shutdown.
    pub shutdown_timeout: u64,
    /// HTTP keep-alive timeout in seconds.
    pub keepalive_timeout: u64,
    /// Optional idle connection timeout in seconds.
    pub idle_timeout: Option<u64>,
    /// Optional cap on the number of open connections.
    pub max_connections: Option<usize>,
    /// TCP listen backlog size.
    pub tcp_listen_backlog: u32,
}
711
712impl Default for Limits {
713 fn default() -> Self {
714 Limits {
715 max_concurrent_requests: 100,
716 max_concurrent_queries: 5,
717 max_event_size: ByteSize::mebibytes(1),
718 max_attachment_size: ByteSize::mebibytes(200),
719 max_upload_size: ByteSize::mebibytes(1024),
720 max_attachments_size: ByteSize::mebibytes(200),
721 max_client_reports_size: ByteSize::kibibytes(4),
722 max_check_in_size: ByteSize::kibibytes(100),
723 max_envelope_size: ByteSize::mebibytes(200),
724 max_session_count: 100,
725 max_api_payload_size: ByteSize::mebibytes(20),
726 max_api_file_upload_size: ByteSize::mebibytes(40),
727 max_api_chunk_upload_size: ByteSize::mebibytes(100),
728 max_profile_size: ByteSize::mebibytes(50),
729 max_trace_metric_size: ByteSize::mebibytes(1),
730 max_log_size: ByteSize::mebibytes(1),
731 max_span_size: ByteSize::mebibytes(10),
732 max_container_size: ByteSize::mebibytes(12),
733 max_statsd_size: ByteSize::mebibytes(1),
734 max_metric_buckets_size: ByteSize::mebibytes(1),
735 max_replay_compressed_size: ByteSize::mebibytes(10),
736 max_replay_uncompressed_size: ByteSize::mebibytes(100),
737 max_replay_message_size: ByteSize::mebibytes(15),
738 max_thread_count: num_cpus::get(),
739 max_pool_concurrency: 1,
740 query_timeout: 30,
741 shutdown_timeout: 10,
742 keepalive_timeout: 5,
743 idle_timeout: None,
744 max_connections: None,
745 tcp_listen_backlog: 1024,
746 max_removed_attribute_key_size: ByteSize::kibibytes(10),
747 }
748 }
749}
750
/// Routing options (the `routing:` section).
#[derive(Debug, Default, Deserialize, Serialize)]
#[serde(default)]
pub struct Routing {
    /// Whether to accept envelope items with unknown types; `None` keeps the
    /// built-in default behavior.
    pub accept_unknown_items: Option<bool>,
}
766
/// HTTP content encodings supported for upstream requests.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum HttpEncoding {
    /// No encoding (the default).
    #[default]
    Identity,
    /// DEFLATE compression.
    Deflate,
    /// Gzip compression.
    Gzip,
    /// Brotli compression.
    Br,
    /// Zstandard compression.
    Zstd,
}
795
796impl HttpEncoding {
797 pub fn parse(str: &str) -> Self {
799 let str = str.trim();
800 if str.eq_ignore_ascii_case("zstd") {
801 Self::Zstd
802 } else if str.eq_ignore_ascii_case("br") {
803 Self::Br
804 } else if str.eq_ignore_ascii_case("gzip") || str.eq_ignore_ascii_case("x-gzip") {
805 Self::Gzip
806 } else if str.eq_ignore_ascii_case("deflate") {
807 Self::Deflate
808 } else {
809 Self::Identity
810 }
811 }
812
813 pub fn name(&self) -> Option<&'static str> {
817 match self {
818 Self::Identity => None,
819 Self::Deflate => Some("deflate"),
820 Self::Gzip => Some("gzip"),
821 Self::Br => Some("br"),
822 Self::Zstd => Some("zstd"),
823 }
824 }
825}
826
/// Upstream HTTP client settings (the `http:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Http {
    /// Request timeout in seconds.
    pub timeout: u32,
    /// Connection establishment timeout in seconds.
    pub connection_timeout: u32,
    /// Maximum interval in seconds between retries.
    pub max_retry_interval: u32,
    /// Optional `Host` header override.
    pub host_header: Option<String>,
    /// Re-authentication interval in seconds, if any.
    pub auth_interval: Option<u64>,
    /// Grace period in seconds before treating the network as down.
    pub outage_grace_period: u64,
    /// Delay in seconds between retries.
    pub retry_delay: u64,
    /// Interval in seconds after which a project is considered failed.
    pub project_failure_interval: u64,
    /// Content encoding used for upstream requests.
    pub encoding: HttpEncoding,
    /// Whether to emit global metrics over HTTP.
    pub global_metrics: bool,
    /// Whether to forward unknown requests to the upstream.
    pub forward: bool,
    /// Whether to enable DNS caching.
    pub dns_cache: bool,
}
901
902impl Default for Http {
903 fn default() -> Self {
904 Http {
905 timeout: 5,
906 connection_timeout: 3,
907 max_retry_interval: 60, host_header: None,
909 auth_interval: Some(600), outage_grace_period: DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD,
911 retry_delay: default_retry_delay(),
912 project_failure_interval: default_project_failure_interval(),
913 encoding: HttpEncoding::Zstd,
914 global_metrics: false,
915 forward: true,
916 dns_cache: true,
917 }
918 }
919}
920
/// Default delay in seconds between upstream retries.
fn default_retry_delay() -> u64 {
    1
}

/// Default interval in seconds after which a project is considered failed.
fn default_project_failure_interval() -> u64 {
    90
}
930
/// Default maximum on-disk size of the envelope spool (500 MiB).
fn spool_envelopes_max_disk_size() -> ByteSize {
    ByteSize::mebibytes(500)
}

/// Default batch size for spool disk writes (10 KiB).
fn spool_envelopes_batch_size_bytes() -> ByteSize {
    ByteSize::kibibytes(10)
}
940
/// Default maximum age of a spooled envelope: one day, in seconds.
fn spool_envelopes_max_envelope_delay_secs() -> u64 {
    24 * 60 * 60
}

/// Default refresh frequency for disk usage tracking, in milliseconds.
fn spool_disk_usage_refresh_frequency_ms() -> u64 {
    100
}

/// Default memory usage fraction above which backpressure applies.
fn spool_max_backpressure_memory_percent() -> f32 {
    0.8
}

/// Default number of spool partitions: a single partition.
fn spool_envelopes_partitions() -> NonZeroU8 {
    // `NonZeroU8::MIN` is 1, matching the previous `new(1).unwrap()`.
    NonZeroU8::MIN
}
959
/// Strategy used to assign envelopes to spool partitions.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum EnvelopeSpoolPartitioning {
    /// Partition based on the envelope's project key pair (the default).
    #[default]
    ProjectKeyPair,
    /// Distribute envelopes across partitions round-robin.
    RoundRobin,
}
977
/// Settings for the on-disk envelope spool.
#[derive(Debug, Serialize, Deserialize)]
pub struct EnvelopeSpool {
    /// Path to the spool file; in-memory only when `None`.
    pub path: Option<PathBuf>,
    /// Maximum on-disk size of the spool.
    #[serde(default = "spool_envelopes_max_disk_size")]
    pub max_disk_size: ByteSize,
    /// Batch size for disk writes.
    #[serde(default = "spool_envelopes_batch_size_bytes")]
    pub batch_size_bytes: ByteSize,
    /// Maximum time in seconds an envelope may stay spooled.
    #[serde(default = "spool_envelopes_max_envelope_delay_secs")]
    pub max_envelope_delay_secs: u64,
    /// Refresh frequency for disk usage tracking, in milliseconds.
    #[serde(default = "spool_disk_usage_refresh_frequency_ms")]
    pub disk_usage_refresh_frequency_ms: u64,
    /// Memory usage fraction above which backpressure applies.
    #[serde(default = "spool_max_backpressure_memory_percent")]
    pub max_backpressure_memory_percent: f32,
    /// Number of spool partitions.
    #[serde(default = "spool_envelopes_partitions")]
    pub partitions: NonZeroU8,
    /// Partitioning strategy for the spool.
    #[serde(default)]
    pub partitioning: EnvelopeSpoolPartitioning,
    /// Whether the spool is ephemeral (not persisted across restarts).
    #[serde(default)]
    pub ephemeral: bool,
}
1071
1072impl Default for EnvelopeSpool {
1073 fn default() -> Self {
1074 Self {
1075 path: None,
1076 max_disk_size: spool_envelopes_max_disk_size(),
1077 batch_size_bytes: spool_envelopes_batch_size_bytes(),
1078 max_envelope_delay_secs: spool_envelopes_max_envelope_delay_secs(),
1079 disk_usage_refresh_frequency_ms: spool_disk_usage_refresh_frequency_ms(),
1080 max_backpressure_memory_percent: spool_max_backpressure_memory_percent(),
1081 partitions: spool_envelopes_partitions(),
1082 partitioning: EnvelopeSpoolPartitioning::default(),
1083 ephemeral: false,
1084 }
1085 }
1086}
1087
/// Spooling settings (the `spool:` section).
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Spool {
    /// Settings for the envelope spool.
    #[serde(default)]
    pub envelopes: EnvelopeSpool,
}
1095
/// Caching behavior (the `cache:` section). All intervals are in seconds
/// unless noted otherwise.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cache {
    /// Whether to request the full project config from the upstream.
    pub project_request_full_config: bool,
    /// Expiry of cached project configs.
    pub project_expiry: u32,
    /// Grace period during which an expired project config is still used.
    pub project_grace_period: u32,
    /// Optional interval for proactively refreshing project configs.
    pub project_refresh_interval: Option<u32>,
    /// Expiry of cached relay infos.
    pub relay_expiry: u32,
    // Expiry of buffered envelopes (legacy alias: `event_expiry`).
    #[serde(alias = "event_expiry")]
    envelope_expiry: u32,
    // Capacity of the envelope buffer (legacy alias: `event_buffer_size`).
    #[serde(alias = "event_buffer_size")]
    envelope_buffer_size: u32,
    /// Expiry of negative cache entries.
    pub miss_expiry: u32,
    /// Batching interval for upstream queries, in milliseconds — TODO confirm unit.
    pub batch_interval: u32,
    /// Batching interval for downstream relay queries — TODO confirm unit.
    pub downstream_relays_batch_interval: u32,
    /// Maximum number of items per query batch.
    pub batch_size: usize,
    /// Interval for refreshing file-backed state.
    pub file_interval: u32,
    /// Interval for fetching the global config.
    pub global_config_fetch_interval: u32,
}
1142
1143impl Default for Cache {
1144 fn default() -> Self {
1145 Cache {
1146 project_request_full_config: false,
1147 project_expiry: 300, project_grace_period: 120, project_refresh_interval: None,
1150 relay_expiry: 3600, envelope_expiry: 600, envelope_buffer_size: 1000,
1153 miss_expiry: 60, batch_interval: 100, downstream_relays_batch_interval: 100, batch_size: 500,
1157 file_interval: 10, global_config_fetch_interval: 10, }
1160 }
1161}
1162
/// Default tolerance for timestamps in the future, in seconds.
fn default_max_secs_in_future() -> u32 {
    60
}

/// Default tolerance for session timestamps in the past: 5 days, in seconds.
fn default_max_session_secs_in_past() -> u32 {
    5 * 24 * 3600
}
1170
/// Default chunk size for attachment chunking (1 MiB).
fn default_chunk_size() -> ByteSize {
    ByteSize::mebibytes(1)
}
1174
/// Default key prefix under which project configs are cached.
fn default_projectconfig_cache_prefix() -> String {
    String::from("relayconfig")
}
1178
/// Default upper bound applied to rate limits, in seconds.
// Wrapped in `Option` to match the config field's type.
#[allow(clippy::unnecessary_wraps)]
fn default_max_rate_limit() -> Option<u32> {
    Some(300)
}
1183
/// Processing-mode settings (the `processing:` section). Only relevant in
/// builds compiled with the processing feature.
#[derive(Serialize, Deserialize, Debug)]
pub struct Processing {
    /// Whether processing mode is enabled.
    pub enabled: bool,
    /// Path to a GeoIP database, if any.
    #[serde(default)]
    pub geoip_path: Option<PathBuf>,
    /// Maximum tolerated timestamp drift into the future, in seconds.
    #[serde(default = "default_max_secs_in_future")]
    pub max_secs_in_future: u32,
    /// Maximum tolerated session timestamp age, in seconds.
    #[serde(default = "default_max_session_secs_in_past")]
    pub max_session_secs_in_past: u32,
    /// Kafka producer configuration parameters.
    pub kafka_config: Vec<KafkaConfigParam>,
    /// Additional named Kafka configurations for specific topics.
    #[serde(default)]
    pub secondary_kafka_configs: BTreeMap<String, Vec<KafkaConfigParam>>,
    /// Assignment of logical topics to Kafka topics/configs.
    #[serde(default)]
    pub topics: TopicAssignments,
    /// Whether to validate topic existence on startup.
    #[serde(default)]
    pub kafka_validate_topics: bool,
    /// Redis configuration, if Redis is used.
    #[serde(default)]
    pub redis: Option<RedisConfigs>,
    /// Chunk size used when chunking attachments.
    #[serde(default = "default_chunk_size")]
    pub attachment_chunk_size: ByteSize,
    /// Key prefix for cached project configs.
    #[serde(default = "default_projectconfig_cache_prefix")]
    pub projectconfig_cache_prefix: String,
    /// Upper bound applied to rate limits, in seconds.
    #[serde(default = "default_max_rate_limit")]
    pub max_rate_limit: Option<u32>,
    /// Quota cache ratio — NOTE(review): semantics not visible here; confirm at use site.
    pub quota_cache_ratio: Option<f32>,
    /// Quota cache maximum — NOTE(review): semantics not visible here; confirm at use site.
    pub quota_cache_max: Option<f32>,
    /// Objectstore service settings (legacy alias: `upload`).
    #[serde(default, alias = "upload")]
    pub objectstore: ObjectstoreServiceConfig,
}
1261
1262impl Default for Processing {
1263 fn default() -> Self {
1265 Self {
1266 enabled: false,
1267 geoip_path: None,
1268 max_secs_in_future: default_max_secs_in_future(),
1269 max_session_secs_in_past: default_max_session_secs_in_past(),
1270 kafka_config: Vec::new(),
1271 secondary_kafka_configs: BTreeMap::new(),
1272 topics: TopicAssignments::default(),
1273 kafka_validate_topics: false,
1274 redis: None,
1275 attachment_chunk_size: default_chunk_size(),
1276 projectconfig_cache_prefix: default_projectconfig_cache_prefix(),
1277 max_rate_limit: default_max_rate_limit(),
1278 quota_cache_ratio: None,
1279 quota_cache_max: None,
1280 objectstore: ObjectstoreServiceConfig::default(),
1281 }
1282 }
1283}
1284
/// Event normalization settings (the `normalization:` section).
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Normalization {
    /// The level of normalization to apply.
    #[serde(default)]
    pub level: NormalizationLevel,
}

/// How much normalization this relay performs.
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum NormalizationLevel {
    /// The default normalization level.
    #[default]
    Default,
    /// Full normalization.
    Full,
}
1309
/// Aggregation settings for outcomes.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct OutcomeAggregatorConfig {
    /// Width of an aggregation bucket, in seconds.
    pub bucket_interval: u64,
    /// Interval at which aggregated outcomes are flushed, in seconds.
    pub flush_interval: u64,
}
1319
1320impl Default for OutcomeAggregatorConfig {
1321 fn default() -> Self {
1322 Self {
1323 bucket_interval: 60,
1324 flush_interval: 120,
1325 }
1326 }
1327}
1328
/// Authentication material for the objectstore service.
///
/// `Debug` is implemented by hand to redact the signing key.
#[derive(Serialize, Deserialize)]
pub struct ObjectstoreAuthConfig {
    /// Identifier of the signing key.
    pub key_id: String,

    /// The signing key; never printed via `Debug`.
    pub signing_key: String,
}
1339
1340impl fmt::Debug for ObjectstoreAuthConfig {
1341 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1342 f.debug_struct("ObjectstoreAuthConfig")
1343 .field("key_id", &self.key_id)
1344 .field("signing_key", &"[redacted]")
1345 .finish()
1346 }
1347}
1348
/// Settings for the objectstore service client.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct ObjectstoreServiceConfig {
    /// Base URL of the objectstore service; disabled when `None`.
    pub objectstore_url: Option<String>,

    /// Maximum number of concurrent requests to the service.
    pub max_concurrent_requests: usize,

    /// Maximum number of queued requests beyond the concurrency limit.
    pub max_backlog: usize,

    /// Request timeout in seconds.
    pub timeout: u64,

    /// Streaming timeout in seconds.
    pub stream_timeout: u64,

    /// Delay between retries, in seconds.
    pub retry_delay: f64,

    /// Maximum number of attempts per request.
    pub max_attempts: NonZeroU16,

    /// Optional authentication material.
    pub auth: Option<ObjectstoreAuthConfig>,
}
1388
1389impl Default for ObjectstoreServiceConfig {
1390 fn default() -> Self {
1391 Self {
1392 objectstore_url: None,
1393 max_concurrent_requests: 10,
1394 max_backlog: 20,
1395 timeout: 60,
1396 stream_timeout: 5 * 60, retry_delay: 1.0,
1398 max_attempts: NonZeroU16::new(5).unwrap(),
1399 auth: None,
1400 }
1401 }
1402}
1403
/// Controls whether and how outcomes are emitted.
///
/// Serialized as `true` (full outcomes), `false` (none), or the string
/// `"as_client_reports"`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EmitOutcomes {
    /// Do not emit outcomes.
    None,
    /// Emit outcomes as client reports.
    AsClientReports,
    /// Emit full outcomes.
    AsOutcomes,
}
1416
1417impl EmitOutcomes {
1418 pub fn any(&self) -> bool {
1420 !matches!(self, EmitOutcomes::None)
1421 }
1422}
1423
1424impl Serialize for EmitOutcomes {
1425 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
1426 where
1427 S: Serializer,
1428 {
1429 match self {
1431 Self::None => serializer.serialize_bool(false),
1432 Self::AsClientReports => serializer.serialize_str("as_client_reports"),
1433 Self::AsOutcomes => serializer.serialize_bool(true),
1434 }
1435 }
1436}
1437
1438struct EmitOutcomesVisitor;
1439
1440impl Visitor<'_> for EmitOutcomesVisitor {
1441 type Value = EmitOutcomes;
1442
1443 fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
1444 formatter.write_str("true, false, or 'as_client_reports'")
1445 }
1446
1447 fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
1448 where
1449 E: serde::de::Error,
1450 {
1451 Ok(if v {
1452 EmitOutcomes::AsOutcomes
1453 } else {
1454 EmitOutcomes::None
1455 })
1456 }
1457
1458 fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
1459 where
1460 E: serde::de::Error,
1461 {
1462 if v == "as_client_reports" {
1463 Ok(EmitOutcomes::AsClientReports)
1464 } else {
1465 Err(E::invalid_value(Unexpected::Str(v), &"as_client_reports"))
1466 }
1467 }
1468}
1469
impl<'de> Deserialize<'de> for EmitOutcomes {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `deserialize_any` lets the visitor accept both booleans and strings.
        deserializer.deserialize_any(EmitOutcomesVisitor)
    }
}
1478
/// Outcome emission settings (the `outcomes:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Outcomes {
    /// Whether and how outcomes are emitted.
    pub emit_outcomes: EmitOutcomes,
    /// Maximum number of outcomes per batch.
    pub batch_size: usize,
    /// Batching interval in milliseconds — TODO confirm unit at use site.
    pub batch_interval: u64,
    /// Optional source identifier attached to emitted outcomes.
    pub source: Option<String>,
    /// Aggregation settings for outcomes.
    pub aggregator: OutcomeAggregatorConfig,
}
1499
1500impl Default for Outcomes {
1501 fn default() -> Self {
1502 Outcomes {
1503 emit_outcomes: EmitOutcomes::AsClientReports,
1504 batch_size: 1000,
1505 batch_interval: 500,
1506 source: None,
1507 aggregator: OutcomeAggregatorConfig::default(),
1508 }
1509 }
1510}
1511
/// Minimal config containing only the `relay` section, used when writing an
/// initial config file.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct MinimalConfig {
    /// The relay settings.
    pub relay: Relay,
}
1518
1519impl MinimalConfig {
1520 pub fn save_in_folder<P: AsRef<Path>>(&self, p: P) -> anyhow::Result<()> {
1522 let path = p.as_ref();
1523 if fs::metadata(path).is_err() {
1524 fs::create_dir_all(path)
1525 .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, path))?;
1526 }
1527 self.save(path)
1528 }
1529}
1530
// The minimal config shares the `config.yml` file with the full config.
impl ConfigObject for MinimalConfig {
    fn format() -> ConfigFormat {
        ConfigFormat::Yaml
    }

    fn name() -> &'static str {
        "config"
    }
}
1540
/// Serde helpers for maps of statically configured relays, converting
/// between the wire representation and [`RelayInfo`].
mod config_relay_info {
    use serde::ser::SerializeMap;

    use super::*;

    // Wire representation of a single static relay entry.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    struct RelayInfoConfig {
        public_key: PublicKey,
        #[serde(default)]
        internal: bool,
    }

    impl From<RelayInfoConfig> for RelayInfo {
        fn from(v: RelayInfoConfig) -> Self {
            RelayInfo {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    impl From<RelayInfo> for RelayInfoConfig {
        fn from(v: RelayInfo) -> Self {
            RelayInfoConfig {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    // Deserializes a map of relay ids to wire entries into `RelayInfo`s.
    pub(super) fn deserialize<'de, D>(des: D) -> Result<HashMap<RelayId, RelayInfo>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let map = HashMap::<RelayId, RelayInfoConfig>::deserialize(des)?;
        Ok(map.into_iter().map(|(k, v)| (k, v.into())).collect())
    }

    // Serializes the relay map back into its wire representation.
    pub(super) fn serialize<S>(elm: &HashMap<RelayId, RelayInfo>, ser: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = ser.serialize_map(Some(elm.len()))?;

        for (k, v) in elm {
            map.serialize_entry(k, &RelayInfoConfig::from(v.clone()))?;
        }

        map.end()
    }
}
1594
/// Authentication settings (the `auth:` section).
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct AuthConfig {
    /// Condition under which the relay reports itself as ready.
    #[serde(default, skip_serializing_if = "is_default")]
    pub ready: ReadinessCondition,

    /// Statically configured relays, keyed by relay id.
    #[serde(default, with = "config_relay_info")]
    pub static_relays: HashMap<RelayId, RelayInfo>,

    /// Maximum accepted age of signatures, in seconds (default 300).
    #[serde(default = "default_max_age")]
    pub signature_max_age: u64,
}
1612
/// Default maximum signature age, in seconds.
fn default_max_age() -> u64 {
    300
}
1616
/// GeoIP settings (the `geoip:` section).
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct GeoIpConfig {
    /// Path to the GeoIP database file, if any.
    pub path: Option<PathBuf>,
}

/// Cardinality limiter settings (the `cardinality_limiter:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct CardinalityLimiter {
    /// Interval in seconds at which the limiter cache is vacuumed.
    pub cache_vacuum_interval: u64,
}
1635
1636impl Default for CardinalityLimiter {
1637 fn default() -> Self {
1638 Self {
1639 cache_vacuum_interval: 180,
1640 }
1641 }
1642}
1643
/// Health check settings (the `health:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Health {
    /// Interval in milliseconds at which health state is refreshed.
    pub refresh_interval_ms: u64,
    /// Optional absolute memory limit; unhealthy above this.
    pub max_memory_bytes: Option<ByteSize>,
    /// Fraction of total memory above which the relay is unhealthy.
    pub max_memory_percent: f32,
    /// Timeout in milliseconds for individual health probes.
    pub probe_timeout_ms: u64,
    /// Refresh frequency for memory statistics, in milliseconds.
    pub memory_stat_refresh_frequency_ms: u64,
}
1681
1682impl Default for Health {
1683 fn default() -> Self {
1684 Self {
1685 refresh_interval_ms: 3000,
1686 max_memory_bytes: None,
1687 max_memory_percent: 0.95,
1688 probe_timeout_ms: 900,
1689 memory_stat_refresh_frequency_ms: 100,
1690 }
1691 }
1692}
1693
/// COGS (cost tracking) settings (the `cogs:` section).
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cogs {
    /// Maximum number of queued COGS measurements.
    pub max_queue_size: u64,
    /// Resource id under which this relay's costs are reported.
    pub relay_resource_id: String,
}
1711
1712impl Default for Cogs {
1713 fn default() -> Self {
1714 Self {
1715 max_queue_size: 10_000,
1716 relay_resource_id: "relay_service".to_owned(),
1717 }
1718 }
1719}
1720
/// Upload settings (the `upload:` section).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(default)]
pub struct Upload {
    /// Maximum number of concurrent upload requests.
    pub max_concurrent_requests: usize,
    /// Upload timeout in seconds.
    pub timeout: u64,
    /// Maximum age in seconds — NOTE(review): exact semantics not visible here.
    pub max_age: i64,
}
1736
1737impl Default for Upload {
1738 fn default() -> Self {
1739 Self {
1740 max_concurrent_requests: 10,
1741 timeout: 5 * 60, max_age: 60 * 60, }
1744 }
1745}
1746
/// The complete set of configuration values, one field per config section.
/// All sections are optional in the file and fall back to their defaults.
#[derive(Serialize, Deserialize, Debug, Default)]
#[allow(missing_docs)]
pub struct ConfigValues {
    #[serde(default)]
    pub relay: Relay,
    #[serde(default)]
    pub http: Http,
    #[serde(default)]
    pub cache: Cache,
    #[serde(default)]
    pub spool: Spool,
    #[serde(default)]
    pub limits: Limits,
    #[serde(default)]
    pub logging: relay_log::LogConfig,
    #[serde(default)]
    pub routing: Routing,
    #[serde(default)]
    pub metrics: Metrics,
    #[serde(default)]
    pub sentry: relay_log::SentryConfig,
    #[serde(default)]
    pub processing: Processing,
    #[serde(default)]
    pub outcomes: Outcomes,
    #[serde(default)]
    pub aggregator: AggregatorServiceConfig,
    #[serde(default)]
    pub secondary_aggregators: Vec<ScopedAggregatorConfig>,
    #[serde(default)]
    pub auth: AuthConfig,
    #[serde(default)]
    pub geoip: GeoIpConfig,
    #[serde(default)]
    pub normalization: Normalization,
    #[serde(default)]
    pub cardinality_limiter: CardinalityLimiter,
    #[serde(default)]
    pub health: Health,
    #[serde(default)]
    pub cogs: Cogs,
    #[serde(default)]
    pub upload: Upload,
}
1792
impl ConfigObject for ConfigValues {
    /// The main config is persisted as YAML.
    fn format() -> ConfigFormat {
        ConfigFormat::Yaml
    }

    /// Base file name of the main config ("config").
    fn name() -> &'static str {
        "config"
    }
}
1802
/// Config represents the merged configuration of the relay: the values loaded
/// from the config file plus optional on-disk credentials.
pub struct Config {
    // Deserialized contents of `config.yml`.
    values: ConfigValues,
    // Relay credentials (id, public/secret key), if present on disk.
    credentials: Option<Credentials>,
    // Absolute path of the config directory this config was loaded from.
    path: PathBuf,
}
1809
impl fmt::Debug for Config {
    /// Manual `Debug` that deliberately omits `credentials` — presumably to
    /// keep the secret key out of logs and debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Config")
            .field("path", &self.path)
            .field("values", &self.values)
            .finish()
    }
}
1818
impl Config {
    /// Loads a config from the given config folder.
    ///
    /// Also loads credentials if a credentials file exists next to the config.
    /// Fails if the config enables processing but the binary was built without
    /// the `processing` feature.
    pub fn from_path<P: AsRef<Path>>(path: P) -> anyhow::Result<Config> {
        // Resolve relative paths against the current working directory; fall
        // back to the path as given if the CWD cannot be determined.
        let path = env::current_dir()
            .map(|x| x.join(path.as_ref()))
            .unwrap_or_else(|_| path.as_ref().to_path_buf());

        let config = Config {
            values: ConfigValues::load(&path)?,
            // Credentials are optional; only load the file if it exists.
            credentials: if Credentials::path(&path).exists() {
                Some(Credentials::load(&path)?)
            } else {
                None
            },
            path: path.clone(),
        };

        if cfg!(not(feature = "processing")) && config.processing_enabled() {
            return Err(ConfigError::file(ConfigErrorKind::ProcessingNotAvailable, &path).into());
        }

        Ok(config)
    }

    /// Creates a config from a JSON value.
    ///
    /// The resulting config has no credentials and an empty path.
    pub fn from_json_value(value: serde_json::Value) -> anyhow::Result<Config> {
        Ok(Config {
            values: serde_json::from_value(value)
                .with_context(|| ConfigError::new(ConfigErrorKind::BadJson))?,
            credentials: None,
            path: PathBuf::new(),
        })
    }

    /// Overrides configuration values with those from another source, such as
    /// environment variables or command-line arguments.
    ///
    /// Credential overrides are all-or-nothing when no credentials exist yet:
    /// providing only a subset of id/public key/secret key is an error.
    pub fn apply_override(
        &mut self,
        mut overrides: OverridableConfig,
    ) -> anyhow::Result<&mut Self> {
        let relay = &mut self.values.relay;

        if let Some(mode) = overrides.mode {
            relay.mode = mode
                .parse::<RelayMode>()
                .with_context(|| ConfigError::field("mode"))?;
        }

        if let Some(deployment) = overrides.instance {
            // NOTE(review): the error names the field "deployment" while the
            // override key is `instance` — confirm this mismatch is intended.
            relay.instance = deployment
                .parse::<RelayInstance>()
                .with_context(|| ConfigError::field("deployment"))?;
        }

        if let Some(log_level) = overrides.log_level {
            self.values.logging.level = log_level.parse()?;
        }

        if let Some(log_format) = overrides.log_format {
            self.values.logging.format = log_format.parse()?;
        }

        // A full upstream URL takes precedence over an upstream DSN.
        if let Some(upstream) = overrides.upstream {
            relay.upstream = upstream
                .parse::<UpstreamDescriptor>()
                .with_context(|| ConfigError::field("upstream"))?;
        } else if let Some(upstream_dsn) = overrides.upstream_dsn {
            relay.upstream = upstream_dsn
                .parse::<Dsn>()
                .map(|dsn| UpstreamDescriptor::from_dsn(&dsn))
                .with_context(|| ConfigError::field("upstream_dsn"))?;
        }

        if let Some(host) = overrides.host {
            relay.host = host
                .parse::<IpAddr>()
                .with_context(|| ConfigError::field("host"))?;
        }

        if let Some(port) = overrides.port {
            relay.port = port
                .as_str()
                .parse()
                .with_context(|| ConfigError::field("port"))?;
        }

        let processing = &mut self.values.processing;
        if let Some(enabled) = overrides.processing {
            // Accept common boolean spellings; an empty string disables.
            match enabled.to_lowercase().as_str() {
                "true" | "1" => processing.enabled = true,
                "false" | "0" | "" => processing.enabled = false,
                _ => return Err(ConfigError::field("processing").into()),
            }
        }

        if let Some(redis) = overrides.redis_url {
            processing.redis = Some(RedisConfigs::Unified(RedisConfig::single(redis)))
        }

        if let Some(kafka_url) = overrides.kafka_url {
            // Update an existing `bootstrap.servers` entry in place, or append
            // a new one if none is configured yet.
            let existing = processing
                .kafka_config
                .iter_mut()
                .find(|e| e.name == "bootstrap.servers");

            if let Some(config_param) = existing {
                config_param.value = kafka_url;
            } else {
                processing.kafka_config.push(KafkaConfigParam {
                    name: "bootstrap.servers".to_owned(),
                    value: kafka_url,
                })
            }
        }
        // Parse each credential component up front so validation errors are
        // reported before any state is mutated.
        let id = if let Some(id) = overrides.id {
            let id = Uuid::parse_str(&id).with_context(|| ConfigError::field("id"))?;
            Some(id)
        } else {
            None
        };
        let public_key = if let Some(public_key) = overrides.public_key {
            let public_key = public_key
                .parse::<PublicKey>()
                .with_context(|| ConfigError::field("public_key"))?;
            Some(public_key)
        } else {
            None
        };

        let secret_key = if let Some(secret_key) = overrides.secret_key {
            let secret_key = secret_key
                .parse::<SecretKey>()
                .with_context(|| ConfigError::field("secret_key"))?;
            Some(secret_key)
        } else {
            None
        };
        let outcomes = &mut self.values.outcomes;
        if overrides.outcome_source.is_some() {
            outcomes.source = overrides.outcome_source.take();
        }

        if let Some(credentials) = &mut self.credentials {
            // Existing credentials: patch individual fields.
            if let Some(id) = id {
                credentials.id = id;
            }
            if let Some(public_key) = public_key {
                credentials.public_key = public_key;
            }
            if let Some(secret_key) = secret_key {
                credentials.secret_key = secret_key
            }
        } else {
            // No existing credentials: either all three parts are provided,
            // none are, or the override set is rejected as incomplete.
            match (id, public_key, secret_key) {
                (Some(id), Some(public_key), Some(secret_key)) => {
                    self.credentials = Some(Credentials {
                        secret_key,
                        public_key,
                        id,
                    })
                }
                (None, None, None) => {
                }
                _ => {
                    return Err(ConfigError::field("incomplete credentials").into());
                }
            }
        }

        let limits = &mut self.values.limits;
        // NOTE(review): an unparsable shutdown_timeout is silently ignored
        // here rather than reported — confirm this is intentional.
        if let Some(shutdown_timeout) = overrides.shutdown_timeout
            && let Ok(shutdown_timeout) = shutdown_timeout.parse::<u64>()
        {
            limits.shutdown_timeout = shutdown_timeout;
        }

        if let Some(server_name) = overrides.server_name {
            self.values.sentry.server_name = Some(server_name.into());
        }

        Ok(self)
    }

    /// Checks if we should try to load/save a config file at the given path.
    pub fn config_exists<P: AsRef<Path>>(path: P) -> bool {
        fs::metadata(ConfigValues::path(path.as_ref())).is_ok()
    }

    /// Returns the filesystem path this config was loaded from.
    pub fn path(&self) -> &Path {
        &self.path
    }

    /// Dumps the current config values as a YAML string.
    pub fn to_yaml_string(&self) -> anyhow::Result<String> {
        serde_yaml::to_string(&self.values)
            .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
    }

    /// Regenerates the relay credentials, optionally persisting them to disk.
    pub fn regenerate_credentials(&mut self, save: bool) -> anyhow::Result<()> {
        let creds = Credentials::generate();
        if save {
            creds.save(&self.path)?;
        }
        self.credentials = Some(creds);
        Ok(())
    }

    /// Returns the relay credentials, if any.
    pub fn credentials(&self) -> Option<&Credentials> {
        self.credentials.as_ref()
    }

    /// Sets new credentials and persists the change to disk.
    ///
    /// Passing `None` deletes the credentials file. Returns `true` if the
    /// credentials actually changed.
    pub fn replace_credentials(
        &mut self,
        credentials: Option<Credentials>,
    ) -> anyhow::Result<bool> {
        if self.credentials == credentials {
            return Ok(false);
        }

        match credentials {
            Some(ref creds) => {
                creds.save(&self.path)?;
            }
            None => {
                // Remove the credentials file if it exists.
                let path = Credentials::path(&self.path);
                if fs::metadata(&path).is_ok() {
                    fs::remove_file(&path).with_context(|| {
                        ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path)
                    })?;
                }
            }
        }

        self.credentials = credentials;
        Ok(true)
    }

    /// Returns `true` if the config has credentials.
    pub fn has_credentials(&self) -> bool {
        self.credentials.is_some()
    }

    /// Returns the secret key, if credentials are set up.
    pub fn secret_key(&self) -> Option<&SecretKey> {
        self.credentials.as_ref().map(|x| &x.secret_key)
    }

    /// Returns the public key, if credentials are set up.
    pub fn public_key(&self) -> Option<&PublicKey> {
        self.credentials.as_ref().map(|x| &x.public_key)
    }

    /// Returns the relay ID, if credentials are set up.
    pub fn relay_id(&self) -> Option<&RelayId> {
        self.credentials.as_ref().map(|x| &x.id)
    }

    /// Returns the relay mode.
    pub fn relay_mode(&self) -> RelayMode {
        self.values.relay.mode
    }

    /// Returns the relay instance type.
    pub fn relay_instance(&self) -> RelayInstance {
        self.values.relay.instance
    }

    /// Returns the upstream this relay forwards to.
    pub fn upstream(&self) -> &UpstreamDescriptor {
        &self.values.relay.upstream
    }

    /// Returns the advertised upstream, if one is configured.
    pub fn advertised_upstream(&self) -> Option<&UpstreamDescriptor> {
        self.values.relay.advertised_upstream.as_ref()
    }

    /// Returns the custom HTTP `Host` header, if configured.
    pub fn http_host_header(&self) -> Option<&str> {
        self.values.http.host_header.as_deref()
    }

    /// Returns the socket address this relay listens on.
    pub fn listen_addr(&self) -> SocketAddr {
        (self.values.relay.host, self.values.relay.port).into()
    }

    /// Returns the optional internal listen address.
    ///
    /// Missing host or port components fall back to the public listen
    /// address; `None` only when neither is configured.
    pub fn listen_addr_internal(&self) -> Option<SocketAddr> {
        match (
            self.values.relay.internal_host,
            self.values.relay.internal_port,
        ) {
            (Some(host), None) => Some((host, self.values.relay.port).into()),
            (None, Some(port)) => Some((self.values.relay.host, port).into()),
            (Some(host), Some(port)) => Some((host, port).into()),
            (None, None) => None,
        }
    }

    /// Returns the TLS listen address if a TLS identity is configured.
    ///
    /// Defaults to port 3443 when no explicit TLS port is set.
    pub fn tls_listen_addr(&self) -> Option<SocketAddr> {
        if self.values.relay.tls_identity_path.is_some() {
            let port = self.values.relay.tls_port.unwrap_or(3443);
            Some((self.values.relay.host, port).into())
        } else {
            None
        }
    }

    /// Returns the path to the TLS identity file, if configured.
    pub fn tls_identity_path(&self) -> Option<&Path> {
        self.values.relay.tls_identity_path.as_deref()
    }

    /// Returns the password for the TLS identity, if configured.
    pub fn tls_identity_password(&self) -> Option<&str> {
        self.values.relay.tls_identity_password.as_deref()
    }

    /// Returns `true` if project IDs in URLs should override those in DSNs.
    pub fn override_project_ids(&self) -> bool {
        self.values.relay.override_project_ids
    }

    /// Returns `true` when readiness requires successful authentication.
    ///
    /// Only applies in managed mode with the `Authenticated` readiness
    /// condition.
    pub fn requires_auth(&self) -> bool {
        match self.values.auth.ready {
            ReadinessCondition::Authenticated => self.relay_mode() == RelayMode::Managed,
            ReadinessCondition::Always => false,
        }
    }

    /// Returns the interval for periodic re-authentication.
    ///
    /// `None` when processing is enabled or the interval is unset/zero.
    pub fn http_auth_interval(&self) -> Option<Duration> {
        if self.processing_enabled() {
            return None;
        }

        match self.values.http.auth_interval {
            None | Some(0) => None,
            Some(secs) => Some(Duration::from_secs(secs)),
        }
    }

    /// Returns the grace period before a network outage is flagged.
    pub fn http_outage_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.http.outage_grace_period)
    }

    /// Returns the delay between upstream retries.
    pub fn http_retry_delay(&self) -> Duration {
        Duration::from_secs(self.values.http.retry_delay)
    }

    /// Returns the interval after which project fetches are considered failed.
    pub fn http_project_failure_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.project_failure_interval)
    }

    /// Returns the content encoding for upstream requests.
    pub fn http_encoding(&self) -> HttpEncoding {
        self.values.http.encoding
    }

    /// Returns `true` if metrics should be sent globally through the upstream.
    pub fn http_global_metrics(&self) -> bool {
        self.values.http.global_metrics
    }

    /// Returns `true` if envelopes should be forwarded unmodified.
    pub fn http_forward(&self) -> bool {
        self.values.http.forward
    }

    /// Returns how outcomes should be emitted.
    ///
    /// Processing relays always emit full outcomes regardless of config.
    pub fn emit_outcomes(&self) -> EmitOutcomes {
        if self.processing_enabled() {
            return EmitOutcomes::AsOutcomes;
        }
        self.values.outcomes.emit_outcomes
    }

    /// Returns the maximum number of outcomes per batch.
    pub fn outcome_batch_size(&self) -> usize {
        self.values.outcomes.batch_size
    }

    /// Returns the maximum time to hold an outcome batch before sending.
    pub fn outcome_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.outcomes.batch_interval)
    }

    /// Returns the configured outcome source, if any.
    pub fn outcome_source(&self) -> Option<&str> {
        self.values.outcomes.source.as_deref()
    }

    /// Returns the outcome aggregator configuration.
    pub fn outcome_aggregator(&self) -> &OutcomeAggregatorConfig {
        &self.values.outcomes.aggregator
    }

    /// Returns the logging configuration.
    pub fn logging(&self) -> &relay_log::LogConfig {
        &self.values.logging
    }

    /// Returns the internal Sentry reporting configuration.
    pub fn sentry(&self) -> &relay_log::SentryConfig {
        &self.values.sentry
    }

    /// Returns the statsd address, if metrics emission is configured.
    pub fn statsd_addr(&self) -> Option<&str> {
        self.values.metrics.statsd.as_deref()
    }

    /// Returns the statsd client buffer size, if configured.
    pub fn statsd_buffer_size(&self) -> Option<usize> {
        self.values.metrics.statsd_buffer_size
    }

    /// Returns the prefix for internal statsd metrics.
    pub fn metrics_prefix(&self) -> &str {
        &self.values.metrics.prefix
    }

    /// Returns default tags attached to all internal metrics.
    pub fn metrics_default_tags(&self) -> &BTreeMap<String, String> {
        &self.values.metrics.default_tags
    }

    /// Returns the tag name used for the hostname, if configured.
    pub fn metrics_hostname_tag(&self) -> Option<&str> {
        self.values.metrics.hostname_tag.as_deref()
    }

    /// Returns the interval for periodic internal metrics.
    ///
    /// A configured value of `0` disables them (`None`).
    pub fn metrics_periodic_interval(&self) -> Option<Duration> {
        match self.values.metrics.periodic_secs {
            0 => None,
            secs => Some(Duration::from_secs(secs)),
        }
    }

    /// Returns the default timeout for upstream HTTP requests.
    pub fn http_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.timeout.into())
    }

    /// Returns the connection timeout for upstream HTTP requests.
    pub fn http_connection_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.connection_timeout.into())
    }

    /// Returns the maximum backoff interval for upstream retries.
    pub fn http_max_retry_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.max_retry_interval.into())
    }

    /// Returns `true` if DNS caching is enabled for the HTTP client.
    pub fn http_dns_cache(&self) -> bool {
        self.values.http.dns_cache
    }

    /// Returns how long project states are cached.
    pub fn project_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_expiry.into())
    }

    /// Returns `true` if full project configs should be requested upstream.
    pub fn request_full_project_config(&self) -> bool {
        self.values.cache.project_request_full_config
    }

    /// Returns how long relay public keys are cached.
    pub fn relay_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.relay_expiry.into())
    }

    /// Returns the maximum number of buffered envelopes.
    pub fn envelope_buffer_size(&self) -> usize {
        // Saturate rather than panic if the configured value exceeds usize.
        self.values
            .cache
            .envelope_buffer_size
            .try_into()
            .unwrap_or(usize::MAX)
    }

    /// Returns how long cache misses are remembered.
    pub fn cache_miss_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.miss_expiry.into())
    }

    /// Returns the grace period for expired project states.
    pub fn project_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_grace_period.into())
    }

    /// Returns the interval for proactive project config refreshes, if set.
    pub fn project_refresh_interval(&self) -> Option<Duration> {
        self.values
            .cache
            .project_refresh_interval
            .map(Into::into)
            .map(Duration::from_secs)
    }

    /// Returns the interval for batching upstream queries.
    pub fn query_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.batch_interval.into())
    }

    /// Returns the interval for batching downstream relay queries.
    pub fn downstream_relays_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.downstream_relays_batch_interval.into())
    }

    /// Returns the interval for polling local project config files.
    pub fn local_cache_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.file_interval.into())
    }

    /// Returns the interval for fetching the global config.
    pub fn global_config_fetch_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.global_config_fetch_interval.into())
    }

    /// Returns the spool file path for the given partition.
    ///
    /// Partition 0 uses the configured path as-is; other partitions append
    /// `.{partition_id}` to the file name. `None` if spooling to disk is not
    /// configured.
    pub fn spool_envelopes_path(&self, partition_id: u8) -> Option<PathBuf> {
        let mut path = self
            .values
            .spool
            .envelopes
            .path
            .as_ref()
            .map(|path| path.to_owned())?;

        if partition_id == 0 {
            return Some(path);
        }

        let file_name = path.file_name().and_then(|f| f.to_str())?;
        let new_file_name = format!("{file_name}.{partition_id}");
        path.set_file_name(new_file_name);

        Some(path)
    }

    /// Returns the maximum on-disk spool size in bytes.
    pub fn spool_envelopes_max_disk_size(&self) -> usize {
        self.values.spool.envelopes.max_disk_size.as_bytes()
    }

    /// Returns the spool write-batch size in bytes.
    pub fn spool_envelopes_batch_size_bytes(&self) -> usize {
        self.values.spool.envelopes.batch_size_bytes.as_bytes()
    }

    /// Returns the maximum age of spooled envelopes.
    pub fn spool_envelopes_max_age(&self) -> Duration {
        Duration::from_secs(self.values.spool.envelopes.max_envelope_delay_secs)
    }

    /// Returns the refresh frequency for spool disk usage measurements.
    pub fn spool_disk_usage_refresh_frequency_ms(&self) -> Duration {
        Duration::from_millis(self.values.spool.envelopes.disk_usage_refresh_frequency_ms)
    }

    /// Returns the memory watermark (as a fraction) for spool backpressure.
    pub fn spool_max_backpressure_memory_percent(&self) -> f32 {
        self.values.spool.envelopes.max_backpressure_memory_percent
    }

    /// Returns the number of spool partitions.
    pub fn spool_partitions(&self) -> NonZeroU8 {
        self.values.spool.envelopes.partitions
    }

    /// Returns the spool partitioning strategy.
    pub fn spool_partitioning(&self) -> EnvelopeSpoolPartitioning {
        self.values.spool.envelopes.partitioning
    }

    /// Returns `true` if the spool is ephemeral (not persisted across runs).
    pub fn spool_ephemeral(&self) -> bool {
        self.values.spool.envelopes.ephemeral
    }

    /// Returns the maximum size of an event payload in bytes.
    pub fn max_event_size(&self) -> usize {
        self.values.limits.max_event_size.as_bytes()
    }

    /// Returns the maximum size of a single attachment in bytes.
    pub fn max_attachment_size(&self) -> usize {
        self.values.limits.max_attachment_size.as_bytes()
    }

    /// Returns the maximum upload size in bytes.
    pub fn max_upload_size(&self) -> usize {
        self.values.limits.max_upload_size.as_bytes()
    }

    /// Returns the maximum combined size of all attachments in an envelope.
    pub fn max_attachments_size(&self) -> usize {
        self.values.limits.max_attachments_size.as_bytes()
    }

    /// Returns the maximum size of a client report payload in bytes.
    pub fn max_client_reports_size(&self) -> usize {
        self.values.limits.max_client_reports_size.as_bytes()
    }

    /// Returns the maximum size of a monitor check-in payload in bytes.
    pub fn max_check_in_size(&self) -> usize {
        self.values.limits.max_check_in_size.as_bytes()
    }

    /// Returns the maximum size of a log payload in bytes.
    pub fn max_log_size(&self) -> usize {
        self.values.limits.max_log_size.as_bytes()
    }

    /// Returns the maximum size of a span payload in bytes.
    pub fn max_span_size(&self) -> usize {
        self.values.limits.max_span_size.as_bytes()
    }

    /// Returns the maximum size of an item container in bytes.
    pub fn max_container_size(&self) -> usize {
        self.values.limits.max_container_size.as_bytes()
    }

    /// Returns the maximum size for logs-integration payloads.
    ///
    /// Currently aliases the container limit.
    pub fn max_logs_integration_size(&self) -> usize {
        self.max_container_size()
    }

    /// Returns the maximum size for spans-integration payloads.
    ///
    /// Currently aliases the container limit.
    pub fn max_spans_integration_size(&self) -> usize {
        self.max_container_size()
    }

    /// Returns the maximum size of a full envelope in bytes.
    pub fn max_envelope_size(&self) -> usize {
        self.values.limits.max_envelope_size.as_bytes()
    }

    /// Returns the maximum number of sessions per envelope.
    pub fn max_session_count(&self) -> usize {
        self.values.limits.max_session_count
    }

    /// Returns the maximum size of a statsd payload in bytes.
    pub fn max_statsd_size(&self) -> usize {
        self.values.limits.max_statsd_size.as_bytes()
    }

    /// Returns the maximum size of a metric buckets payload in bytes.
    pub fn max_metric_buckets_size(&self) -> usize {
        self.values.limits.max_metric_buckets_size.as_bytes()
    }

    /// Returns the maximum API payload size in bytes.
    pub fn max_api_payload_size(&self) -> usize {
        self.values.limits.max_api_payload_size.as_bytes()
    }

    /// Returns the maximum API file upload size in bytes.
    pub fn max_api_file_upload_size(&self) -> usize {
        self.values.limits.max_api_file_upload_size.as_bytes()
    }

    /// Returns the maximum API chunk upload size in bytes.
    pub fn max_api_chunk_upload_size(&self) -> usize {
        self.values.limits.max_api_chunk_upload_size.as_bytes()
    }

    /// Returns the maximum size of a profile payload in bytes.
    pub fn max_profile_size(&self) -> usize {
        self.values.limits.max_profile_size.as_bytes()
    }

    /// Returns the maximum size of a trace metric payload in bytes.
    pub fn max_trace_metric_size(&self) -> usize {
        self.values.limits.max_trace_metric_size.as_bytes()
    }

    /// Returns the maximum size of a compressed replay payload in bytes.
    pub fn max_replay_compressed_size(&self) -> usize {
        self.values.limits.max_replay_compressed_size.as_bytes()
    }

    /// Returns the maximum size of an uncompressed replay payload in bytes.
    pub fn max_replay_uncompressed_size(&self) -> usize {
        self.values.limits.max_replay_uncompressed_size.as_bytes()
    }

    /// Returns the maximum size of a replay message in bytes.
    pub fn max_replay_message_size(&self) -> usize {
        self.values.limits.max_replay_message_size.as_bytes()
    }

    /// Returns the maximum number of concurrent upstream requests.
    pub fn max_concurrent_requests(&self) -> usize {
        self.values.limits.max_concurrent_requests
    }

    /// Returns the maximum number of concurrent upstream queries.
    pub fn max_concurrent_queries(&self) -> usize {
        self.values.limits.max_concurrent_queries
    }

    /// Returns the maximum size of a removed attribute key in bytes.
    pub fn max_removed_attribute_key_size(&self) -> usize {
        self.values.limits.max_removed_attribute_key_size.as_bytes()
    }

    /// Returns the timeout for upstream queries.
    pub fn query_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.query_timeout)
    }

    /// Returns the timeout for graceful shutdown.
    pub fn shutdown_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.shutdown_timeout)
    }

    /// Returns the keep-alive timeout for inbound connections.
    pub fn keepalive_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.keepalive_timeout)
    }

    /// Returns the idle timeout for inbound connections, if configured.
    pub fn idle_timeout(&self) -> Option<Duration> {
        self.values.limits.idle_timeout.map(Duration::from_secs)
    }

    /// Returns the maximum number of inbound connections, if limited.
    pub fn max_connections(&self) -> Option<usize> {
        self.values.limits.max_connections
    }

    /// Returns the TCP listen backlog.
    pub fn tcp_listen_backlog(&self) -> u32 {
        self.values.limits.tcp_listen_backlog
    }

    /// Returns the number of CPU-bound worker threads.
    pub fn cpu_concurrency(&self) -> usize {
        self.values.limits.max_thread_count
    }

    /// Returns the maximum concurrency per worker pool.
    pub fn pool_concurrency(&self) -> usize {
        self.values.limits.max_pool_concurrency
    }

    /// Returns the maximum number of queries per upstream batch.
    pub fn query_batch_size(&self) -> usize {
        self.values.cache.batch_size
    }

    /// Returns the path of the `projects` directory with static configs.
    pub fn project_configs_path(&self) -> PathBuf {
        self.path.join("projects")
    }

    /// Returns `true` if processing mode is enabled.
    pub fn processing_enabled(&self) -> bool {
        self.values.processing.enabled
    }

    /// Returns the level of event normalization this relay performs.
    pub fn normalization_level(&self) -> NormalizationLevel {
        self.values.normalization.level
    }

    /// Returns the GeoIP database path, if configured.
    ///
    /// The top-level `geoip.path` takes precedence over the legacy
    /// `processing.geoip_path`.
    pub fn geoip_path(&self) -> Option<&Path> {
        self.values
            .geoip
            .path
            .as_deref()
            .or(self.values.processing.geoip_path.as_deref())
    }

    /// Returns the maximum number of seconds a timestamp may be in the future.
    pub fn max_secs_in_future(&self) -> i64 {
        self.values.processing.max_secs_in_future.into()
    }

    /// Returns the maximum age of session timestamps, in seconds.
    pub fn max_session_secs_in_past(&self) -> i64 {
        self.values.processing.max_session_secs_in_past.into()
    }

    /// Resolves the Kafka configuration for the given topic.
    pub fn kafka_configs(
        &self,
        topic: KafkaTopic,
    ) -> Result<KafkaTopicConfig<'_>, KafkaConfigError> {
        self.values.processing.topics.get(topic).kafka_configs(
            &self.values.processing.kafka_config,
            &self.values.processing.secondary_kafka_configs,
        )
    }

    /// Returns `true` if Kafka topics should be validated on startup.
    pub fn kafka_validate_topics(&self) -> bool {
        self.values.processing.kafka_validate_topics
    }

    /// Returns topic assignments that did not match any known topic.
    pub fn unused_topic_assignments(&self) -> &relay_kafka::Unused {
        &self.values.processing.topics.unused
    }

    /// Returns the object store service configuration.
    pub fn objectstore(&self) -> &ObjectstoreServiceConfig {
        &self.values.processing.objectstore
    }

    /// Returns the upload configuration.
    pub fn upload(&self) -> &Upload {
        &self.values.upload
    }

    /// Builds the Redis configurations, if Redis is configured.
    pub fn redis(&self) -> Option<RedisConfigsRef<'_>> {
        let redis_configs = self.values.processing.redis.as_ref()?;

        Some(build_redis_configs(
            redis_configs,
            self.cpu_concurrency() as u32,
            self.pool_concurrency() as u32,
        ))
    }

    /// Returns the chunk size for attachments in bytes.
    pub fn attachment_chunk_size(&self) -> usize {
        self.values.processing.attachment_chunk_size.as_bytes()
    }

    /// Returns the maximum batch size for flushed metrics, in bytes.
    pub fn metrics_max_batch_size_bytes(&self) -> usize {
        self.values.aggregator.max_flush_bytes
    }

    /// Returns the prefix for project config cache keys.
    pub fn projectconfig_cache_prefix(&self) -> &str {
        &self.values.processing.projectconfig_cache_prefix
    }

    /// Returns the maximum rate limit duration, if capped.
    pub fn max_rate_limit(&self) -> Option<u64> {
        self.values.processing.max_rate_limit.map(u32::into)
    }

    /// Returns the quota cache ratio, if configured.
    pub fn quota_cache_ratio(&self) -> Option<f32> {
        self.values.processing.quota_cache_ratio
    }

    /// Returns the maximum quota cache value, if configured.
    pub fn quota_cache_max(&self) -> Option<f32> {
        self.values.processing.quota_cache_max
    }

    /// Returns the vacuum interval for the cardinality limiter cache.
    pub fn cardinality_limiter_cache_vacuum_interval(&self) -> Duration {
        Duration::from_secs(self.values.cardinality_limiter.cache_vacuum_interval)
    }

    /// Returns the refresh interval for health checks.
    pub fn health_refresh_interval(&self) -> Duration {
        Duration::from_millis(self.values.health.refresh_interval_ms)
    }

    /// Returns the absolute memory watermark in bytes.
    ///
    /// Unlimited (`u64::MAX`) when not configured.
    pub fn health_max_memory_watermark_bytes(&self) -> u64 {
        self.values
            .health
            .max_memory_bytes
            .as_ref()
            .map_or(u64::MAX, |b| b.as_bytes() as u64)
    }

    /// Returns the relative memory watermark as a fraction.
    pub fn health_max_memory_watermark_percent(&self) -> f32 {
        self.values.health.max_memory_percent
    }

    /// Returns the timeout for internal health probes.
    pub fn health_probe_timeout(&self) -> Duration {
        Duration::from_millis(self.values.health.probe_timeout_ms)
    }

    /// Returns the refresh frequency for memory statistics, in milliseconds.
    pub fn memory_stat_refresh_frequency_ms(&self) -> u64 {
        self.values.health.memory_stat_refresh_frequency_ms
    }

    /// Returns the maximum size of the COGS measurement queue.
    pub fn cogs_max_queue_size(&self) -> u64 {
        self.values.cogs.max_queue_size
    }

    /// Returns the resource ID for Relay's own COGS measurements.
    pub fn cogs_relay_resource_id(&self) -> &str {
        &self.values.cogs.relay_resource_id
    }

    /// Returns the default metrics aggregator configuration.
    pub fn default_aggregator_config(&self) -> &AggregatorServiceConfig {
        &self.values.aggregator
    }

    /// Returns the namespace-scoped secondary aggregator configurations.
    pub fn secondary_aggregator_configs(&self) -> &Vec<ScopedAggregatorConfig> {
        &self.values.secondary_aggregators
    }

    /// Returns the aggregator config for the given metric namespace.
    ///
    /// The first matching secondary aggregator wins; otherwise the default
    /// aggregator is used.
    pub fn aggregator_config_for(&self, namespace: MetricNamespace) -> &AggregatorServiceConfig {
        for entry in &self.values.secondary_aggregators {
            if entry.condition.matches(Some(namespace)) {
                return &entry.config;
            }
        }
        &self.values.aggregator
    }

    /// Returns statically configured relays keyed by relay ID.
    pub fn static_relays(&self) -> &HashMap<RelayId, RelayInfo> {
        &self.values.auth.static_relays
    }

    /// Returns the maximum age of accepted request signatures.
    pub fn signature_max_age(&self) -> Duration {
        Duration::from_secs(self.values.auth.signature_max_age)
    }

    /// Returns `true` if unknown envelope items should be accepted.
    ///
    /// Defaults to forwarding unknown items when processing is disabled.
    pub fn accept_unknown_items(&self) -> bool {
        let forward = self.values.routing.accept_unknown_items;
        forward.unwrap_or_else(|| !self.processing_enabled())
    }
}
2832
2833impl Default for Config {
2834 fn default() -> Self {
2835 Self {
2836 values: ConfigValues::default(),
2837 credentials: None,
2838 path: PathBuf::new(),
2839 }
2840 }
2841}
2842
#[cfg(test)]
mod tests {
    use super::*;

    /// Legacy `event_*` cache keys must populate the envelope-based fields.
    #[test]
    fn test_event_buffer_size() {
        let yaml = r###"
cache:
    event_buffer_size: 1000000
    event_expiry: 1800
"###;

        let values: ConfigValues = serde_yaml::from_str(yaml).unwrap();
        assert_eq!(values.cache.envelope_buffer_size, 1_000_000);
        assert_eq!(values.cache.envelope_expiry, 1800);
    }

    /// `EmitOutcomes` round-trips through its custom JSON representation.
    #[test]
    fn test_emit_outcomes() {
        let cases = [
            ("true", EmitOutcomes::AsOutcomes),
            ("false", EmitOutcomes::None),
            ("\"as_client_reports\"", EmitOutcomes::AsClientReports),
        ];

        for (serialized, expected) in cases {
            let parsed: EmitOutcomes = serde_json::from_str(serialized).unwrap();
            assert_eq!(parsed, expected);
            assert_eq!(serde_json::to_string(&parsed).unwrap(), serialized);
        }
    }

    /// Arbitrary strings must be rejected by the deserializer.
    #[test]
    fn test_emit_outcomes_invalid() {
        assert!(serde_json::from_str::<EmitOutcomes>("asdf").is_err());
    }
}