use std::collections::{BTreeMap, HashMap};
use std::error::Error;
use std::io::Write;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs};
use std::num::NonZeroU8;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::Duration;
use std::{env, fmt, fs, io};
10
11use anyhow::Context;
12use relay_auth::{PublicKey, RelayId, SecretKey, generate_key_pair, generate_relay_id};
13use relay_common::Dsn;
14use relay_kafka::{
15 ConfigError as KafkaConfigError, KafkaConfigParam, KafkaParams, KafkaTopic, TopicAssignment,
16 TopicAssignments,
17};
18use relay_metrics::MetricNamespace;
19use serde::de::{DeserializeOwned, Unexpected, Visitor};
20use serde::{Deserialize, Deserializer, Serialize, Serializer};
21use uuid::Uuid;
22
23use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
24use crate::byte_size::ByteSize;
25use crate::upstream::UpstreamDescriptor;
26use crate::{RedisConfig, RedisConfigs, RedisConfigsRef, build_redis_configs};
27
28const DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD: u64 = 10;
29
30static CONFIG_YAML_HEADER: &str = r###"# Please see the relevant documentation.
31# Performance tuning: https://docs.sentry.io/product/relay/operating-guidelines/
32# All config options: https://docs.sentry.io/product/relay/options/
33"###;
34
/// The category of a failure that can occur while reading, writing, or
/// validating Relay configuration.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[non_exhaustive]
pub enum ConfigErrorKind {
    /// The config file could not be opened for reading.
    CouldNotOpenFile,
    /// The config file could not be created or written.
    CouldNotWriteFile,
    /// The file's YAML payload failed to parse.
    BadYaml,
    /// The file's JSON payload failed to parse.
    BadJson,
    /// A configuration value is invalid.
    InvalidValue,
    /// Processing was enabled, but this binary was not compiled with the
    /// processing feature.
    ProcessingNotAvailable,
}
53
54impl fmt::Display for ConfigErrorKind {
55 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
56 match self {
57 Self::CouldNotOpenFile => write!(f, "could not open config file"),
58 Self::CouldNotWriteFile => write!(f, "could not write config file"),
59 Self::BadYaml => write!(f, "could not parse yaml config file"),
60 Self::BadJson => write!(f, "could not parse json config file"),
61 Self::InvalidValue => write!(f, "invalid config value"),
62 Self::ProcessingNotAvailable => write!(
63 f,
64 "was not compiled with processing, cannot enable processing"
65 ),
66 }
67 }
68}
69
/// Indicates where a configuration error originated from.
///
/// The hand-written `Default` impl is replaced by `#[derive(Default)]`
/// together with the `#[default]` variant attribute, the modern idiom for
/// enums with a canonical default variant. Behavior is unchanged:
/// `ConfigErrorSource::default()` is still `None`.
#[derive(Debug, Default)]
enum ConfigErrorSource {
    /// The error is not tied to a specific file or field.
    #[default]
    None,
    /// The error originated while handling the given config file.
    File(PathBuf),
    /// The error originated from an override of the named config field.
    FieldOverride(String),
}
86
87impl fmt::Display for ConfigErrorSource {
88 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
89 match self {
90 ConfigErrorSource::None => Ok(()),
91 ConfigErrorSource::File(file_name) => {
92 write!(f, " (file {})", file_name.display())
93 }
94 ConfigErrorSource::FieldOverride(name) => write!(f, " (field {name})"),
95 }
96 }
97}
98
/// An error in the Relay configuration, combining a [`ConfigErrorKind`]
/// with the source (file or overridden field) it came from, if known.
#[derive(Debug)]
pub struct ConfigError {
    // Where the error originated; `ConfigErrorSource::None` when unknown.
    source: ConfigErrorSource,
    // The category of failure.
    kind: ConfigErrorKind,
}

impl ConfigError {
    /// Creates an error of `kind` with no associated source.
    #[inline]
    fn new(kind: ConfigErrorKind) -> Self {
        Self {
            source: ConfigErrorSource::None,
            kind,
        }
    }

    /// Creates an `InvalidValue` error attributed to the named override field.
    #[inline]
    fn field(field: &'static str) -> Self {
        Self {
            source: ConfigErrorSource::FieldOverride(field.to_owned()),
            kind: ConfigErrorKind::InvalidValue,
        }
    }

    /// Creates an error of `kind` attributed to the config file at `p`.
    #[inline]
    fn file(kind: ConfigErrorKind, p: impl AsRef<Path>) -> Self {
        Self {
            source: ConfigErrorSource::File(p.as_ref().to_path_buf()),
            kind,
        }
    }

    /// Returns the error kind of the error.
    pub fn kind(&self) -> ConfigErrorKind {
        self.kind
    }
}
136
137impl fmt::Display for ConfigError {
138 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
139 write!(f, "{}{}", self.kind(), self.source)
140 }
141}
142
143impl Error for ConfigError {}
144
/// The serialization format of a config file on disk.
enum ConfigFormat {
    /// YAML.
    Yaml,
    /// JSON.
    Json,
}

impl ConfigFormat {
    /// Returns the file extension used for this format.
    pub fn extension(&self) -> &'static str {
        if matches!(self, Self::Yaml) { "yml" } else { "json" }
    }
}
158
/// A config object that can be loaded from and saved to a file under a
/// base directory, in the format given by [`ConfigObject::format`].
trait ConfigObject: DeserializeOwned + Serialize {
    /// The on-disk serialization format for this object.
    fn format() -> ConfigFormat;

    /// The file stem (without extension) of this object's file.
    fn name() -> &'static str;

    /// Returns the full path of this object's file within `base`.
    fn path(base: &Path) -> PathBuf {
        base.join(format!("{}.{}", Self::name(), Self::format().extension()))
    }

    /// Loads and deserializes the object from its file under `base`.
    ///
    /// Deserialization goes through `serde_vars`, which substitutes values
    /// from environment variables during deserialization.
    fn load(base: &Path) -> anyhow::Result<Self> {
        let path = Self::path(base);

        let f = fs::File::open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, &path))?;
        let f = io::BufReader::new(f);

        let mut source = serde_vars::EnvSource::default();
        match Self::format() {
            ConfigFormat::Yaml => {
                serde_vars::deserialize(serde_yaml::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadYaml, &path))
            }
            ConfigFormat::Json => {
                serde_vars::deserialize(&mut serde_json::Deserializer::from_reader(f), &mut source)
                    .with_context(|| ConfigError::file(ConfigErrorKind::BadJson, &path))
            }
        }
    }

    /// Serializes the object and writes it to its file under `base`,
    /// truncating any pre-existing file.
    fn save(&self, base: &Path) -> anyhow::Result<()> {
        let path = Self::path(base);
        let mut options = fs::OpenOptions::new();
        options.write(true).truncate(true).create(true);

        // Restrict the file to the owning user on Unix (0o600), presumably
        // because the file may contain secrets (e.g. credentials).
        #[cfg(unix)]
        {
            use std::os::unix::fs::OpenOptionsExt;
            options.mode(0o600);
        }

        let mut f = options
            .open(&path)
            .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?;

        match Self::format() {
            // YAML files get a documentation header prepended.
            ConfigFormat::Yaml => {
                f.write_all(CONFIG_YAML_HEADER.as_bytes())?;
                serde_yaml::to_writer(&mut f, self)
                    .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?
            }
            ConfigFormat::Json => serde_json::to_writer_pretty(&mut f, self)
                .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path))?,
        }

        // Trailing newline is cosmetic; a failure to write it is ignored.
        f.write_all(b"\n").ok();

        Ok(())
    }
}
224
/// Raw string overrides for the on-disk configuration, typically sourced
/// from the command line or environment.
///
/// All values are unparsed strings; they are validated and applied in
/// `Config::apply_override`.
#[derive(Debug, Default)]
pub struct OverridableConfig {
    /// Overrides the relay mode (`managed`, `static`, `proxy`, `capture`).
    pub mode: Option<String>,
    /// Overrides the instance type (`default` or `canary`).
    pub instance: Option<String>,
    /// Overrides the logging level.
    pub log_level: Option<String>,
    /// Overrides the logging format.
    pub log_format: Option<String>,
    /// Overrides the upstream URL.
    pub upstream: Option<String>,
    /// Alternative to `upstream`: derives the upstream from a DSN.
    pub upstream_dsn: Option<String>,
    /// Overrides the listen host (IP address).
    pub host: Option<String>,
    /// Overrides the listen port.
    pub port: Option<String>,
    /// Enables/disables processing ("true"/"1" or "false"/"0"/"").
    pub processing: Option<String>,
    /// Overrides the Kafka `bootstrap.servers` parameter.
    pub kafka_url: Option<String>,
    /// Overrides the Redis connection URL.
    pub redis_url: Option<String>,
    /// Overrides the relay id (UUID) of the credentials.
    pub id: Option<String>,
    /// Overrides the secret key of the credentials.
    pub secret_key: Option<String>,
    /// Overrides the public key of the credentials.
    pub public_key: Option<String>,
    /// Overrides the outcome source.
    pub outcome_source: Option<String>,
    /// Overrides the shutdown timeout.
    pub shutdown_timeout: Option<String>,
    /// Overrides the server name reported to Sentry.
    pub server_name: Option<String>,
}
264
/// The relay credentials: its key pair and unique id.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct Credentials {
    /// The relay's secret key.
    pub secret_key: SecretKey,
    /// The relay's public key.
    pub public_key: PublicKey,
    /// The relay's unique identifier.
    pub id: RelayId,
}

impl Credentials {
    /// Generates a fresh set of credentials: a new key pair and relay id.
    pub fn generate() -> Self {
        relay_log::info!("generating new relay credentials");
        let (sk, pk) = generate_key_pair();
        Self {
            secret_key: sk,
            public_key: pk,
            id: generate_relay_id(),
        }
    }

    /// Serializes the credentials to a JSON string.
    pub fn to_json_string(&self) -> anyhow::Result<String> {
        serde_json::to_string(self)
            .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
    }
}
294
/// Credentials are stored as JSON in a file named `credentials.json`.
impl ConfigObject for Credentials {
    fn format() -> ConfigFormat {
        ConfigFormat::Json
    }
    fn name() -> &'static str {
        "credentials"
    }
}
303
/// Information about a relay, as stored in `AuthConfig::static_relays`
/// keyed by relay id.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RelayInfo {
    /// The relay's public key.
    pub public_key: PublicKey,

    /// Whether the relay is internal. Defaults to `false`.
    #[serde(default)]
    pub internal: bool,
}

impl RelayInfo {
    /// Creates a non-internal `RelayInfo` for the given public key.
    pub fn new(public_key: PublicKey) -> Self {
        Self {
            public_key,
            internal: false,
        }
    }
}
325
/// The operation mode of this relay.
///
/// NOTE(review): the runtime semantics of each mode are defined by the
/// relay services, not visible in this file. `Display` and `FromStr` use
/// the lowercase variant names.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum RelayMode {
    /// Proxy mode.
    Proxy,

    /// Static mode.
    Static,

    /// Managed mode (the default, see `Relay::default`).
    Managed,

    /// Capture mode.
    Capture,
}
355
356impl fmt::Display for RelayMode {
357 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
358 match self {
359 RelayMode::Proxy => write!(f, "proxy"),
360 RelayMode::Static => write!(f, "static"),
361 RelayMode::Managed => write!(f, "managed"),
362 RelayMode::Capture => write!(f, "capture"),
363 }
364 }
365}
366
/// The instance type of this relay.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum RelayInstance {
    /// A regular relay instance.
    Default,

    /// A canary instance.
    Canary,
}
377
378impl RelayInstance {
379 pub fn is_canary(&self) -> bool {
381 matches!(self, RelayInstance::Canary)
382 }
383}
384
385impl fmt::Display for RelayInstance {
386 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
387 match self {
388 RelayInstance::Default => write!(f, "default"),
389 RelayInstance::Canary => write!(f, "canary"),
390 }
391 }
392}
393
394impl FromStr for RelayInstance {
395 type Err = fmt::Error;
396
397 fn from_str(s: &str) -> Result<Self, Self::Err> {
398 match s {
399 "canary" => Ok(RelayInstance::Canary),
400 _ => Ok(RelayInstance::Default),
401 }
402 }
403}
404
/// Error returned when a string does not name a valid relay mode.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ParseRelayModeError;

impl fmt::Display for ParseRelayModeError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Relay mode must be one of: managed, static, proxy, capture")
    }
}

impl Error for ParseRelayModeError {}
419
420impl FromStr for RelayMode {
421 type Err = ParseRelayModeError;
422
423 fn from_str(s: &str) -> Result<Self, Self::Err> {
424 match s {
425 "proxy" => Ok(RelayMode::Proxy),
426 "static" => Ok(RelayMode::Static),
427 "managed" => Ok(RelayMode::Managed),
428 "capture" => Ok(RelayMode::Capture),
429 _ => Err(ParseRelayModeError),
430 }
431 }
432}
433
/// Returns `true` when `t` equals its type's `Default` value.
///
/// Used in `skip_serializing_if` attributes to omit default-valued fields.
fn is_default<T: Default + PartialEq>(t: &T) -> bool {
    t == &T::default()
}
438
/// Best-effort detection of whether Relay is running inside Docker.
///
/// Checks for the `/.dockerenv` marker file first, then falls back to
/// scanning `/proc/self/cgroup` for a `/docker` entry.
fn is_docker() -> bool {
    if fs::metadata("/.dockerenv").is_ok() {
        return true;
    }

    fs::read_to_string("/proc/self/cgroup").is_ok_and(|s| s.contains("/docker"))
}
447
448fn default_host() -> IpAddr {
450 if is_docker() {
451 "0.0.0.0".parse().unwrap()
453 } else {
454 "127.0.0.1".parse().unwrap()
455 }
456}
457
458#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
462#[serde(rename_all = "lowercase")]
463pub enum ReadinessCondition {
464 Authenticated,
473 Always,
475}
476
477impl Default for ReadinessCondition {
478 fn default() -> Self {
479 Self::Authenticated
480 }
481}
482
/// General relay settings: mode, instance type, upstream, and the local
/// listen address.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Relay {
    /// The operation mode of this relay. Defaults to `managed`.
    pub mode: RelayMode,
    /// The instance type of this relay. Defaults to `default`.
    pub instance: RelayInstance,
    /// The upstream to forward to. Defaults to `https://sentry.io/`.
    pub upstream: UpstreamDescriptor<'static>,
    /// The IP address to listen on. Defaults to loopback, or `0.0.0.0`
    /// when running inside Docker.
    pub host: IpAddr,
    /// The TCP port to listen on. Defaults to `3000`.
    pub port: u16,
    /// Optional TLS port. Never serialized back to disk.
    #[serde(skip_serializing)]
    pub tls_port: Option<u16>,
    /// Path to the TLS identity. Never serialized back to disk.
    #[serde(skip_serializing)]
    pub tls_identity_path: Option<PathBuf>,
    /// Password for the TLS identity. Never serialized back to disk.
    #[serde(skip_serializing)]
    pub tls_identity_password: Option<String>,
    /// Allows overriding project ids. Defaults to `false`.
    /// NOTE(review): exact semantics defined by consumers — confirm.
    #[serde(skip_serializing_if = "is_default")]
    pub override_project_ids: bool,
}

impl Default for Relay {
    fn default() -> Self {
        Relay {
            mode: RelayMode::Managed,
            instance: RelayInstance::Default,
            upstream: "https://sentry.io/".parse().unwrap(),
            host: default_host(),
            port: 3000,
            tls_port: None,
            tls_identity_path: None,
            tls_identity_password: None,
            override_project_ids: false,
        }
    }
}
529
/// Settings for Relay's own statsd metric submission.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Metrics {
    /// Address of the statsd server. Unset by default.
    pub statsd: Option<String>,
    /// Prefix for emitted metric names. Defaults to `"sentry.relay"`.
    pub prefix: String,
    /// Tags attached to every emitted metric.
    pub default_tags: BTreeMap<String, String>,
    /// If set, the name of a tag to carry the hostname.
    pub hostname_tag: Option<String>,
    /// Sample rate for metrics. Defaults to `1.0`.
    pub sample_rate: f32,
    /// Interval for periodic metrics, in seconds. Defaults to `5`.
    /// NOTE(review): unit presumed from the field name — confirm.
    pub periodic_secs: u64,
    /// Whether metrics are aggregated before emission. Defaults to `true`.
    pub aggregate: bool,
}

impl Default for Metrics {
    fn default() -> Self {
        Metrics {
            statsd: None,
            prefix: "sentry.relay".into(),
            default_tags: BTreeMap::new(),
            hostname_tag: None,
            sample_rate: 1.0,
            periodic_secs: 5,
            aggregate: true,
        }
    }
}
575
/// Settings for Sentry-internal metrics.
#[derive(Serialize, Deserialize, Debug, Default)]
#[serde(default)]
pub struct SentryMetrics {
    /// Enables metric stats. Defaults to `false`.
    pub metric_stats_enabled: bool,
}
592
/// Size, count, and timeout limits enforced by this relay.
///
/// NOTE(review): the bare integer timeout fields appear to be in seconds,
/// judging by their defaults — confirm against consumers.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Limits {
    /// Maximum concurrent requests. Default `100`.
    pub max_concurrent_requests: usize,
    /// Maximum concurrent upstream queries. Default `5`.
    pub max_concurrent_queries: usize,
    /// Maximum event payload size. Default 1 MiB.
    pub max_event_size: ByteSize,
    /// Maximum size of a single attachment. Default 100 MiB.
    pub max_attachment_size: ByteSize,
    /// Maximum combined size of all attachments. Default 100 MiB.
    pub max_attachments_size: ByteSize,
    /// Maximum client-report payload size. Default 4 KiB.
    pub max_client_reports_size: ByteSize,
    /// Maximum check-in payload size. Default 100 KiB.
    pub max_check_in_size: ByteSize,
    /// Maximum size of a full envelope. Default 100 MiB.
    pub max_envelope_size: ByteSize,
    /// Maximum number of sessions per envelope. Default `100`.
    pub max_session_count: usize,
    /// Maximum API payload size. Default 20 MiB.
    pub max_api_payload_size: ByteSize,
    /// Maximum API file upload size. Default 40 MiB.
    pub max_api_file_upload_size: ByteSize,
    /// Maximum API chunk upload size. Default 100 MiB.
    pub max_api_chunk_upload_size: ByteSize,
    /// Maximum profile payload size. Default 50 MiB.
    pub max_profile_size: ByteSize,
    /// Maximum log payload size. Default 1 MiB.
    pub max_log_size: ByteSize,
    /// Maximum span payload size. Default 1 MiB.
    pub max_span_size: ByteSize,
    /// Maximum statsd payload size. Default 1 MiB.
    pub max_statsd_size: ByteSize,
    /// Maximum metric-buckets payload size. Default 1 MiB.
    pub max_metric_buckets_size: ByteSize,
    /// Maximum compressed replay size. Default 10 MiB.
    pub max_replay_compressed_size: ByteSize,
    /// Maximum uncompressed replay size. Default 100 MiB.
    /// Accepts the legacy key `max_replay_size` as an alias.
    #[serde(alias = "max_replay_size")]
    max_replay_uncompressed_size: ByteSize,
    /// Maximum replay message size. Default 15 MiB.
    pub max_replay_message_size: ByteSize,
    /// Maximum worker thread count. Defaults to the number of CPUs.
    pub max_thread_count: usize,
    /// Maximum pool concurrency. Default `1`.
    pub max_pool_concurrency: usize,
    /// Timeout for upstream queries. Default `30`.
    pub query_timeout: u64,
    /// Graceful shutdown timeout. Default `10`.
    pub shutdown_timeout: u64,
    /// HTTP keep-alive timeout. Default `5`.
    pub keepalive_timeout: u64,
    /// Optional idle connection timeout. Unset by default.
    pub idle_timeout: Option<u64>,
    /// Optional cap on open connections. Unset by default.
    pub max_connections: Option<usize>,
    /// TCP listen backlog. Default `1024`.
    pub tcp_listen_backlog: u32,
}

impl Default for Limits {
    fn default() -> Self {
        Limits {
            max_concurrent_requests: 100,
            max_concurrent_queries: 5,
            max_event_size: ByteSize::mebibytes(1),
            max_attachment_size: ByteSize::mebibytes(100),
            max_attachments_size: ByteSize::mebibytes(100),
            max_client_reports_size: ByteSize::kibibytes(4),
            max_check_in_size: ByteSize::kibibytes(100),
            max_envelope_size: ByteSize::mebibytes(100),
            max_session_count: 100,
            max_api_payload_size: ByteSize::mebibytes(20),
            max_api_file_upload_size: ByteSize::mebibytes(40),
            max_api_chunk_upload_size: ByteSize::mebibytes(100),
            max_profile_size: ByteSize::mebibytes(50),
            max_log_size: ByteSize::mebibytes(1),
            max_span_size: ByteSize::mebibytes(1),
            max_statsd_size: ByteSize::mebibytes(1),
            max_metric_buckets_size: ByteSize::mebibytes(1),
            max_replay_compressed_size: ByteSize::mebibytes(10),
            max_replay_uncompressed_size: ByteSize::mebibytes(100),
            max_replay_message_size: ByteSize::mebibytes(15),
            max_thread_count: num_cpus::get(),
            max_pool_concurrency: 1,
            query_timeout: 30,
            shutdown_timeout: 10,
            keepalive_timeout: 5,
            idle_timeout: None,
            max_connections: None,
            tcp_listen_backlog: 1024,
        }
    }
}
721
/// Routing-related settings.
#[derive(Debug, Default, Deserialize, Serialize)]
#[serde(default)]
pub struct Routing {
    /// Whether to accept envelope items of unknown type. `None` (the
    /// default) leaves the decision to the consumer's own default.
    pub accept_unknown_items: Option<bool>,
}
737
/// HTTP content encodings for upstream submission.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum HttpEncoding {
    /// No content encoding (the default).
    #[default]
    Identity,
    /// Deflate encoding.
    Deflate,
    /// Gzip encoding (also parsed from `x-gzip`).
    Gzip,
    /// Brotli encoding.
    Br,
    /// Zstandard encoding.
    Zstd,
}
766
767impl HttpEncoding {
768 pub fn parse(str: &str) -> Self {
770 let str = str.trim();
771 if str.eq_ignore_ascii_case("zstd") {
772 Self::Zstd
773 } else if str.eq_ignore_ascii_case("br") {
774 Self::Br
775 } else if str.eq_ignore_ascii_case("gzip") || str.eq_ignore_ascii_case("x-gzip") {
776 Self::Gzip
777 } else if str.eq_ignore_ascii_case("deflate") {
778 Self::Deflate
779 } else {
780 Self::Identity
781 }
782 }
783
784 pub fn name(&self) -> Option<&'static str> {
788 match self {
789 Self::Identity => None,
790 Self::Deflate => Some("deflate"),
791 Self::Gzip => Some("gzip"),
792 Self::Br => Some("br"),
793 Self::Zstd => Some("zstd"),
794 }
795 }
796}
797
/// Settings for the HTTP client talking to the upstream.
///
/// NOTE(review): the interval/timeout fields appear to be in seconds,
/// judging by their defaults — confirm against consumers.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Http {
    /// Request timeout. Default `5`.
    pub timeout: u32,
    /// Connection establishment timeout. Default `3`.
    pub connection_timeout: u32,
    /// Maximum interval between retries. Default `60`.
    pub max_retry_interval: u32,
    /// Optional override for the `Host` header.
    pub host_header: Option<String>,
    /// Interval for re-authentication. Default `Some(600)`.
    pub auth_interval: Option<u64>,
    /// Grace period before a network outage is acted upon. Default `10`.
    pub outage_grace_period: u64,
    /// Delay before retrying. Default `1`.
    pub retry_delay: u64,
    /// Interval after which a project is considered failed. Default `90`.
    pub project_failure_interval: u64,
    /// Content encoding for upstream requests. Default `zstd`.
    pub encoding: HttpEncoding,
    /// Enables global metrics. Default `false`.
    /// NOTE(review): semantics defined by consumers — confirm.
    pub global_metrics: bool,
}
864
865impl Default for Http {
866 fn default() -> Self {
867 Http {
868 timeout: 5,
869 connection_timeout: 3,
870 max_retry_interval: 60, host_header: None,
872 auth_interval: Some(600), outage_grace_period: DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD,
874 retry_delay: default_retry_delay(),
875 project_failure_interval: default_project_failure_interval(),
876 encoding: HttpEncoding::Zstd,
877 global_metrics: false,
878 }
879 }
880}
881
/// Default for `Http::retry_delay`.
fn default_retry_delay() -> u64 {
    1
}

/// Default for `Http::project_failure_interval`.
fn default_project_failure_interval() -> u64 {
    90
}

/// Default for `EnvelopeSpool::max_disk_size`: 500 MiB.
fn spool_envelopes_max_disk_size() -> ByteSize {
    ByteSize::mebibytes(500)
}

/// Default for `EnvelopeSpool::batch_size_bytes`: 10 KiB.
fn spool_envelopes_batch_size_bytes() -> ByteSize {
    ByteSize::kibibytes(10)
}

/// Default for `EnvelopeSpool::max_envelope_delay_secs`: 24 hours.
fn spool_envelopes_max_envelope_delay_secs() -> u64 {
    24 * 60 * 60
}

/// Default for `EnvelopeSpool::disk_usage_refresh_frequency_ms`.
fn spool_disk_usage_refresh_frequency_ms() -> u64 {
    100
}

/// Default for `EnvelopeSpool::max_backpressure_envelopes`.
fn spool_max_backpressure_envelopes() -> usize {
    500
}

/// Default for `EnvelopeSpool::max_backpressure_memory_percent`.
fn spool_max_backpressure_memory_percent() -> f32 {
    0.9
}

/// Default for `EnvelopeSpool::partitions`: a single partition.
fn spool_envelopes_partitions() -> NonZeroU8 {
    NonZeroU8::new(1).unwrap()
}
925
/// Settings for the envelope spool.
#[derive(Debug, Serialize, Deserialize)]
pub struct EnvelopeSpool {
    /// Optional path for the on-disk spool. Unset by default.
    pub path: Option<PathBuf>,
    /// Maximum on-disk size of the spool. Default 500 MiB.
    #[serde(default = "spool_envelopes_max_disk_size")]
    pub max_disk_size: ByteSize,
    /// Batch size for spooled envelopes, in bytes. Default 10 KiB.
    #[serde(default = "spool_envelopes_batch_size_bytes")]
    pub batch_size_bytes: ByteSize,
    /// Maximum delay of a spooled envelope, in seconds. Default 24 hours.
    #[serde(default = "spool_envelopes_max_envelope_delay_secs")]
    pub max_envelope_delay_secs: u64,
    /// How often disk usage is refreshed, in ms. Default `100`.
    #[serde(default = "spool_disk_usage_refresh_frequency_ms")]
    pub disk_usage_refresh_frequency_ms: u64,
    /// Envelope-count threshold for backpressure. Default `500`.
    #[serde(default = "spool_max_backpressure_envelopes")]
    pub max_backpressure_envelopes: usize,
    /// Memory-usage fraction threshold for backpressure. Default `0.9`.
    #[serde(default = "spool_max_backpressure_memory_percent")]
    pub max_backpressure_memory_percent: f32,
    /// Number of spool partitions. Default `1`.
    #[serde(default = "spool_envelopes_partitions")]
    pub partitions: NonZeroU8,
}

impl Default for EnvelopeSpool {
    fn default() -> Self {
        Self {
            path: None,
            max_disk_size: spool_envelopes_max_disk_size(),
            batch_size_bytes: spool_envelopes_batch_size_bytes(),
            max_envelope_delay_secs: spool_envelopes_max_envelope_delay_secs(),
            disk_usage_refresh_frequency_ms: spool_disk_usage_refresh_frequency_ms(),
            max_backpressure_envelopes: spool_max_backpressure_envelopes(),
            max_backpressure_memory_percent: spool_max_backpressure_memory_percent(),
            partitions: spool_envelopes_partitions(),
        }
    }
}
1024
/// Top-level spooling settings.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Spool {
    /// Settings for the envelope spool.
    #[serde(default)]
    pub envelopes: EnvelopeSpool,
}
1032
/// Cache and batching intervals.
///
/// NOTE(review): the expiry/interval fields appear to be in seconds,
/// judging by their defaults — confirm against consumers.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cache {
    /// Whether to request full project configs. Default `false`.
    pub project_request_full_config: bool,
    /// Expiry of cached project states. Default `300`.
    pub project_expiry: u32,
    /// Grace period after a project state expires. Default `120`.
    pub project_grace_period: u32,
    /// Optional refresh interval for project states. Unset by default.
    pub project_refresh_interval: Option<u32>,
    /// Expiry of cached downstream relay info. Default `3600`.
    pub relay_expiry: u32,
    /// Expiry of buffered envelopes. Default `600`.
    /// Accepts the legacy key `event_expiry` as an alias.
    #[serde(alias = "event_expiry")]
    envelope_expiry: u32,
    /// Size of the envelope buffer. Default `1000`.
    /// Accepts the legacy key `event_buffer_size` as an alias.
    #[serde(alias = "event_buffer_size")]
    envelope_buffer_size: u32,
    /// Expiry of cache misses. Default `60`.
    pub miss_expiry: u32,
    /// Batching interval for upstream queries. Default `100`.
    pub batch_interval: u32,
    /// Batching interval for downstream relay queries. Default `100`.
    pub downstream_relays_batch_interval: u32,
    /// Maximum batch size. Default `500`.
    pub batch_size: usize,
    /// Interval for file-based polling. Default `10`.
    pub file_interval: u32,
    /// Interval for fetching the global config. Default `10`.
    pub global_config_fetch_interval: u32,
}
1079
1080impl Default for Cache {
1081 fn default() -> Self {
1082 Cache {
1083 project_request_full_config: false,
1084 project_expiry: 300, project_grace_period: 120, project_refresh_interval: None,
1087 relay_expiry: 3600, envelope_expiry: 600, envelope_buffer_size: 1000,
1090 miss_expiry: 60, batch_interval: 100, downstream_relays_batch_interval: 100, batch_size: 500,
1094 file_interval: 10, global_config_fetch_interval: 10, }
1097 }
1098}
1099
1100fn default_max_secs_in_future() -> u32 {
1101 60 }
1103
1104fn default_max_session_secs_in_past() -> u32 {
1105 5 * 24 * 3600 }
1107
1108fn default_chunk_size() -> ByteSize {
1109 ByteSize::mebibytes(1)
1110}
1111
1112fn default_projectconfig_cache_prefix() -> String {
1113 "relayconfig".to_owned()
1114}
1115
1116#[allow(clippy::unnecessary_wraps)]
1117fn default_max_rate_limit() -> Option<u32> {
1118 Some(300) }
1120
/// Settings that only apply in processing mode.
#[derive(Serialize, Deserialize, Debug)]
pub struct Processing {
    /// Whether processing is enabled.
    pub enabled: bool,
    /// Optional path to the GeoIP database.
    #[serde(default)]
    pub geoip_path: Option<PathBuf>,
    /// Maximum accepted timestamp drift into the future, in seconds.
    /// Default `60`.
    #[serde(default = "default_max_secs_in_future")]
    pub max_secs_in_future: u32,
    /// Maximum accepted session age, in seconds. Default 5 days.
    #[serde(default = "default_max_session_secs_in_past")]
    pub max_session_secs_in_past: u32,
    /// Kafka producer parameters (name/value pairs, e.g.
    /// `bootstrap.servers`).
    pub kafka_config: Vec<KafkaConfigParam>,
    /// Additional named Kafka configurations.
    #[serde(default)]
    pub secondary_kafka_configs: BTreeMap<String, Vec<KafkaConfigParam>>,
    /// Assignment of payload types to Kafka topics.
    #[serde(default)]
    pub topics: TopicAssignments,
    /// Whether configured topics are validated. Default `false`.
    #[serde(default)]
    pub kafka_validate_topics: bool,
    /// Redis configuration used in processing mode.
    #[serde(default)]
    pub redis: Option<RedisConfigs>,
    /// Chunk size for attachments. Default 1 MiB.
    #[serde(default = "default_chunk_size")]
    pub attachment_chunk_size: ByteSize,
    /// Prefix for project-config cache keys. Default `"relayconfig"`.
    #[serde(default = "default_projectconfig_cache_prefix")]
    pub projectconfig_cache_prefix: String,
    /// Upper bound for rate limits. Default `Some(300)`.
    /// NOTE(review): semantics defined by consumers — confirm.
    #[serde(default = "default_max_rate_limit")]
    pub max_rate_limit: Option<u32>,
}

impl Default for Processing {
    fn default() -> Self {
        Self {
            enabled: false,
            geoip_path: None,
            max_secs_in_future: default_max_secs_in_future(),
            max_session_secs_in_past: default_max_session_secs_in_past(),
            kafka_config: Vec::new(),
            secondary_kafka_configs: BTreeMap::new(),
            topics: TopicAssignments::default(),
            kafka_validate_topics: false,
            redis: None,
            attachment_chunk_size: default_chunk_size(),
            projectconfig_cache_prefix: default_projectconfig_cache_prefix(),
            max_rate_limit: default_max_rate_limit(),
        }
    }
}
1197
/// Normalization settings.
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Normalization {
    /// The level of normalization to apply.
    #[serde(default)]
    pub level: NormalizationLevel,
}

/// The level of normalization applied by this relay.
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum NormalizationLevel {
    /// The default level of normalization.
    #[default]
    Default,
    /// Full normalization.
    Full,
}
1222
/// Configuration of the outcome aggregator.
///
/// NOTE(review): intervals presumed to be in seconds — confirm.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct OutcomeAggregatorConfig {
    /// Width of aggregation buckets. Default `60`.
    pub bucket_interval: u64,
    /// Flush interval. Default `120`.
    pub flush_interval: u64,
}

impl Default for OutcomeAggregatorConfig {
    fn default() -> Self {
        Self {
            bucket_interval: 60,
            flush_interval: 120,
        }
    }
}
1241
/// Whether and how outcomes are emitted.
///
/// Serialized as `false` (`None`), `"as_client_reports"`
/// (`AsClientReports`), or `true` (`AsOutcomes`); see the custom serde
/// impls below.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EmitOutcomes {
    /// Do not emit outcomes.
    None,
    /// Emit outcomes as client reports.
    AsClientReports,
    /// Emit full outcomes.
    AsOutcomes,
}

impl EmitOutcomes {
    /// Returns `true` if outcomes are emitted in any form.
    pub fn any(&self) -> bool {
        !matches!(self, EmitOutcomes::None)
    }
}
1261
impl Serialize for EmitOutcomes {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Booleans cover the on/off cases; client reports get a
        // distinguishing string value.
        match self {
            Self::None => serializer.serialize_bool(false),
            Self::AsClientReports => serializer.serialize_str("as_client_reports"),
            Self::AsOutcomes => serializer.serialize_bool(true),
        }
    }
}

/// Visitor accepting `true`, `false`, or the string `"as_client_reports"`.
struct EmitOutcomesVisitor;

impl Visitor<'_> for EmitOutcomesVisitor {
    type Value = EmitOutcomes;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("true, false, or 'as_client_reports'")
    }

    // `true` means full outcomes, `false` means none.
    fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(if v {
            EmitOutcomes::AsOutcomes
        } else {
            EmitOutcomes::None
        })
    }

    // The only accepted string is "as_client_reports".
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        if v == "as_client_reports" {
            Ok(EmitOutcomes::AsClientReports)
        } else {
            Err(E::invalid_value(Unexpected::Str(v), &"as_client_reports"))
        }
    }
}

impl<'de> Deserialize<'de> for EmitOutcomes {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `deserialize_any` lets the visitor dispatch on bool vs. string.
        deserializer.deserialize_any(EmitOutcomesVisitor)
    }
}
1316
/// Settings for emitting outcomes.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Outcomes {
    /// Whether and how outcomes are emitted. Default: as client reports.
    pub emit_outcomes: EmitOutcomes,
    /// Whether client-reported outcomes are emitted. Default `true`.
    pub emit_client_outcomes: bool,
    /// Maximum number of outcomes per batch. Default `1000`.
    pub batch_size: usize,
    /// Batching interval. Default `500`.
    /// NOTE(review): unit presumed milliseconds — confirm.
    pub batch_interval: u64,
    /// Optional override for the source of emitted outcomes.
    pub source: Option<String>,
    /// Outcome aggregator settings.
    pub aggregator: OutcomeAggregatorConfig,
}

impl Default for Outcomes {
    fn default() -> Self {
        Outcomes {
            emit_outcomes: EmitOutcomes::AsClientReports,
            emit_client_outcomes: true,
            batch_size: 1000,
            batch_interval: 500,
            source: None,
            aggregator: OutcomeAggregatorConfig::default(),
        }
    }
}
1352
/// A minimal config containing only the `relay` section.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct MinimalConfig {
    /// The relay settings.
    pub relay: Relay,
}

impl MinimalConfig {
    /// Saves this config into the given directory, creating the directory
    /// first if it does not exist.
    pub fn save_in_folder<P: AsRef<Path>>(&self, p: P) -> anyhow::Result<()> {
        let path = p.as_ref();
        if fs::metadata(path).is_err() {
            fs::create_dir_all(path)
                .with_context(|| ConfigError::file(ConfigErrorKind::CouldNotOpenFile, path))?;
        }
        self.save(path)
    }
}

/// Stored as `config.yml`, same file as the full config.
impl ConfigObject for MinimalConfig {
    fn format() -> ConfigFormat {
        ConfigFormat::Yaml
    }

    fn name() -> &'static str {
        "config"
    }
}
1381
/// (De)serialization helpers for `HashMap<RelayId, RelayInfo>`, used via
/// `#[serde(with = "config_relay_info")]` on `AuthConfig::static_relays`.
mod config_relay_info {
    use serde::ser::SerializeMap;

    use super::*;

    /// On-disk representation of a statically configured relay.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    struct RelayInfoConfig {
        public_key: PublicKey,
        #[serde(default)]
        internal: bool,
    }

    impl From<RelayInfoConfig> for RelayInfo {
        fn from(v: RelayInfoConfig) -> Self {
            RelayInfo {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    impl From<RelayInfo> for RelayInfoConfig {
        fn from(v: RelayInfo) -> Self {
            RelayInfoConfig {
                public_key: v.public_key,
                internal: v.internal,
            }
        }
    }

    /// Deserializes a map of relay ids to relay info.
    pub(super) fn deserialize<'de, D>(des: D) -> Result<HashMap<RelayId, RelayInfo>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let map = HashMap::<RelayId, RelayInfoConfig>::deserialize(des)?;
        Ok(map.into_iter().map(|(k, v)| (k, v.into())).collect())
    }

    /// Serializes a map of relay ids to relay info.
    pub(super) fn serialize<S>(elm: &HashMap<RelayId, RelayInfo>, ser: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = ser.serialize_map(Some(elm.len()))?;

        for (k, v) in elm {
            map.serialize_entry(k, &RelayInfoConfig::from(v.clone()))?;
        }

        map.end()
    }
}
1435
/// Authentication options.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct AuthConfig {
    /// The condition under which this relay reports itself as ready.
    /// Defaults to `authenticated`; omitted from output when default.
    #[serde(default, skip_serializing_if = "is_default")]
    pub ready: ReadinessCondition,

    /// Statically configured downstream relays, keyed by relay id.
    #[serde(default, with = "config_relay_info")]
    pub static_relays: HashMap<RelayId, RelayInfo>,
}

/// GeoIP database options.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct GeoIpConfig {
    /// Optional path to the GeoIP database file.
    pub path: Option<PathBuf>,
}

/// Cardinality limiter options.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct CardinalityLimiter {
    /// Interval between cache vacuum runs. Default `180`.
    /// NOTE(review): unit presumed seconds — confirm.
    pub cache_vacuum_interval: u64,
}

impl Default for CardinalityLimiter {
    fn default() -> Self {
        Self {
            cache_vacuum_interval: 180,
        }
    }
}
1474
/// Settings for Relay's health checks.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Health {
    /// Refresh interval of internal health state, in ms. Default `3000`.
    pub refresh_interval_ms: u64,
    /// Optional absolute memory limit. Unset by default.
    pub max_memory_bytes: Option<ByteSize>,
    /// Relative memory limit as a fraction. Default `0.95`.
    pub max_memory_percent: f32,
    /// Timeout for health probes, in ms. Default `900`.
    pub probe_timeout_ms: u64,
    /// Refresh frequency of memory statistics, in ms. Default `100`.
    pub memory_stat_refresh_frequency_ms: u64,
}

impl Default for Health {
    fn default() -> Self {
        Self {
            refresh_interval_ms: 3000,
            max_memory_bytes: None,
            max_memory_percent: 0.95,
            probe_timeout_ms: 900,
            memory_stat_refresh_frequency_ms: 100,
        }
    }
}
1524
/// Settings for COGS measurements.
#[derive(Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct Cogs {
    /// Maximum number of queued measurements. Default `10_000`.
    pub max_queue_size: u64,
    /// Resource id reported for Relay itself. Default `"relay_service"`.
    pub relay_resource_id: String,
}

impl Default for Cogs {
    fn default() -> Self {
        Self {
            max_queue_size: 10_000,
            relay_resource_id: "relay_service".to_owned(),
        }
    }
}
1551
/// The complete set of config sections as stored in `config.yml`.
///
/// Every section falls back to its `Default` when absent from the file.
#[derive(Serialize, Deserialize, Debug, Default)]
struct ConfigValues {
    /// General relay settings.
    #[serde(default)]
    relay: Relay,
    /// Upstream HTTP client settings.
    #[serde(default)]
    http: Http,
    /// Cache and batching settings.
    #[serde(default)]
    cache: Cache,
    /// Spooling settings.
    #[serde(default)]
    spool: Spool,
    /// Size/count/timeout limits.
    #[serde(default)]
    limits: Limits,
    /// Logging settings.
    #[serde(default)]
    logging: relay_log::LogConfig,
    /// Routing settings.
    #[serde(default)]
    routing: Routing,
    /// Statsd metrics settings.
    #[serde(default)]
    metrics: Metrics,
    /// Sentry-internal metrics settings.
    #[serde(default)]
    sentry_metrics: SentryMetrics,
    /// Internal Sentry error-reporting settings.
    #[serde(default)]
    sentry: relay_log::SentryConfig,
    /// Processing-mode settings.
    #[serde(default)]
    processing: Processing,
    /// Outcome emission settings.
    #[serde(default)]
    outcomes: Outcomes,
    /// Primary metrics aggregator settings.
    #[serde(default)]
    aggregator: AggregatorServiceConfig,
    /// Additional scoped aggregators.
    #[serde(default)]
    secondary_aggregators: Vec<ScopedAggregatorConfig>,
    /// Authentication settings.
    #[serde(default)]
    auth: AuthConfig,
    /// GeoIP settings.
    #[serde(default)]
    geoip: GeoIpConfig,
    /// Normalization settings.
    #[serde(default)]
    normalization: Normalization,
    /// Cardinality limiter settings.
    #[serde(default)]
    cardinality_limiter: CardinalityLimiter,
    /// Health check settings.
    #[serde(default)]
    health: Health,
    /// COGS settings.
    #[serde(default)]
    cogs: Cogs,
}

/// Stored as `config.yml`.
impl ConfigObject for ConfigValues {
    fn format() -> ConfigFormat {
        ConfigFormat::Yaml
    }

    fn name() -> &'static str {
        "config"
    }
}
1605
/// The main Relay configuration: parsed config values, optional
/// credentials, and the directory the config was loaded from.
pub struct Config {
    // All sections parsed from `config.yml`.
    values: ConfigValues,
    // Credentials from `credentials.json`, if the file exists.
    credentials: Option<Credentials>,
    // The (absolutized) config directory.
    path: PathBuf,
}

impl fmt::Debug for Config {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Credentials are deliberately omitted, presumably to keep the
        // secret key out of debug output — confirm before relying on this.
        f.debug_struct("Config")
            .field("path", &self.path)
            .field("values", &self.values)
            .finish()
    }
}
1621
1622impl Config {
1623 pub fn from_path<P: AsRef<Path>>(path: P) -> anyhow::Result<Config> {
1625 let path = env::current_dir()
1626 .map(|x| x.join(path.as_ref()))
1627 .unwrap_or_else(|_| path.as_ref().to_path_buf());
1628
1629 let config = Config {
1630 values: ConfigValues::load(&path)?,
1631 credentials: if Credentials::path(&path).exists() {
1632 Some(Credentials::load(&path)?)
1633 } else {
1634 None
1635 },
1636 path: path.clone(),
1637 };
1638
1639 if cfg!(not(feature = "processing")) && config.processing_enabled() {
1640 return Err(ConfigError::file(ConfigErrorKind::ProcessingNotAvailable, &path).into());
1641 }
1642
1643 Ok(config)
1644 }
1645
1646 pub fn from_json_value(value: serde_json::Value) -> anyhow::Result<Config> {
1650 Ok(Config {
1651 values: serde_json::from_value(value)
1652 .with_context(|| ConfigError::new(ConfigErrorKind::BadJson))?,
1653 credentials: None,
1654 path: PathBuf::new(),
1655 })
1656 }
1657
1658 pub fn apply_override(
1661 &mut self,
1662 mut overrides: OverridableConfig,
1663 ) -> anyhow::Result<&mut Self> {
1664 let relay = &mut self.values.relay;
1665
1666 if let Some(mode) = overrides.mode {
1667 relay.mode = mode
1668 .parse::<RelayMode>()
1669 .with_context(|| ConfigError::field("mode"))?;
1670 }
1671
1672 if let Some(deployment) = overrides.instance {
1673 relay.instance = deployment
1674 .parse::<RelayInstance>()
1675 .with_context(|| ConfigError::field("deployment"))?;
1676 }
1677
1678 if let Some(log_level) = overrides.log_level {
1679 self.values.logging.level = log_level.parse()?;
1680 }
1681
1682 if let Some(log_format) = overrides.log_format {
1683 self.values.logging.format = log_format.parse()?;
1684 }
1685
1686 if let Some(upstream) = overrides.upstream {
1687 relay.upstream = upstream
1688 .parse::<UpstreamDescriptor>()
1689 .with_context(|| ConfigError::field("upstream"))?;
1690 } else if let Some(upstream_dsn) = overrides.upstream_dsn {
1691 relay.upstream = upstream_dsn
1692 .parse::<Dsn>()
1693 .map(|dsn| UpstreamDescriptor::from_dsn(&dsn).into_owned())
1694 .with_context(|| ConfigError::field("upstream_dsn"))?;
1695 }
1696
1697 if let Some(host) = overrides.host {
1698 relay.host = host
1699 .parse::<IpAddr>()
1700 .with_context(|| ConfigError::field("host"))?;
1701 }
1702
1703 if let Some(port) = overrides.port {
1704 relay.port = port
1705 .as_str()
1706 .parse()
1707 .with_context(|| ConfigError::field("port"))?;
1708 }
1709
1710 let processing = &mut self.values.processing;
1711 if let Some(enabled) = overrides.processing {
1712 match enabled.to_lowercase().as_str() {
1713 "true" | "1" => processing.enabled = true,
1714 "false" | "0" | "" => processing.enabled = false,
1715 _ => return Err(ConfigError::field("processing").into()),
1716 }
1717 }
1718
1719 if let Some(redis) = overrides.redis_url {
1720 processing.redis = Some(RedisConfigs::Unified(RedisConfig::single(redis)))
1721 }
1722
1723 if let Some(kafka_url) = overrides.kafka_url {
1724 let existing = processing
1725 .kafka_config
1726 .iter_mut()
1727 .find(|e| e.name == "bootstrap.servers");
1728
1729 if let Some(config_param) = existing {
1730 config_param.value = kafka_url;
1731 } else {
1732 processing.kafka_config.push(KafkaConfigParam {
1733 name: "bootstrap.servers".to_owned(),
1734 value: kafka_url,
1735 })
1736 }
1737 }
1738 let id = if let Some(id) = overrides.id {
1740 let id = Uuid::parse_str(&id).with_context(|| ConfigError::field("id"))?;
1741 Some(id)
1742 } else {
1743 None
1744 };
1745 let public_key = if let Some(public_key) = overrides.public_key {
1746 let public_key = public_key
1747 .parse::<PublicKey>()
1748 .with_context(|| ConfigError::field("public_key"))?;
1749 Some(public_key)
1750 } else {
1751 None
1752 };
1753
1754 let secret_key = if let Some(secret_key) = overrides.secret_key {
1755 let secret_key = secret_key
1756 .parse::<SecretKey>()
1757 .with_context(|| ConfigError::field("secret_key"))?;
1758 Some(secret_key)
1759 } else {
1760 None
1761 };
1762 let outcomes = &mut self.values.outcomes;
1763 if overrides.outcome_source.is_some() {
1764 outcomes.source = overrides.outcome_source.take();
1765 }
1766
1767 if let Some(credentials) = &mut self.credentials {
1768 if let Some(id) = id {
1770 credentials.id = id;
1771 }
1772 if let Some(public_key) = public_key {
1773 credentials.public_key = public_key;
1774 }
1775 if let Some(secret_key) = secret_key {
1776 credentials.secret_key = secret_key
1777 }
1778 } else {
1779 match (id, public_key, secret_key) {
1781 (Some(id), Some(public_key), Some(secret_key)) => {
1782 self.credentials = Some(Credentials {
1783 secret_key,
1784 public_key,
1785 id,
1786 })
1787 }
1788 (None, None, None) => {
1789 }
1792 _ => {
1793 return Err(ConfigError::field("incomplete credentials").into());
1794 }
1795 }
1796 }
1797
1798 let limits = &mut self.values.limits;
1799 if let Some(shutdown_timeout) = overrides.shutdown_timeout {
1800 if let Ok(shutdown_timeout) = shutdown_timeout.parse::<u64>() {
1801 limits.shutdown_timeout = shutdown_timeout;
1802 }
1803 }
1804
1805 if let Some(server_name) = overrides.server_name {
1806 self.values.sentry.server_name = Some(server_name.into());
1807 }
1808
1809 Ok(self)
1810 }
1811
1812 pub fn config_exists<P: AsRef<Path>>(path: P) -> bool {
1814 fs::metadata(ConfigValues::path(path.as_ref())).is_ok()
1815 }
1816
    /// Returns the filesystem path this configuration was loaded from.
    ///
    /// Empty for configurations created in memory (the path is set to
    /// `PathBuf::new()` in that case).
    pub fn path(&self) -> &Path {
        &self.path
    }

    /// Serializes the configuration values into a YAML string.
    ///
    /// # Errors
    ///
    /// Fails with [`ConfigErrorKind::CouldNotWriteFile`] context when
    /// serialization fails.
    pub fn to_yaml_string(&self) -> anyhow::Result<String> {
        serde_yaml::to_string(&self.values)
            .with_context(|| ConfigError::new(ConfigErrorKind::CouldNotWriteFile))
    }
1827
1828 pub fn regenerate_credentials(&mut self, save: bool) -> anyhow::Result<()> {
1832 let creds = Credentials::generate();
1833 if save {
1834 creds.save(&self.path)?;
1835 }
1836 self.credentials = Some(creds);
1837 Ok(())
1838 }
1839
    /// Returns the stored credentials, if any.
    pub fn credentials(&self) -> Option<&Credentials> {
        self.credentials.as_ref()
    }
1844
1845 pub fn replace_credentials(
1849 &mut self,
1850 credentials: Option<Credentials>,
1851 ) -> anyhow::Result<bool> {
1852 if self.credentials == credentials {
1853 return Ok(false);
1854 }
1855
1856 match credentials {
1857 Some(ref creds) => {
1858 creds.save(&self.path)?;
1859 }
1860 None => {
1861 let path = Credentials::path(&self.path);
1862 if fs::metadata(&path).is_ok() {
1863 fs::remove_file(&path).with_context(|| {
1864 ConfigError::file(ConfigErrorKind::CouldNotWriteFile, &path)
1865 })?;
1866 }
1867 }
1868 }
1869
1870 self.credentials = credentials;
1871 Ok(true)
1872 }
1873
    /// Returns `true` if credentials are set.
    pub fn has_credentials(&self) -> bool {
        self.credentials.is_some()
    }

    /// Returns the secret key from the credentials, if any.
    pub fn secret_key(&self) -> Option<&SecretKey> {
        self.credentials.as_ref().map(|x| &x.secret_key)
    }

    /// Returns the public key from the credentials, if any.
    pub fn public_key(&self) -> Option<&PublicKey> {
        self.credentials.as_ref().map(|x| &x.public_key)
    }

    /// Returns the relay ID from the credentials, if any.
    pub fn relay_id(&self) -> Option<&RelayId> {
        self.credentials.as_ref().map(|x| &x.id)
    }

    /// Returns the configured relay mode.
    pub fn relay_mode(&self) -> RelayMode {
        self.values.relay.mode
    }

    /// Returns the configured relay instance type.
    pub fn relay_instance(&self) -> RelayInstance {
        self.values.relay.instance
    }

    /// Returns the upstream this relay forwards requests to.
    pub fn upstream_descriptor(&self) -> &UpstreamDescriptor<'_> {
        &self.values.relay.upstream
    }

    /// Returns the custom HTTP `Host` header for upstream requests, if set.
    pub fn http_host_header(&self) -> Option<&str> {
        self.values.http.host_header.as_deref()
    }

    /// Returns the socket address the HTTP server binds to.
    pub fn listen_addr(&self) -> SocketAddr {
        (self.values.relay.host, self.values.relay.port).into()
    }

    /// Returns the TLS listen address if a TLS identity is configured.
    ///
    /// The port defaults to `3443` when `tls_port` is unset.
    pub fn tls_listen_addr(&self) -> Option<SocketAddr> {
        if self.values.relay.tls_identity_path.is_some() {
            let port = self.values.relay.tls_port.unwrap_or(3443);
            Some((self.values.relay.host, port).into())
        } else {
            None
        }
    }

    /// Returns the path to the TLS identity file, if configured.
    pub fn tls_identity_path(&self) -> Option<&Path> {
        self.values.relay.tls_identity_path.as_deref()
    }

    /// Returns the password for the TLS identity, if configured.
    pub fn tls_identity_password(&self) -> Option<&str> {
        self.values.relay.tls_identity_password.as_deref()
    }

    /// Returns the `relay.override_project_ids` setting.
    pub fn override_project_ids(&self) -> bool {
        self.values.relay.override_project_ids
    }

    /// Returns `true` if this relay must authenticate before reporting ready.
    ///
    /// Only managed relays with the `Authenticated` readiness condition
    /// require authentication.
    pub fn requires_auth(&self) -> bool {
        match self.values.auth.ready {
            ReadinessCondition::Authenticated => self.relay_mode() == RelayMode::Managed,
            ReadinessCondition::Always => false,
        }
    }
1955
1956 pub fn http_auth_interval(&self) -> Option<Duration> {
1960 if self.processing_enabled() {
1961 return None;
1962 }
1963
1964 match self.values.http.auth_interval {
1965 None | Some(0) => None,
1966 Some(secs) => Some(Duration::from_secs(secs)),
1967 }
1968 }
1969
    /// Returns `http.outage_grace_period` (seconds) as a `Duration`.
    pub fn http_outage_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.http.outage_grace_period)
    }

    /// Returns `http.retry_delay` (seconds) as a `Duration`.
    pub fn http_retry_delay(&self) -> Duration {
        Duration::from_secs(self.values.http.retry_delay)
    }

    /// Returns `http.project_failure_interval` (seconds) as a `Duration`.
    pub fn http_project_failure_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.project_failure_interval)
    }

    /// Returns the content encoding used for upstream requests.
    pub fn http_encoding(&self) -> HttpEncoding {
        self.values.http.encoding
    }

    /// Returns the `http.global_metrics` flag.
    pub fn http_global_metrics(&self) -> bool {
        self.values.http.global_metrics
    }

    /// Returns how outcomes should be emitted.
    ///
    /// Processing relays always emit full outcomes regardless of the
    /// configured value.
    pub fn emit_outcomes(&self) -> EmitOutcomes {
        if self.processing_enabled() {
            return EmitOutcomes::AsOutcomes;
        }
        self.values.outcomes.emit_outcomes
    }

    /// Returns the `outcomes.emit_client_outcomes` flag.
    pub fn emit_client_outcomes(&self) -> bool {
        self.values.outcomes.emit_client_outcomes
    }

    /// Returns the maximum number of outcomes per batch.
    pub fn outcome_batch_size(&self) -> usize {
        self.values.outcomes.batch_size
    }

    /// Returns `outcomes.batch_interval` (milliseconds) as a `Duration`.
    pub fn outcome_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.outcomes.batch_interval)
    }

    /// Returns the configured outcome source, if any.
    pub fn outcome_source(&self) -> Option<&str> {
        self.values.outcomes.source.as_deref()
    }

    /// Returns the outcome aggregator configuration.
    pub fn outcome_aggregator(&self) -> &OutcomeAggregatorConfig {
        &self.values.outcomes.aggregator
    }

    /// Returns the logging configuration.
    pub fn logging(&self) -> &relay_log::LogConfig {
        &self.values.logging
    }

    /// Returns the Sentry (internal error reporting) configuration.
    pub fn sentry(&self) -> &relay_log::SentryConfig {
        &self.values.sentry
    }

    /// Resolves the configured statsd address into socket addresses.
    ///
    /// Returns an empty vector when no statsd address is configured.
    ///
    /// # Errors
    ///
    /// Fails with [`ConfigErrorKind::InvalidValue`] context when the address
    /// cannot be resolved.
    pub fn statsd_addrs(&self) -> anyhow::Result<Vec<SocketAddr>> {
        if let Some(ref addr) = self.values.metrics.statsd {
            let addrs = addr
                .as_str()
                .to_socket_addrs()
                .with_context(|| ConfigError::file(ConfigErrorKind::InvalidValue, &self.path))?
                .collect();
            Ok(addrs)
        } else {
            Ok(vec![])
        }
    }
2068
    /// Returns the prefix applied to emitted statsd metrics.
    pub fn metrics_prefix(&self) -> &str {
        &self.values.metrics.prefix
    }

    /// Returns the tags attached to every emitted metric.
    pub fn metrics_default_tags(&self) -> &BTreeMap<String, String> {
        &self.values.metrics.default_tags
    }

    /// Returns the tag name used to report the hostname, if configured.
    pub fn metrics_hostname_tag(&self) -> Option<&str> {
        self.values.metrics.hostname_tag.as_deref()
    }

    /// Returns the sample rate for emitted metrics.
    pub fn metrics_sample_rate(&self) -> f32 {
        self.values.metrics.sample_rate
    }

    /// Returns the `metrics.aggregate` flag.
    pub fn metrics_aggregate(&self) -> bool {
        self.values.metrics.aggregate
    }
2093
2094 pub fn metrics_periodic_interval(&self) -> Option<Duration> {
2098 match self.values.metrics.periodic_secs {
2099 0 => None,
2100 secs => Some(Duration::from_secs(secs)),
2101 }
2102 }
2103
    /// Returns `http.timeout` (seconds) as a `Duration`.
    pub fn http_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.timeout.into())
    }

    /// Returns `http.connection_timeout` (seconds) as a `Duration`.
    pub fn http_connection_timeout(&self) -> Duration {
        Duration::from_secs(self.values.http.connection_timeout.into())
    }

    /// Returns `http.max_retry_interval` (seconds) as a `Duration`.
    pub fn http_max_retry_interval(&self) -> Duration {
        Duration::from_secs(self.values.http.max_retry_interval.into())
    }

    /// Returns `cache.project_expiry` (seconds) as a `Duration`.
    pub fn project_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_expiry.into())
    }

    /// Returns the `cache.project_request_full_config` flag.
    pub fn request_full_project_config(&self) -> bool {
        self.values.cache.project_request_full_config
    }

    /// Returns `cache.relay_expiry` (seconds) as a `Duration`.
    pub fn relay_cache_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.relay_expiry.into())
    }
2133
2134 pub fn envelope_buffer_size(&self) -> usize {
2136 self.values
2137 .cache
2138 .envelope_buffer_size
2139 .try_into()
2140 .unwrap_or(usize::MAX)
2141 }
2142
    /// Returns `cache.miss_expiry` (seconds) as a `Duration`.
    pub fn cache_miss_expiry(&self) -> Duration {
        Duration::from_secs(self.values.cache.miss_expiry.into())
    }

    /// Returns `cache.project_grace_period` (seconds) as a `Duration`.
    pub fn project_grace_period(&self) -> Duration {
        Duration::from_secs(self.values.cache.project_grace_period.into())
    }

    /// Returns the project refresh interval, if configured.
    pub fn project_refresh_interval(&self) -> Option<Duration> {
        self.values
            .cache
            .project_refresh_interval
            .map(Into::into)
            .map(Duration::from_secs)
    }

    /// Returns `cache.batch_interval` (milliseconds) as a `Duration`.
    pub fn query_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.batch_interval.into())
    }

    /// Returns `cache.downstream_relays_batch_interval` (milliseconds) as a
    /// `Duration`.
    pub fn downstream_relays_batch_interval(&self) -> Duration {
        Duration::from_millis(self.values.cache.downstream_relays_batch_interval.into())
    }

    /// Returns `cache.file_interval` (seconds) as a `Duration`.
    pub fn local_cache_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.file_interval.into())
    }

    /// Returns `cache.global_config_fetch_interval` (seconds) as a `Duration`.
    pub fn global_config_fetch_interval(&self) -> Duration {
        Duration::from_secs(self.values.cache.global_config_fetch_interval.into())
    }
2185
2186 pub fn spool_envelopes_path(&self, partition_id: u8) -> Option<PathBuf> {
2191 let mut path = self
2192 .values
2193 .spool
2194 .envelopes
2195 .path
2196 .as_ref()
2197 .map(|path| path.to_owned())?;
2198
2199 if partition_id == 0 {
2200 return Some(path);
2201 }
2202
2203 let file_name = path.file_name().and_then(|f| f.to_str())?;
2204 let new_file_name = format!("{}.{}", file_name, partition_id);
2205 path.set_file_name(new_file_name);
2206
2207 Some(path)
2208 }
2209
    /// Returns the maximum on-disk spool size in bytes.
    pub fn spool_envelopes_max_disk_size(&self) -> usize {
        self.values.spool.envelopes.max_disk_size.as_bytes()
    }

    /// Returns the spool batch size in bytes.
    pub fn spool_envelopes_batch_size_bytes(&self) -> usize {
        self.values.spool.envelopes.batch_size_bytes.as_bytes()
    }

    /// Returns the maximum age of spooled envelopes as a `Duration`.
    pub fn spool_envelopes_max_age(&self) -> Duration {
        Duration::from_secs(self.values.spool.envelopes.max_envelope_delay_secs)
    }

    /// Returns how often spool disk usage is refreshed.
    pub fn spool_disk_usage_refresh_frequency_ms(&self) -> Duration {
        Duration::from_millis(self.values.spool.envelopes.disk_usage_refresh_frequency_ms)
    }

    /// Returns the envelope count threshold for spool backpressure.
    pub fn spool_max_backpressure_envelopes(&self) -> usize {
        self.values.spool.envelopes.max_backpressure_envelopes
    }

    /// Returns the memory usage threshold (percent) for spool backpressure.
    pub fn spool_max_backpressure_memory_percent(&self) -> f32 {
        self.values.spool.envelopes.max_backpressure_memory_percent
    }

    /// Returns the number of spool partitions (always at least one).
    pub fn spool_partitions(&self) -> NonZeroU8 {
        self.values.spool.envelopes.partitions
    }

    /// Returns the maximum event payload size in bytes.
    pub fn max_event_size(&self) -> usize {
        self.values.limits.max_event_size.as_bytes()
    }

    /// Returns the maximum size of a single attachment in bytes.
    pub fn max_attachment_size(&self) -> usize {
        self.values.limits.max_attachment_size.as_bytes()
    }

    /// Returns the maximum combined size of all attachments in bytes.
    pub fn max_attachments_size(&self) -> usize {
        self.values.limits.max_attachments_size.as_bytes()
    }

    /// Returns the maximum client report payload size in bytes.
    pub fn max_client_reports_size(&self) -> usize {
        self.values.limits.max_client_reports_size.as_bytes()
    }

    /// Returns the maximum check-in payload size in bytes.
    pub fn max_check_in_size(&self) -> usize {
        self.values.limits.max_check_in_size.as_bytes()
    }

    /// Returns the maximum log payload size in bytes.
    pub fn max_log_size(&self) -> usize {
        self.values.limits.max_log_size.as_bytes()
    }

    /// Returns the maximum span payload size in bytes.
    pub fn max_span_size(&self) -> usize {
        self.values.limits.max_span_size.as_bytes()
    }

    /// Returns the maximum envelope payload size in bytes.
    pub fn max_envelope_size(&self) -> usize {
        self.values.limits.max_envelope_size.as_bytes()
    }
2288
    /// Returns the maximum number of sessions per envelope.
    pub fn max_session_count(&self) -> usize {
        self.values.limits.max_session_count
    }

    /// Returns the maximum statsd payload size in bytes.
    pub fn max_statsd_size(&self) -> usize {
        self.values.limits.max_statsd_size.as_bytes()
    }

    /// Returns the maximum metric buckets payload size in bytes.
    pub fn max_metric_buckets_size(&self) -> usize {
        self.values.limits.max_metric_buckets_size.as_bytes()
    }

    /// Returns whether metric stats are collected.
    ///
    /// Always enabled when processing is enabled.
    pub fn metric_stats_enabled(&self) -> bool {
        self.values.sentry_metrics.metric_stats_enabled || self.values.processing.enabled
    }

    /// Returns the maximum API payload size in bytes.
    pub fn max_api_payload_size(&self) -> usize {
        self.values.limits.max_api_payload_size.as_bytes()
    }

    /// Returns the maximum API file upload size in bytes.
    pub fn max_api_file_upload_size(&self) -> usize {
        self.values.limits.max_api_file_upload_size.as_bytes()
    }

    /// Returns the maximum API chunk upload size in bytes.
    pub fn max_api_chunk_upload_size(&self) -> usize {
        self.values.limits.max_api_chunk_upload_size.as_bytes()
    }

    /// Returns the maximum profile payload size in bytes.
    pub fn max_profile_size(&self) -> usize {
        self.values.limits.max_profile_size.as_bytes()
    }

    /// Returns the maximum compressed replay payload size in bytes.
    pub fn max_replay_compressed_size(&self) -> usize {
        self.values.limits.max_replay_compressed_size.as_bytes()
    }

    /// Returns the maximum uncompressed replay payload size in bytes.
    pub fn max_replay_uncompressed_size(&self) -> usize {
        self.values.limits.max_replay_uncompressed_size.as_bytes()
    }

    /// Returns the maximum replay message size in bytes.
    pub fn max_replay_message_size(&self) -> usize {
        self.values.limits.max_replay_message_size.as_bytes()
    }

    /// Returns the maximum number of concurrent upstream requests.
    pub fn max_concurrent_requests(&self) -> usize {
        self.values.limits.max_concurrent_requests
    }

    /// Returns the maximum number of concurrent upstream queries.
    pub fn max_concurrent_queries(&self) -> usize {
        self.values.limits.max_concurrent_queries
    }

    /// Returns `limits.query_timeout` (seconds) as a `Duration`.
    pub fn query_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.query_timeout)
    }

    /// Returns `limits.shutdown_timeout` (seconds) as a `Duration`.
    pub fn shutdown_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.shutdown_timeout)
    }

    /// Returns `limits.keepalive_timeout` (seconds) as a `Duration`.
    pub fn keepalive_timeout(&self) -> Duration {
        Duration::from_secs(self.values.limits.keepalive_timeout)
    }

    /// Returns the idle timeout, if configured.
    pub fn idle_timeout(&self) -> Option<Duration> {
        self.values.limits.idle_timeout.map(Duration::from_secs)
    }

    /// Returns the maximum number of connections, if limited.
    pub fn max_connections(&self) -> Option<usize> {
        self.values.limits.max_connections
    }

    /// Returns the TCP listen backlog size.
    pub fn tcp_listen_backlog(&self) -> u32 {
        self.values.limits.tcp_listen_backlog
    }

    /// Returns the configured CPU thread count (`limits.max_thread_count`).
    pub fn cpu_concurrency(&self) -> usize {
        self.values.limits.max_thread_count
    }

    /// Returns the configured pool concurrency (`limits.max_pool_concurrency`).
    pub fn pool_concurrency(&self) -> usize {
        self.values.limits.max_pool_concurrency
    }

    /// Returns the maximum number of queries per batch.
    pub fn query_batch_size(&self) -> usize {
        self.values.cache.batch_size
    }

    /// Returns the `projects` directory inside the config directory.
    pub fn project_configs_path(&self) -> PathBuf {
        self.path.join("projects")
    }
2413
    /// Returns `true` if processing is enabled.
    pub fn processing_enabled(&self) -> bool {
        self.values.processing.enabled
    }

    /// Returns the configured normalization level.
    pub fn normalization_level(&self) -> NormalizationLevel {
        self.values.normalization.level
    }

    /// Returns the GeoIP database path.
    ///
    /// Prefers `geoip.path`, falling back to `processing.geoip_path`.
    pub fn geoip_path(&self) -> Option<&Path> {
        self.values
            .geoip
            .path
            .as_deref()
            .or(self.values.processing.geoip_path.as_deref())
    }

    /// Returns `processing.max_secs_in_future` widened to `i64`.
    pub fn max_secs_in_future(&self) -> i64 {
        self.values.processing.max_secs_in_future.into()
    }

    /// Returns `processing.max_session_secs_in_past` widened to `i64`.
    pub fn max_session_secs_in_past(&self) -> i64 {
        self.values.processing.max_session_secs_in_past.into()
    }

    /// Builds the Kafka producer parameters for the given topic from the
    /// primary and secondary Kafka configurations.
    pub fn kafka_config(&self, topic: KafkaTopic) -> Result<KafkaParams, KafkaConfigError> {
        self.values.processing.topics.get(topic).kafka_config(
            &self.values.processing.kafka_config,
            &self.values.processing.secondary_kafka_configs,
        )
    }

    /// Returns the `processing.kafka_validate_topics` flag.
    pub fn kafka_validate_topics(&self) -> bool {
        self.values.processing.kafka_validate_topics
    }

    /// Returns topic assignments that are configured but not consumed.
    pub fn unused_topic_assignments(&self) -> &BTreeMap<String, TopicAssignment> {
        &self.values.processing.topics.unused
    }

    /// Builds the Redis configurations, if Redis is configured.
    ///
    /// Pool sizing is derived from the configured CPU concurrency.
    pub fn redis(&self) -> Option<RedisConfigsRef> {
        let redis_configs = self.values.processing.redis.as_ref()?;

        Some(build_redis_configs(
            redis_configs,
            self.cpu_concurrency() as u32,
        ))
    }
2473
    /// Returns the attachment chunk size in bytes.
    pub fn attachment_chunk_size(&self) -> usize {
        self.values.processing.attachment_chunk_size.as_bytes()
    }

    /// Returns the maximum flush size (bytes) of the default aggregator.
    pub fn metrics_max_batch_size_bytes(&self) -> usize {
        self.values.aggregator.max_flush_bytes
    }

    /// Returns the prefix for project config cache keys.
    pub fn projectconfig_cache_prefix(&self) -> &str {
        &self.values.processing.projectconfig_cache_prefix
    }

    /// Returns the maximum rate limit, if configured, widened to `u64`.
    pub fn max_rate_limit(&self) -> Option<u64> {
        self.values.processing.max_rate_limit.map(u32::into)
    }

    /// Returns the cardinality limiter cache vacuum interval.
    pub fn cardinality_limiter_cache_vacuum_interval(&self) -> Duration {
        Duration::from_secs(self.values.cardinality_limiter.cache_vacuum_interval)
    }

    /// Returns the health check refresh interval.
    pub fn health_refresh_interval(&self) -> Duration {
        Duration::from_millis(self.values.health.refresh_interval_ms)
    }

    /// Returns the absolute memory watermark in bytes.
    ///
    /// Defaults to `u64::MAX` (no limit) when unset.
    pub fn health_max_memory_watermark_bytes(&self) -> u64 {
        self.values
            .health
            .max_memory_bytes
            .as_ref()
            .map_or(u64::MAX, |b| b.as_bytes() as u64)
    }

    /// Returns the relative memory watermark as a percentage.
    pub fn health_max_memory_watermark_percent(&self) -> f32 {
        self.values.health.max_memory_percent
    }

    /// Returns the health probe timeout.
    pub fn health_probe_timeout(&self) -> Duration {
        Duration::from_millis(self.values.health.probe_timeout_ms)
    }

    /// Returns the memory stat refresh frequency in milliseconds.
    pub fn memory_stat_refresh_frequency_ms(&self) -> u64 {
        self.values.health.memory_stat_refresh_frequency_ms
    }

    /// Returns the maximum COGS measurement queue size.
    pub fn cogs_max_queue_size(&self) -> u64 {
        self.values.cogs.max_queue_size
    }

    /// Returns the COGS resource ID reported for this relay.
    pub fn cogs_relay_resource_id(&self) -> &str {
        &self.values.cogs.relay_resource_id
    }

    /// Returns the default metrics aggregator configuration.
    pub fn default_aggregator_config(&self) -> &AggregatorServiceConfig {
        &self.values.aggregator
    }

    /// Returns the secondary (scoped) aggregator configurations.
    pub fn secondary_aggregator_configs(&self) -> &Vec<ScopedAggregatorConfig> {
        &self.values.secondary_aggregators
    }
2550
2551 pub fn aggregator_config_for(&self, namespace: MetricNamespace) -> &AggregatorServiceConfig {
2553 for entry in &self.values.secondary_aggregators {
2554 if entry.condition.matches(Some(namespace)) {
2555 return &entry.config;
2556 }
2557 }
2558 &self.values.aggregator
2559 }
2560
    /// Returns the statically configured relays keyed by relay ID.
    pub fn static_relays(&self) -> &HashMap<RelayId, RelayInfo> {
        &self.values.auth.static_relays
    }

    /// Returns whether unknown envelope items are accepted (forwarded).
    ///
    /// Defaults to accepting unknown items unless processing is enabled.
    pub fn accept_unknown_items(&self) -> bool {
        let forward = self.values.routing.accept_unknown_items;
        forward.unwrap_or_else(|| !self.processing_enabled())
    }
2571}
2572
2573impl Default for Config {
2574 fn default() -> Self {
2575 Self {
2576 values: ConfigValues::default(),
2577 credentials: None,
2578 path: PathBuf::new(),
2579 }
2580 }
2581}
2582
#[cfg(test)]
mod tests {

    use super::*;

    // The legacy `event_buffer_size` / `event_expiry` keys must keep
    // deserializing into the envelope-based cache settings.
    #[test]
    fn test_event_buffer_size() {
        let yaml = r###"
cache:
  event_buffer_size: 1000000
  event_expiry: 1800
"###;

        let values: ConfigValues = serde_yaml::from_str(yaml).unwrap();
        assert_eq!(values.cache.envelope_buffer_size, 1_000_000);
        assert_eq!(values.cache.envelope_expiry, 1800);
    }

    // `EmitOutcomes` round-trips through its mixed bool/string JSON encoding.
    #[test]
    fn test_emit_outcomes() {
        for (serialized, deserialized) in &[
            ("true", EmitOutcomes::AsOutcomes),
            ("false", EmitOutcomes::None),
            ("\"as_client_reports\"", EmitOutcomes::AsClientReports),
        ] {
            let value: EmitOutcomes = serde_json::from_str(serialized).unwrap();
            assert_eq!(value, *deserialized);
            assert_eq!(serde_json::to_string(&value).unwrap(), *serialized);
        }
    }

    // Unquoted, non-boolean input must be rejected.
    #[test]
    fn test_emit_outcomes_invalid() {
        assert!(serde_json::from_str::<EmitOutcomes>("asdf").is_err());
    }
}