relay_server/statsd.rs
use relay_statsd::{CounterMetric, DistributionMetric, GaugeMetric, TimerMetric};
#[cfg(doc)]
use relay_system::RuntimeMetrics;

/// Gauge metrics used by Relay
pub enum RelayGauges {
    /// Tracks the number of futures waiting to be executed in the pool's queue.
    ///
    /// Useful for understanding the backlog of work and identifying potential bottlenecks.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolQueueSize,
    /// Tracks the utilization of the async pool.
    ///
    /// The utilization is a value between 0.0 and 100.0 which determines how busy the pool is doing
    /// CPU-bound work.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolUtilization,
    /// Tracks the activity of the async pool.
    ///
    /// The activity is a value between 0.0 and 100.0 which determines how busy the pool is
    /// with respect to its provisioned capacity.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolActivity,
    /// The state of Relay with respect to the upstream connection.
    /// Possible values are `0` for normal operations and `1` for a network outage.
    NetworkOutage,
    /// The number of individual stacks in the priority queue.
    ///
    /// Per combination of `(own_key, sampling_key)`, a new stack is created.
    BufferStackCount,
    /// The used disk for the buffer.
    BufferDiskUsed,
    /// The currently used memory by the entire system.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryUsed,
    /// The total system memory.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryTotal,
    /// The number of connections currently being managed by the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolConnections,
    /// The number of idle connections in the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolIdleConnections,
    /// The maximum number of connections in the Redis pool.
    #[cfg(feature = "processing")]
    RedisPoolMaxConnections,
    /// The number of futures waiting to grab a connection.
    #[cfg(feature = "processing")]
    RedisPoolWaitingForConnection,
    /// The number of notifications in the broadcast channel of the project cache.
    ProjectCacheNotificationChannel,
    /// The number of scheduled and in progress fetches in the project cache.
    ProjectCacheScheduledFetches,
    /// Exposes the number of connections currently open and handled by the server.
    ServerActiveConnections,
    /// Maximum delay of a metric bucket in seconds.
    ///
    /// The maximum is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayMax,
    /// Estimated percentage [0-100] of how busy Relay's internal services are.
    ///
    /// This metric is tagged with:
    /// - `service`: the service name.
    /// - `instance_id`: an identifier for the running service instance, unique per service name.
    ServiceUtilization,
    /// Number of attachment uploads currently in flight.
    #[cfg(feature = "processing")]
    ConcurrentAttachmentUploads,
}

impl GaugeMetric for RelayGauges {
    fn name(&self) -> &'static str {
        match self {
            RelayGauges::AsyncPoolQueueSize => "async_pool.queue_size",
            RelayGauges::AsyncPoolUtilization => "async_pool.utilization",
            RelayGauges::AsyncPoolActivity => "async_pool.activity",
            RelayGauges::NetworkOutage => "upstream.network_outage",
            RelayGauges::BufferStackCount => "buffer.stack_count",
            RelayGauges::BufferDiskUsed => "buffer.disk_used",
            RelayGauges::SystemMemoryUsed => "health.system_memory.used",
            RelayGauges::SystemMemoryTotal => "health.system_memory.total",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolConnections => "redis.pool.connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolIdleConnections => "redis.pool.idle_connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolMaxConnections => "redis.pool.max_connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolWaitingForConnection => "redis.pool.waiting_for_connection",
            RelayGauges::ProjectCacheNotificationChannel => {
                "project_cache.notification_channel.size"
            }
            RelayGauges::ProjectCacheScheduledFetches => "project_cache.fetches.size",
            RelayGauges::ServerActiveConnections => "server.http.connections",
            #[cfg(feature = "processing")]
            RelayGauges::MetricDelayMax => "metrics.delay.max",
            RelayGauges::ServiceUtilization => "service.utilization",
            #[cfg(feature = "processing")]
            RelayGauges::ConcurrentAttachmentUploads => "attachment.upload.concurrent",
        }
    }
}
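
// Illustrative usage sketch, not part of the original module: gauges declared above are
// reported through `relay_statsd`. The `metric!(gauge(..) = value, tag = "..")` form used
// below is an assumption based on common relay_statsd usage and may need adjusting.
#[allow(dead_code)]
fn example_report_gauges(queue_size: u64) {
    // Report the backlog of a named async pool.
    relay_statsd::metric!(gauge(RelayGauges::AsyncPoolQueueSize) = queue_size, pool = "default");
    // Report that the upstream connection is currently healthy (0 = normal, 1 = outage).
    relay_statsd::metric!(gauge(RelayGauges::NetworkOutage) = 0);
}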

/// Gauge metrics collected from the Runtime.
pub enum RuntimeGauges {
    /// Exposes [`RuntimeMetrics::num_idle_threads`].
    NumIdleThreads,
    /// Exposes [`RuntimeMetrics::num_alive_tasks`].
    NumAliveTasks,
    /// Exposes [`RuntimeMetrics::blocking_queue_depth`].
    BlockingQueueDepth,
    /// Exposes [`RuntimeMetrics::num_blocking_threads`].
    NumBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_idle_blocking_threads`].
    NumIdleBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_workers`].
    NumWorkers,
    /// Exposes [`RuntimeMetrics::worker_local_queue_depth`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalQueueDepth,
    /// Exposes [`RuntimeMetrics::worker_mean_poll_time`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerMeanPollTime,
}

impl GaugeMetric for RuntimeGauges {
    fn name(&self) -> &'static str {
        match self {
            RuntimeGauges::NumIdleThreads => "runtime.idle_threads",
            RuntimeGauges::NumAliveTasks => "runtime.alive_tasks",
            RuntimeGauges::BlockingQueueDepth => "runtime.blocking_queue_depth",
            RuntimeGauges::NumBlockingThreads => "runtime.num_blocking_threads",
            RuntimeGauges::NumIdleBlockingThreads => "runtime.num_idle_blocking_threads",
            RuntimeGauges::NumWorkers => "runtime.num_workers",
            RuntimeGauges::WorkerLocalQueueDepth => "runtime.worker_local_queue_depth",
            RuntimeGauges::WorkerMeanPollTime => "runtime.worker_mean_poll_time",
        }
    }
}

/// Counter metrics collected from the Runtime.
pub enum RuntimeCounters {
    /// Exposes [`RuntimeMetrics::budget_forced_yield_count`].
    BudgetForcedYieldCount,
    /// Exposes [`RuntimeMetrics::worker_local_schedule_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalScheduleCount,
    /// Exposes [`RuntimeMetrics::worker_noop_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerNoopCount,
    /// Exposes [`RuntimeMetrics::worker_overflow_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerOverflowCount,
    /// Exposes [`RuntimeMetrics::worker_park_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerParkCount,
    /// Exposes [`RuntimeMetrics::worker_poll_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerPollCount,
    /// Exposes [`RuntimeMetrics::worker_steal_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealCount,
    /// Exposes [`RuntimeMetrics::worker_steal_operations`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealOperations,
    /// Exposes [`RuntimeMetrics::worker_total_busy_duration`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerTotalBusyDuration,
}

impl CounterMetric for RuntimeCounters {
    fn name(&self) -> &'static str {
        match self {
            RuntimeCounters::BudgetForcedYieldCount => "runtime.budget_forced_yield_count",
            RuntimeCounters::WorkerLocalScheduleCount => "runtime.worker_local_schedule_count",
            RuntimeCounters::WorkerNoopCount => "runtime.worker_noop_count",
            RuntimeCounters::WorkerOverflowCount => "runtime.worker_overflow_count",
            RuntimeCounters::WorkerParkCount => "runtime.worker_park_count",
            RuntimeCounters::WorkerPollCount => "runtime.worker_poll_count",
            RuntimeCounters::WorkerStealCount => "runtime.worker_steal_count",
            RuntimeCounters::WorkerStealOperations => "runtime.worker_steal_operations",
            RuntimeCounters::WorkerTotalBusyDuration => "runtime.worker_total_busy_duration",
        }
    }
}
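
// Illustrative sketch, not part of the original module: per-worker runtime counters carry a
// `worker` tag identifying the Tokio worker they belong to. The `counter(..) += 1` form and
// the string-valued tag below are assumptions based on typical relay_statsd usage.
#[allow(dead_code)]
fn example_report_worker_counter(worker_id: &str) {
    // Count one poll on the given worker; real code would add the observed delta instead.
    relay_statsd::metric!(counter(RuntimeCounters::WorkerPollCount) += 1, worker = worker_id);
}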

/// Distribution metrics used by Relay.
pub enum RelayDistributions {
    /// The number of bytes received by Relay for each individual envelope item type.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    EnvelopeItemSize,

    /// Number of elements in the envelope buffer across all the stacks.
    ///
    /// This metric is tagged with:
    /// - `storage_type`: The type of storage used in the envelope buffer.
    BufferEnvelopesCount,
    /// The amount of bytes in the item payloads of an envelope pushed to the envelope buffer.
    ///
    /// This is not quite the same as the actual size of a serialized envelope, because it ignores
    /// the envelope header and item headers.
    BufferEnvelopeBodySize,
    /// Size of a serialized envelope pushed to the envelope buffer.
    BufferEnvelopeSize,
    /// Size of a compressed envelope pushed to the envelope buffer.
    BufferEnvelopeSizeCompressed,
    /// The number of batches emitted per partition.
    BatchesPerPartition,
    /// The number of buckets in a batch emitted.
    ///
    /// This corresponds to the number of buckets that will end up in an envelope.
    BucketsPerBatch,
    /// The number of spans per processed transaction event.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    /// Sentry's SDKs and defaults to "proprietary".
    EventSpans,
    /// Number of projects in the in-memory project cache that are waiting for their state to be
    /// updated.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStatePending,
    /// Number of project states **requested** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// The batch size can be configured with `cache.batch_size`. See `project_cache.size` for more
    /// description of the project cache.
    ProjectStateRequestBatchSize,
    /// Number of project states **returned** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStateReceived,
    /// Number of attempts required to fetch the config for a given project key.
    ProjectStateAttempts,
    /// Number of project states currently held in the in-memory project cache.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    /// automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    /// be used to ingest events. Once the grace period expires, the cache is evicted and new
    /// requests wait for an update.
    ///
    /// There is no limit to the number of cached projects.
    ProjectStateCacheSize,
    /// The size of the compressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesCompressed,
    /// The size of the uncompressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesDecompressed,
    /// The number of upstream requests queued up for sending.
    ///
    /// Relay employs connection keep-alive whenever possible. Connections are kept open for _15_
    /// seconds of inactivity or _75_ seconds of activity. If all connections are busy, they are
    /// queued, which is reflected in this metric.
    ///
    /// This metric is tagged with:
    /// - `priority`: The queueing priority of the request, either `"high"` or `"low"`. The
    /// priority determines precedence in executing requests.
    ///
    /// The number of concurrent connections can be configured with:
    /// - `limits.max_concurrent_requests` for the overall number of connections
    /// - `limits.max_concurrent_queries` for the number of concurrent high-priority requests
    UpstreamMessageQueueSize,
    /// Counts the number of retries for each upstream http request.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    /// * `success`: The request was sent and returned a success code `HTTP 2xx`
    /// * `response_error`: The request was sent and it returned an HTTP error.
    /// * `payload_failed`: The request was sent but there was an error in interpreting the response.
    /// * `send_failed`: Failed to send the request due to a network error.
    /// * `rate_limited`: The request was rate limited.
    /// * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    UpstreamRetries,
    /// Size of queries (projectconfig queries, i.e. the request payload, not the response) sent by
    /// Relay over HTTP in bytes.
    UpstreamQueryBodySize,
    /// Size of envelopes sent over HTTP in bytes.
    UpstreamEnvelopeBodySize,
    /// Size of batched global metrics requests sent by Relay over HTTP in bytes.
    UpstreamMetricsBodySize,
    /// Distribution of flush buckets over partition keys.
    ///
    /// The distribution of buckets should be even.
    /// If it is not, this metric should expose it.
    PartitionKeys,
    /// Measures how many splits were performed when sending out a partition.
    PartitionSplits,
    /// Canonical size of a Trace Item.
    ///
    /// This is not the size in bytes; it is computed with the same algorithm we use for the logs
    /// billing category.
    ///
    /// This metric is tagged with:
    /// - `item`: the trace item type.
    /// - `too_large`: `true` or `false`, whether the item is bigger than the allowed size limit.
    TraceItemCanonicalSize,
}

impl DistributionMetric for RelayDistributions {
    fn name(&self) -> &'static str {
        match self {
            Self::EnvelopeItemSize => "event.item_size",
            Self::EventSpans => "event.spans",
            Self::BatchesPerPartition => "metrics.buckets.batches_per_partition",
            Self::BucketsPerBatch => "metrics.buckets.per_batch",
            Self::BufferEnvelopesCount => "buffer.envelopes_count",
            Self::BufferEnvelopeBodySize => "buffer.envelope_body_size",
            Self::BufferEnvelopeSize => "buffer.envelope_size",
            Self::BufferEnvelopeSizeCompressed => "buffer.envelope_size.compressed",
            Self::ProjectStatePending => "project_state.pending",
            Self::ProjectStateAttempts => "project_state.attempts",
            Self::ProjectStateRequestBatchSize => "project_state.request.batch_size",
            Self::ProjectStateReceived => "project_state.received",
            Self::ProjectStateCacheSize => "project_cache.size",
            #[cfg(feature = "processing")]
            Self::ProjectStateSizeBytesCompressed => "project_state.size_bytes.compressed",
            #[cfg(feature = "processing")]
            Self::ProjectStateSizeBytesDecompressed => "project_state.size_bytes.decompressed",
            Self::UpstreamMessageQueueSize => "http_queue.size",
            Self::UpstreamRetries => "upstream.retries",
            Self::UpstreamQueryBodySize => "upstream.query.body_size",
            Self::UpstreamEnvelopeBodySize => "upstream.envelope.body_size",
            Self::UpstreamMetricsBodySize => "upstream.metrics.body_size",
            Self::PartitionKeys => "metrics.buckets.partition_keys",
            Self::PartitionSplits => "partition_splits",
            Self::TraceItemCanonicalSize => "trace_item.canonical_size",
        }
    }
}
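
// Illustrative sketch, not part of the original module: the `DistributionMetric` impl above
// only maps each variant to its statsd key; the values themselves are recorded elsewhere
// through `relay_statsd`. Calling `name()` shows the key a variant resolves to.
#[allow(dead_code)]
fn example_distribution_key() -> &'static str {
    // Resolves to "event.item_size", the key under which envelope item sizes are reported.
    RelayDistributions::EnvelopeItemSize.name()
}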

/// Timer metrics used by Relay
pub enum RelayTimers {
    /// Time in milliseconds spent deserializing an event from JSON bytes into the native data
    /// structure on which Relay operates.
    EventProcessingDeserialize,
    /// Time in milliseconds spent running normalization on an event. Normalization
    /// happens before envelope filtering and metric extraction.
    EventProcessingNormalization,
    /// Time in milliseconds spent running inbound data filters on an event.
    EventProcessingFiltering,
    /// Time in milliseconds spent checking for organization, project, and DSN rate limits.
    ///
    /// Not all events reach this point. After an event is rate limited for the first time, the rate
    /// limit is cached. Events coming in after this will be discarded earlier in the request queue
    /// and do not reach the processing queue.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of limiter executed, `cached` or `consistent`.
    /// - `unit`: The item/unit of work which is being rate limited, only available for new
    /// processing pipelines.
    EventProcessingRateLimiting,
    /// Time in milliseconds spent in data scrubbing for the current event. Data scrubbing happens
    /// last before serializing the event back to JSON.
    EventProcessingPii,
    /// Time spent converting the event from its in-memory representation into a JSON string.
    EventProcessingSerialization,
    /// Time used to extract span metrics from an event.
    EventProcessingSpanMetricsExtraction,
    /// Time spent between the start of request handling and processing of the envelope.
    ///
    /// This includes streaming the request body, scheduling overheads, project config fetching,
    /// batched requests and congestions in the internal processor. This does not include delays in
    /// the incoming request (body upload) and skips all envelopes that are fast-rejected.
    EnvelopeWaitTime,
    /// Time in milliseconds spent in synchronous processing of envelopes.
    ///
    /// This timing covers the end-to-end processing in the CPU pool and comprises:
    ///
    /// - `event_processing.deserialize`
    /// - `event_processing.pii`
    /// - `event_processing.serialization`
    ///
    /// With Relay in processing mode, this also includes the following timings:
    ///
    /// - `event_processing.process`
    /// - `event_processing.filtering`
    /// - `event_processing.rate_limiting`
    EnvelopeProcessingTime,
    /// Total time in milliseconds an envelope spends in Relay from the time it is received until it
    /// finishes processing and has been submitted to the upstream.
    EnvelopeTotalTime,
    /// Latency of project config updates until they reach Relay.
    ///
    /// The metric is calculated by using the creation timestamp of the project config
    /// and when Relay updates its local cache with the new project config.
    ///
    /// No metric is emitted when Relay fetches a project config for the first time.
    ///
    /// This metric is tagged with:
    /// - `delay`: Bucketed amount of seconds passed between fetches.
    ProjectCacheUpdateLatency,
    /// Total time spent from starting to fetch a project config update to completing the fetch.
    ProjectCacheFetchDuration,
    /// Total time in milliseconds spent waiting for queued project configuration update requests
    /// to resolve.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries * cache.batch_size` projects from the upstream. This metric
    /// measures the wall clock time for all concurrent requests in this loop.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequestDuration,
    /// Time in milliseconds required to decompress a project config from redis.
    ///
    /// Note that this also times the cases where project config is uncompressed,
    /// in which case the timer should be very close to zero.
    #[cfg(feature = "processing")]
    ProjectStateDecompression,
    /// Total duration in milliseconds for handling inbound web requests until the HTTP response is
    /// returned to the client.
    ///
    /// This does **not** correspond to the full event ingestion time. Requests for events that are
    /// not immediately rejected due to bad data or cached rate limits always return `200 OK`. Full
    /// validation and normalization occur asynchronously, which is reported by
    /// `event.processing_time`.
    ///
    /// This metric is tagged with:
    /// - `method`: The HTTP method of the request.
    /// - `route`: Unique dashed identifier of the endpoint.
    RequestsDuration,
    /// Time spent on minidump scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the minidump. Even if no PII
    /// scrubbing rules apply, the minidump is still parsed and the rules are evaluated on
    /// the parsed minidump; in that case the duration is reported here with a status of "n/a".
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: Scrubbing status: "ok" means successfully scrubbed, "error" means there
    /// was an error during scrubbing and finally "n/a" means scrubbing was successful
    /// but no scrubbing rules applied.
    MinidumpScrubbing,
    /// Time spent on view hierarchy scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the view hierarchy json file.
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: "ok" means successfully scrubbed, "error" means there was an error during
    /// scrubbing
    ViewHierarchyScrubbing,
    /// Time spent on attachment scrubbing.
    ///
    /// This represents the total time spent on evaluating the scrubbing rules for an
    /// attachment and the attachment scrubbing itself, regardless of whether any rules were
    /// applied. Note that minidumps which failed to be parsed (status="error" in
    /// scrubbing.minidumps.duration) will be scrubbed as plain attachments and count
    /// towards this.
    ///
    /// This metric is tagged with:
    ///
    /// - `attachment_type`: The type of attachment, e.g. "minidump".
    AttachmentScrubbing,
    /// Total time spent to send request to upstream Relay and handle the response.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    /// * `success`: The request was sent and returned a success code `HTTP 2xx`
    /// * `response_error`: The request was sent and it returned an HTTP error.
    /// * `payload_failed`: The request was sent but there was an error in interpreting the response.
    /// * `send_failed`: Failed to send the request due to a network error.
    /// * `rate_limited`: The request was rate limited.
    /// * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    /// - `retries`: Number of retries, bucketed as 0, 1, 2, few (3 - 10), or many (more than 10).
    UpstreamRequestsDuration,
    /// The delay between the timestamp stated in a payload and the receive time.
    ///
    /// SDKs cannot transmit payloads immediately in all cases. Sometimes, crashes require that
    /// events are sent after restarting the application. Similarly, SDKs buffer events during
    /// network downtimes for later transmission. This metric measures the delay between the time of
    /// the event and the time it arrives in Relay. The delay is measured after clock drift
    /// correction is applied.
    ///
    /// Only payloads with a delay of more than 1 minute are captured.
    ///
    /// This metric is tagged with:
    ///
    /// - `category`: The data category of the payload. Can be one of: `event`, `transaction`,
    /// `security`, or `session`.
    TimestampDelay,
    /// The time it takes the outcome aggregator to flush aggregated outcomes.
    OutcomeAggregatorFlushTime,
    /// Time in milliseconds spent on parsing, normalizing and scrubbing replay recordings.
    ReplayRecordingProcessing,
    /// Total time spent to send a request and receive the response from upstream.
    GlobalConfigRequestDuration,
    /// Timing in milliseconds for processing a message in the internal CPU pool.
    ///
    /// This metric is tagged with:
    ///
    /// - `message`: The type of message that was processed.
    ProcessMessageDuration,
    /// Timing in milliseconds for processing a task in the project cache service.
    ///
    /// This metric is tagged with:
    /// - `task`: The type of the task the project cache does.
    ProjectCacheTaskDuration,
    /// Timing in milliseconds for handling and responding to a health check request.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of the health check, `liveness` or `readiness`.
    HealthCheckDuration,
    /// Temporary timing metric for how much time was spent evaluating span and transaction
    /// rate limits using the `RateLimitBuckets` message in the processor.
    ///
    /// This metric is tagged with:
    /// - `category`: The data category evaluated.
    /// - `limited`: Whether the batch is rate limited.
    /// - `count`: How many items matching the data category are contained in the batch.
    #[cfg(feature = "processing")]
    RateLimitBucketsDuration,
    /// Timing in milliseconds for processing a task in the aggregator service.
    ///
    /// This metric is tagged with:
    /// - `task`: The task being executed by the aggregator.
    /// - `aggregator`: The name of the aggregator.
    AggregatorServiceDuration,
    /// Timing in milliseconds for processing a message in the metric router service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    MetricRouterServiceDuration,
    /// Timing in milliseconds for processing a message in the metric store service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    #[cfg(feature = "processing")]
    StoreServiceDuration,
    /// Timing in milliseconds for the time it takes to initialize the buffer.
    BufferInitialization,
    /// Timing in milliseconds for the time it takes for the buffer to pack & spool a batch.
    ///
    /// Contains the time it takes to pack multiple envelopes into a single memory blob.
    BufferSpool,
    /// Timing in milliseconds for the time it takes for the buffer to spool data to SQLite.
    BufferSqlWrite,
    /// Timing in milliseconds for the time it takes for the buffer to unspool data from disk.
    BufferUnspool,
    /// Timing in milliseconds for the time it takes for the buffer to push.
    BufferPush,
    /// Timing in milliseconds for the time it takes for the buffer to peek.
    BufferPeek,
    /// Timing in milliseconds for the time it takes for the buffer to pop.
    BufferPop,
    /// Timing in milliseconds for the time it takes for the buffer to drain its envelopes.
    BufferDrain,
    /// Timing in milliseconds for the time it takes for an envelope to be serialized.
    BufferEnvelopesSerialization,
    /// Timing in milliseconds for the time it takes for an envelope to be compressed.
    BufferEnvelopeCompression,
    /// Timing in milliseconds for the time it takes for an envelope to be decompressed.
    BufferEnvelopeDecompression,
    /// Timing in milliseconds to count spans in a serialized transaction payload.
    CheckNestedSpans,
    /// The time it needs to create a signature. Includes both the signature used for
    /// trusted relays and for register challenges.
    SignatureCreationDuration,
    /// Time needed to upload an attachment to objectstore.
    ///
    /// Tagged by:
    /// - `type`: "envelope" or "attachment_v2".
    #[cfg(feature = "processing")]
    AttachmentUploadDuration,
}

impl TimerMetric for RelayTimers {
    fn name(&self) -> &'static str {
        match self {
            RelayTimers::EventProcessingDeserialize => "event_processing.deserialize",
            RelayTimers::EventProcessingNormalization => "event_processing.normalization",
            RelayTimers::EventProcessingFiltering => "event_processing.filtering",
            RelayTimers::EventProcessingRateLimiting => "event_processing.rate_limiting",
            RelayTimers::EventProcessingPii => "event_processing.pii",
            RelayTimers::EventProcessingSpanMetricsExtraction => {
                "event_processing.span_metrics_extraction"
            }
            RelayTimers::EventProcessingSerialization => "event_processing.serialization",
            RelayTimers::EnvelopeWaitTime => "event.wait_time",
            RelayTimers::EnvelopeProcessingTime => "event.processing_time",
            RelayTimers::EnvelopeTotalTime => "event.total_time",
            RelayTimers::ProjectStateRequestDuration => "project_state.request.duration",
            #[cfg(feature = "processing")]
            RelayTimers::ProjectStateDecompression => "project_state.decompression",
            RelayTimers::ProjectCacheUpdateLatency => "project_cache.latency",
            RelayTimers::ProjectCacheFetchDuration => "project_cache.fetch.duration",
            RelayTimers::RequestsDuration => "requests.duration",
            RelayTimers::MinidumpScrubbing => "scrubbing.minidumps.duration",
            RelayTimers::ViewHierarchyScrubbing => "scrubbing.view_hierarchy_scrubbing.duration",
            RelayTimers::AttachmentScrubbing => "scrubbing.attachments.duration",
            RelayTimers::UpstreamRequestsDuration => "upstream.requests.duration",
            RelayTimers::TimestampDelay => "requests.timestamp_delay",
            RelayTimers::OutcomeAggregatorFlushTime => "outcomes.aggregator.flush_time",
            RelayTimers::ReplayRecordingProcessing => "replay.recording.process",
            RelayTimers::GlobalConfigRequestDuration => "global_config.requests.duration",
            RelayTimers::ProcessMessageDuration => "processor.message.duration",
            RelayTimers::ProjectCacheTaskDuration => "project_cache.task.duration",
            RelayTimers::HealthCheckDuration => "health.message.duration",
            #[cfg(feature = "processing")]
            RelayTimers::RateLimitBucketsDuration => "processor.rate_limit_buckets",
            RelayTimers::AggregatorServiceDuration => "metrics.aggregator.message.duration",
            RelayTimers::MetricRouterServiceDuration => "metrics.router.message.duration",
            #[cfg(feature = "processing")]
            RelayTimers::StoreServiceDuration => "store.message.duration",
            RelayTimers::BufferInitialization => "buffer.initialization.duration",
            RelayTimers::BufferSpool => "buffer.spool.duration",
            RelayTimers::BufferSqlWrite => "buffer.write.duration",
            RelayTimers::BufferUnspool => "buffer.unspool.duration",
            RelayTimers::BufferPush => "buffer.push.duration",
            RelayTimers::BufferPeek => "buffer.peek.duration",
            RelayTimers::BufferPop => "buffer.pop.duration",
            RelayTimers::BufferDrain => "buffer.drain.duration",
            RelayTimers::BufferEnvelopesSerialization => "buffer.envelopes_serialization",
            RelayTimers::BufferEnvelopeCompression => "buffer.envelopes_compression",
            RelayTimers::BufferEnvelopeDecompression => "buffer.envelopes_decompression",
            RelayTimers::CheckNestedSpans => "envelope.check_nested_spans",
            RelayTimers::SignatureCreationDuration => "signature.create.duration",
            #[cfg(feature = "processing")]
            RelayTimers::AttachmentUploadDuration => "attachment.upload.duration",
        }
    }
}
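
// Illustrative sketch, not part of the original module: timers such as `EventProcessingPii`
// are commonly recorded with the block form of `relay_statsd::metric!`, which measures the
// enclosed expression. The exact macro form is an assumption based on typical usage.
#[allow(dead_code)]
fn example_time_pii_scrubbing() {
    relay_statsd::metric!(timer(RelayTimers::EventProcessingPii), {
        // ... run PII scrubbing here; the elapsed time is reported when the block finishes ...
    });
}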

/// Counter metrics used by Relay
pub enum RelayCounters {
    /// Tracks the number of tasks driven to completion by the async pool.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolFinishedTasks,
    /// Number of Events that had corrupted (unprintable) event attributes.
    ///
    /// This currently checks for `environment` and `release`, for which we know that
    /// some SDKs may send corrupted values.
    EventCorrupted,
    /// Number of envelopes accepted in the current time slot.
    ///
    /// This represents requests that have successfully passed rate limits and filters, and have
    /// been sent to the upstream.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    /// there was an error or bug.
    EnvelopeAccepted,
    /// Number of envelopes rejected in the current time slot.
    ///
    /// This includes envelopes being rejected because they are malformed or any other errors during
    /// processing (including filtered events, invalid payloads, and rate limits).
    ///
    /// To check the rejection reason, check `events.outcomes`, instead.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    /// there was an error or bug.
    EnvelopeRejected,
    /// Number of total envelope items we received.
    ///
    /// Note: This does not count raw items; it counts the logical number of items,
    /// e.g. a single item container counts all its contained items.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    /// Sentry's SDKs and defaults to "proprietary".
    EnvelopeItems,
    /// Number of bytes we processed per envelope item.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    /// Sentry's SDKs and defaults to "proprietary".
    EnvelopeItemBytes,
    /// Number of times an envelope from the buffer is trying to be popped.
    BufferTryPop,
    /// Number of envelopes spooled to disk.
    BufferSpooledEnvelopes,
    /// Number of envelopes unspooled from disk.
    BufferUnspooledEnvelopes,
    /// Number of project changed updates received by the buffer.
    BufferProjectChangedEvent,
    /// Number of times one or more projects of an envelope were pending when trying to pop
    /// their envelope.
    BufferProjectPending,
    /// Number of outcomes and reasons for rejected Envelopes.
    ///
    /// This metric is tagged with:
    /// - `outcome`: The basic cause for rejecting the event.
    /// - `reason`: A more detailed identifier describing the rule or mechanism leading to the
    /// outcome.
    /// - `to`: Describes the destination of the outcome. Can be either 'kafka' (when in
    /// processing mode) or 'http' (when outcomes are enabled in an external relay).
    ///
    /// Possible outcomes are:
    /// - `filtered`: Dropped by inbound data filters. The reason specifies the filter that
    /// matched.
    /// - `rate_limited`: Dropped by organization, project, or DSN rate limit, as well as exceeding
    /// the Sentry plan quota. The reason contains the rate limit or quota that was exceeded.
    /// - `invalid`: Data was considered invalid and could not be recovered. The reason indicates
    /// the validation that failed.
    Outcomes,
    /// The number of individual outcomes including their quantity.
    ///
    /// While [`RelayCounters::Outcomes`] tracks the number of times aggregated outcomes
    /// have been emitted, this counter tracks the total quantity of individual outcomes.
    OutcomeQuantity,
    /// Number of project state HTTP requests.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries` batches of `cache.batch_size` projects from the upstream.
    /// The duration of these requests is reported via `project_state.request.duration`.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequest,
    /// Number of times a project state is requested from the central Redis cache.
    ///
    /// This metric is tagged with:
    /// - `hit`: One of:
    /// - `revision`: the cached version was validated to be up to date using its revision.
    /// - `project_config`: the request was handled by the cache.
    /// - `project_config_revision`: the request was handled by the cache and the revision did
    /// not change.
    /// - `false`: the request will be sent to the sentry endpoint.
    #[cfg(feature = "processing")]
    ProjectStateRedis,
    /// Number of times a project had a fetch scheduled.
    ProjectCacheSchedule,
    /// Number of times an upstream request for a project config is completed.
    ///
    /// Completion can be because a result was returned or because the config request was
    /// dropped after there still was no response after a timeout. This metric has tags
    /// for `result` and `attempts` indicating whether it was successful or a timeout and how
    /// many attempts were made, respectively.
    ProjectUpstreamCompleted,
    /// Number of times an upstream request for a project config failed.
    ///
    /// Failure can happen, for example, when there's a network error. Refer to
    /// [`UpstreamRequestError`](crate::services::upstream::UpstreamRequestError) for all cases.
    ProjectUpstreamFailed,
    /// Number of Relay server starts.
    ///
    /// This can be used to track unwanted restarts due to crashes or termination.
    ServerStarting,
    /// Number of messages placed on the Kafka queues.
    ///
    /// When Relay operates as a Sentry service and an Envelope item is successfully processed, each
    /// Envelope item results in a dedicated message on one of the ingestion topics on Kafka.
    ///
    /// This metric is tagged with:
    /// - `event_type`: The kind of message produced to Kafka.
    /// - `namespace` (only for metrics): The namespace that the metric belongs to.
    /// - `is_segment` (only for event_type span): `true` if the span is the root of a segment.
    /// - `has_parent` (only for event_type span): `false` if the span is the root of a trace.
    /// - `platform` (only for event_type span): The platform from which the span was sent.
    /// - `metric_type` (only for event_type metric): The metric type, counter, distribution,
    /// gauge or set.
    /// - `metric_encoding` (only for event_type metric): The encoding used for distribution and
    /// set metrics.
    ///
    /// The message types can be:
    ///
    /// - `event`: An error or transaction event. Error events are sent to `ingest-events`,
    /// transactions to `ingest-transactions`, and errors with attachments are sent to
    /// `ingest-attachments`.
    /// - `attachment`: An attachment file associated with an error event, sent to
    /// `ingest-attachments`.
    /// - `user_report`: A message from the user feedback dialog, sent to `ingest-events`.
    /// - `session`: A release health session update, sent to `ingest-sessions`.
    #[cfg(feature = "processing")]
    ProcessingMessageProduced,
    /// Number of spans produced in the new format.
    #[cfg(feature = "processing")]
    SpanV2Produced,
    /// Number of events that hit any of the store-like endpoints: Envelope, Store, Security,
    /// Minidump, Unreal.
    ///
    /// The events are counted before they are rate limited, filtered, or processed in any way.
    ///
    /// This metric is tagged with:
    /// - `version`: The event protocol version number defaulting to `7`.
    EventProtocol,
    /// The number of transaction events processed by the source of the transaction name.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `source`: The source of the transaction name on the client. See the [transaction source
    /// documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    /// for all valid values.
    /// - `contains_slashes`: Whether the transaction name contains `/`. We use this as a heuristic
    /// to represent URL transactions.
    EventTransaction,
    /// The number of transaction events processed grouped by transaction name modifications.
    /// This metric is tagged with:
    /// - `source_in`: The source of the transaction name before normalization.
    /// See the [transaction source
    /// documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    /// for all valid values.
    /// - `change`: The mechanism that changed the transaction name.
    /// Either `"none"`, `"pattern"`, `"rule"`, or `"both"`.
    /// - `source_out`: The source of the transaction name after normalization.
    TransactionNameChanges,
    /// Number of HTTP requests reaching Relay.
    Requests,
    /// Number of completed HTTP requests.
    ///
    /// This metric is tagged with:
    ///
    /// - `status_code`: The HTTP status code number.
    /// - `method`: The HTTP method used in the request in uppercase.
    /// - `route`: Unique dashed identifier of the endpoint.
    ResponsesStatusCodes,
    /// Number of evicted stale projects from the cache.
    ///
    /// Relay scans the in-memory project cache for stale entries in a regular interval configured
    /// by `cache.eviction_interval`.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    /// automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    /// be used to ingest events. Once the grace period expires, the cache is evicted and new
    /// requests wait for an update.
    EvictingStaleProjectCaches,
    /// Number of refreshes for stale projects in the cache.
    RefreshStaleProjectCaches,
    /// Number of times that parsing a metrics bucket item from an envelope failed.
    MetricBucketsParsingFailed,
    /// Counts the extraction of transaction names, tagged with the decision to drop / replace / use the original.
    MetricsTransactionNameExtracted,
    /// Number of Events with an OpenTelemetry Context
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    /// Sentry's SDKs and defaults to "proprietary".
    OpenTelemetryEvent,
    /// Number of global config fetches from upstream. Only 2XX responses are
    /// considered; send errors (e.g. auth or network errors) are ignored.
    ///
    /// This metric is tagged with:
    /// - `success`: whether deserializing the global config succeeded.
    GlobalConfigFetched,
    /// The number of attachments processed in the same envelope as a user_report_v2 event.
    FeedbackAttachments,
    /// All COGS tracked values.
    ///
    /// This metric is tagged with:
    /// - `resource_id`: The COGS resource id.
    /// - `app_feature`: The COGS app feature.
    CogsUsage,
    /// The number of times metrics of a project have been flushed without the project being
    /// fetched/available.
    ProjectStateFlushMetricsNoProject,
    /// Incremented every time a bucket is dropped.
    ///
    /// This should only happen when a project state is invalid during graceful shutdown.
    ///
    /// This metric is tagged with:
    /// - `aggregator`: The name of the metrics aggregator (usually `"default"`).
    BucketsDropped,
    /// Incremented every time a segment exceeds the expected limit.
    ReplayExceededSegmentLimit,
    /// Incremented every time the server accepts a new connection.
    ServerSocketAccept,
    /// Incremented every time the server aborts a connection because of an idle timeout.
    ServerConnectionIdleTimeout,
    /// The total delay of metric buckets in seconds.
    ///
    /// The delay is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// Use [`Self::MetricDelayCount`] to calculate the average delay.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelaySum,
    /// The amount of buckets counted for the [`Self::MetricDelaySum`] metric.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayCount,
    /// The number of times PlayStation processing was attempted.
    #[cfg(all(sentry, feature = "processing"))]
    PlaystationProcessing,
    /// The number of times a sampling decision was made.
    ///
    /// This metric is tagged with:
    /// - `item`: what item the decision is taken for (transaction vs span).
    SamplingDecision,
    /// The number of times an upload of an attachment occurs.
    ///
    /// This metric is tagged with:
    /// - `result`: `success` or the failure reason.
    /// - `type`: `envelope` or `attachment_v2`
    #[cfg(feature = "processing")]
    AttachmentUpload,
    /// Whether a logs envelope has a trace context header or not
    ///
    /// This metric is tagged with:
    /// - `dsc`: yes or no
    /// - `sdk`: low-cardinality client name
    EnvelopeWithLogs,
}

impl CounterMetric for RelayCounters {
    fn name(&self) -> &'static str {
        match self {
            RelayCounters::AsyncPoolFinishedTasks => "async_pool.finished_tasks",
            RelayCounters::EventCorrupted => "event.corrupted",
            RelayCounters::EnvelopeAccepted => "event.accepted",
            RelayCounters::EnvelopeRejected => "event.rejected",
            RelayCounters::EnvelopeItems => "event.items",
            RelayCounters::EnvelopeItemBytes => "event.item_bytes",
            RelayCounters::BufferTryPop => "buffer.try_pop",
            RelayCounters::BufferSpooledEnvelopes => "buffer.spooled_envelopes",
            RelayCounters::BufferUnspooledEnvelopes => "buffer.unspooled_envelopes",
            RelayCounters::BufferProjectChangedEvent => "buffer.project_changed_event",
            RelayCounters::BufferProjectPending => "buffer.project_pending",
            RelayCounters::Outcomes => "events.outcomes",
            RelayCounters::OutcomeQuantity => "events.outcome_quantity",
            RelayCounters::ProjectStateRequest => "project_state.request",
            #[cfg(feature = "processing")]
            RelayCounters::ProjectStateRedis => "project_state.redis.requests",
            RelayCounters::ProjectUpstreamCompleted => "project_upstream.completed",
            RelayCounters::ProjectUpstreamFailed => "project_upstream.failed",
            RelayCounters::ProjectCacheSchedule => "project_cache.schedule",
            RelayCounters::ServerStarting => "server.starting",
            #[cfg(feature = "processing")]
            RelayCounters::ProcessingMessageProduced => "processing.event.produced",
            #[cfg(feature = "processing")]
            RelayCounters::SpanV2Produced => "store.produced.span_v2",
            RelayCounters::EventProtocol => "event.protocol",
            RelayCounters::EventTransaction => "event.transaction",
            RelayCounters::TransactionNameChanges => "event.transaction_name_changes",
            RelayCounters::Requests => "requests",
            RelayCounters::ResponsesStatusCodes => "responses.status_codes",
            RelayCounters::EvictingStaleProjectCaches => "project_cache.eviction",
            RelayCounters::RefreshStaleProjectCaches => "project_cache.refresh",
            RelayCounters::MetricBucketsParsingFailed => "metrics.buckets.parsing_failed",
            RelayCounters::MetricsTransactionNameExtracted => "metrics.transaction_name",
            RelayCounters::OpenTelemetryEvent => "event.opentelemetry",
            RelayCounters::GlobalConfigFetched => "global_config.fetch",
            RelayCounters::FeedbackAttachments => "processing.feedback_attachments",
            RelayCounters::CogsUsage => "cogs.usage",
            RelayCounters::ProjectStateFlushMetricsNoProject => "project_state.metrics.no_project",
            RelayCounters::BucketsDropped => "metrics.buckets.dropped",
            RelayCounters::ReplayExceededSegmentLimit => "replay.segment_limit_exceeded",
            RelayCounters::ServerSocketAccept => "server.http.accepted",
            RelayCounters::ServerConnectionIdleTimeout => "server.http.idle_timeout",
            #[cfg(feature = "processing")]
            RelayCounters::MetricDelaySum => "metrics.delay.sum",
            #[cfg(feature = "processing")]
            RelayCounters::MetricDelayCount => "metrics.delay.count",
            #[cfg(all(sentry, feature = "processing"))]
            RelayCounters::PlaystationProcessing => "processing.playstation",
            RelayCounters::SamplingDecision => "sampling.decision",
            #[cfg(feature = "processing")]
            RelayCounters::AttachmentUpload => "attachment.upload",
            RelayCounters::EnvelopeWithLogs => "logs.envelope",
        }
    }
}
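
// Illustrative sketch, not part of the original module: counters are incremented together with
// the tags documented on each variant, e.g. the `handling` tag on `EnvelopeAccepted`. The
// `counter(..) += 1` form and tag syntax are assumptions based on typical relay_statsd usage.
#[allow(dead_code)]
fn example_count_accepted_envelope() {
    relay_statsd::metric!(counter(RelayCounters::EnvelopeAccepted) += 1, handling = "success");
}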