relay_server/statsd.rs
use relay_statsd::{CounterMetric, GaugeMetric, HistogramMetric, TimerMetric};
#[cfg(doc)]
use relay_system::RuntimeMetrics;

/// Gauge metrics used by Relay.
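///
/// # Example
///
/// A minimal sketch of reporting one of these gauges. It assumes the
/// `relay_statsd::metric!` macro and its `gauge(..) = value` form are available as used
/// elsewhere in this crate; treat the snippet as illustrative rather than as an exact
/// call site.
///
/// ```ignore
/// // Hypothetical call site: report that the upstream connection is in outage.
/// relay_statsd::metric!(gauge(RelayGauges::NetworkOutage) = 1);
/// ```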
pub enum RelayGauges {
    /// Tracks the number of futures waiting to be executed in the pool's queue.
    ///
    /// Useful for understanding the backlog of work and identifying potential bottlenecks.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolQueueSize,
    /// Tracks the utilization of the async pool.
    ///
    /// The utilization is a value between 0.0 and 100.0 that indicates how busy the pool is
    /// with CPU-bound work.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolUtilization,
    /// Tracks the activity of the async pool.
    ///
    /// The activity is a value between 0.0 and 100.0 that indicates how busy the pool is
    /// with respect to its provisioned capacity.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolActivity,
    /// The state of Relay with respect to the upstream connection.
    /// Possible values are `0` for normal operations and `1` for a network outage.
    NetworkOutage,
    /// The number of individual stacks in the priority queue.
    ///
    /// Per combination of `(own_key, sampling_key)`, a new stack is created.
    BufferStackCount,
    /// The disk space used by the buffer.
    BufferDiskUsed,
    /// The currently used memory by the entire system.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryUsed,
    /// The total system memory.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryTotal,
    /// The number of connections currently being managed by the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolConnections,
    /// The number of idle connections in the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolIdleConnections,
    /// The maximum number of connections in the Redis pool.
    #[cfg(feature = "processing")]
    RedisPoolMaxConnections,
    /// The number of futures waiting to grab a connection.
    #[cfg(feature = "processing")]
    RedisPoolWaitingForConnection,
    /// The number of notifications in the broadcast channel of the project cache.
    ProjectCacheNotificationChannel,
    /// The number of scheduled and in-progress fetches in the project cache.
    ProjectCacheScheduledFetches,
    /// Exposes the number of connections currently open and handled by the server.
    ServerActiveConnections,
    /// Maximum delay of a metric bucket in seconds.
    ///
    /// The maximum is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayMax,
    /// Estimated percentage [0-100] of how busy Relay's internal services are.
    ///
    /// This metric is tagged with:
    /// - `service`: the service name.
    /// - `instance_id`: a unique identifier for the running instance of the service.
    ServiceUtilization,
    /// Number of attachment uploads currently in flight.
    #[cfg(feature = "processing")]
    ConcurrentAttachmentUploads,
}

impl GaugeMetric for RelayGauges {
    fn name(&self) -> &'static str {
        match self {
            RelayGauges::AsyncPoolQueueSize => "async_pool.queue_size",
            RelayGauges::AsyncPoolUtilization => "async_pool.utilization",
            RelayGauges::AsyncPoolActivity => "async_pool.activity",
            RelayGauges::NetworkOutage => "upstream.network_outage",
            RelayGauges::BufferStackCount => "buffer.stack_count",
            RelayGauges::BufferDiskUsed => "buffer.disk_used",
            RelayGauges::SystemMemoryUsed => "health.system_memory.used",
            RelayGauges::SystemMemoryTotal => "health.system_memory.total",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolConnections => "redis.pool.connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolIdleConnections => "redis.pool.idle_connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolMaxConnections => "redis.pool.max_connections",
            #[cfg(feature = "processing")]
            RelayGauges::RedisPoolWaitingForConnection => "redis.pool.waiting_for_connection",
            RelayGauges::ProjectCacheNotificationChannel => {
                "project_cache.notification_channel.size"
            }
            RelayGauges::ProjectCacheScheduledFetches => "project_cache.fetches.size",
            RelayGauges::ServerActiveConnections => "server.http.connections",
            #[cfg(feature = "processing")]
            RelayGauges::MetricDelayMax => "metrics.delay.max",
            RelayGauges::ServiceUtilization => "service.utilization",
            #[cfg(feature = "processing")]
            RelayGauges::ConcurrentAttachmentUploads => "attachment.upload.concurrent",
        }
    }
}

/// Gauge metrics collected from the Runtime.
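///
/// # Example
///
/// A hedged sketch of how a per-worker runtime gauge could be reported, assuming the
/// `relay_statsd::metric!` macro supports trailing `tag = value` pairs; the actual
/// collection of these values from [`RuntimeMetrics`] happens elsewhere in Relay.
///
/// ```ignore
/// // Hypothetical: `depth` and `worker` would be derived from the runtime's metrics.
/// relay_statsd::metric!(gauge(RuntimeGauges::WorkerLocalQueueDepth) = depth, worker = worker);
/// ```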
pub enum RuntimeGauges {
    /// Exposes [`RuntimeMetrics::num_idle_threads`].
    NumIdleThreads,
    /// Exposes [`RuntimeMetrics::num_alive_tasks`].
    NumAliveTasks,
    /// Exposes [`RuntimeMetrics::blocking_queue_depth`].
    BlockingQueueDepth,
    /// Exposes [`RuntimeMetrics::num_blocking_threads`].
    NumBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_idle_blocking_threads`].
    NumIdleBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_workers`].
    NumWorkers,
    /// Exposes [`RuntimeMetrics::worker_local_queue_depth`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalQueueDepth,
    /// Exposes [`RuntimeMetrics::worker_mean_poll_time`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerMeanPollTime,
}

impl GaugeMetric for RuntimeGauges {
    fn name(&self) -> &'static str {
        match self {
            RuntimeGauges::NumIdleThreads => "runtime.idle_threads",
            RuntimeGauges::NumAliveTasks => "runtime.alive_tasks",
            RuntimeGauges::BlockingQueueDepth => "runtime.blocking_queue_depth",
            RuntimeGauges::NumBlockingThreads => "runtime.num_blocking_threads",
            RuntimeGauges::NumIdleBlockingThreads => "runtime.num_idle_blocking_threads",
            RuntimeGauges::NumWorkers => "runtime.num_workers",
            RuntimeGauges::WorkerLocalQueueDepth => "runtime.worker_local_queue_depth",
            RuntimeGauges::WorkerMeanPollTime => "runtime.worker_mean_poll_time",
        }
    }
}

/// Counter metrics collected from the Runtime.
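///
/// # Example
///
/// An illustrative sketch only: incrementing one of these counters with the `worker`
/// tag, assuming the `relay_statsd::metric!` macro's `counter(..) += value` form.
///
/// ```ignore
/// // Hypothetical: `polls` and `worker` are derived from tokio's runtime metrics.
/// relay_statsd::metric!(counter(RuntimeCounters::WorkerPollCount) += polls, worker = worker);
/// ```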
pub enum RuntimeCounters {
    /// Exposes [`RuntimeMetrics::budget_forced_yield_count`].
    BudgetForcedYieldCount,
    /// Exposes [`RuntimeMetrics::worker_local_schedule_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalScheduleCount,
    /// Exposes [`RuntimeMetrics::worker_noop_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerNoopCount,
    /// Exposes [`RuntimeMetrics::worker_overflow_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerOverflowCount,
    /// Exposes [`RuntimeMetrics::worker_park_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerParkCount,
    /// Exposes [`RuntimeMetrics::worker_poll_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerPollCount,
    /// Exposes [`RuntimeMetrics::worker_steal_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealCount,
    /// Exposes [`RuntimeMetrics::worker_steal_operations`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealOperations,
    /// Exposes [`RuntimeMetrics::worker_total_busy_duration`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerTotalBusyDuration,
}

impl CounterMetric for RuntimeCounters {
    fn name(&self) -> &'static str {
        match self {
            RuntimeCounters::BudgetForcedYieldCount => "runtime.budget_forced_yield_count",
            RuntimeCounters::WorkerLocalScheduleCount => "runtime.worker_local_schedule_count",
            RuntimeCounters::WorkerNoopCount => "runtime.worker_noop_count",
            RuntimeCounters::WorkerOverflowCount => "runtime.worker_overflow_count",
            RuntimeCounters::WorkerParkCount => "runtime.worker_park_count",
            RuntimeCounters::WorkerPollCount => "runtime.worker_poll_count",
            RuntimeCounters::WorkerStealCount => "runtime.worker_steal_count",
            RuntimeCounters::WorkerStealOperations => "runtime.worker_steal_operations",
            RuntimeCounters::WorkerTotalBusyDuration => "runtime.worker_total_busy_duration",
        }
    }
}

/// Histogram metrics used by Relay.
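///
/// # Example
///
/// A hedged sketch of recording a histogram value, assuming the
/// `relay_statsd::metric!` macro's `histogram(..) = value` form and tag support.
///
/// ```ignore
/// // Hypothetical call site: record the span count of a processed transaction.
/// relay_statsd::metric!(histogram(RelayHistograms::EventSpans) = span_count, platform = platform);
/// ```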
pub enum RelayHistograms {
    /// The number of bytes received by Relay for each individual envelope item type.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    EnvelopeItemSize,

    /// Number of elements in the envelope buffer across all the stacks.
    ///
    /// This metric is tagged with:
    /// - `storage_type`: The type of storage used in the envelope buffer.
    BufferEnvelopesCount,
    /// The amount of bytes in the item payloads of an envelope pushed to the envelope buffer.
    ///
    /// This is not quite the same as the actual size of a serialized envelope, because it ignores
    /// the envelope header and item headers.
    BufferEnvelopeBodySize,
    /// Size of a serialized envelope pushed to the envelope buffer.
    BufferEnvelopeSize,
    /// Size of a compressed envelope pushed to the envelope buffer.
    BufferEnvelopeSizeCompressed,
    /// The number of batches emitted per partition.
    BatchesPerPartition,
    /// The number of buckets in a batch emitted.
    ///
    /// This corresponds to the number of buckets that will end up in an envelope.
    BucketsPerBatch,
    /// The number of spans per processed transaction event.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EventSpans,
    /// Number of projects in the in-memory project cache that are waiting for their state to be
    /// updated.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStatePending,
    /// Number of project states **requested** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// The batch size can be configured with `cache.batch_size`. See `project_cache.size` for more
    /// description of the project cache.
    ProjectStateRequestBatchSize,
    /// Number of project states **returned** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStateReceived,
    /// Number of attempts required to fetch the config for a given project key.
    ProjectStateAttempts,
    /// Number of project states currently held in the in-memory project cache.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    ///   automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    ///   be used to ingest events. Once the grace period expires, the cache is evicted and new
    ///   requests wait for an update.
    ///
    /// There is no limit to the number of cached projects.
    ProjectStateCacheSize,
    /// The size of the compressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesCompressed,
    /// The size of the uncompressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesDecompressed,
    /// The number of upstream requests queued up for sending.
    ///
    /// Relay employs connection keep-alive whenever possible. Connections are kept open for _15_
    /// seconds of inactivity or _75_ seconds of activity. If all connections are busy, they are
    /// queued, which is reflected in this metric.
    ///
    /// This metric is tagged with:
    /// - `priority`: The queueing priority of the request, either `"high"` or `"low"`. The
    ///   priority determines precedence in executing requests.
    ///
    /// The number of concurrent connections can be configured with:
    /// - `limits.max_concurrent_requests` for the overall number of connections
    /// - `limits.max_concurrent_queries` for the number of concurrent high-priority requests
    UpstreamMessageQueueSize,
    /// Counts the number of retries for each upstream http request.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    ///   * `success`: The request was sent and returned a success code `HTTP 2xx`
    ///   * `response_error`: The request was sent and it returned an HTTP error.
    ///   * `payload_failed`: The request was sent but there was an error in interpreting the response.
    ///   * `send_failed`: Failed to send the request due to a network error.
    ///   * `rate_limited`: The request was rate limited.
    ///   * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    UpstreamRetries,
    /// Size of queries (projectconfig queries, i.e. the request payload, not the response) sent by
    /// Relay over HTTP in bytes.
    UpstreamQueryBodySize,
    /// Size of envelopes sent over HTTP in bytes.
    UpstreamEnvelopeBodySize,
    /// Size of batched global metrics requests sent by Relay over HTTP in bytes.
    UpstreamMetricsBodySize,
    /// Distribution of flush buckets over partition keys.
    ///
    /// The distribution of buckets should be even.
    /// If it is not, this metric should expose it.
    PartitionKeys,
    /// Measures how many splits were performed when sending out a partition.
    PartitionSplits,
}

impl HistogramMetric for RelayHistograms {
    fn name(&self) -> &'static str {
        match self {
            RelayHistograms::EnvelopeItemSize => "event.item_size",
            RelayHistograms::EventSpans => "event.spans",
            RelayHistograms::BatchesPerPartition => "metrics.buckets.batches_per_partition",
            RelayHistograms::BucketsPerBatch => "metrics.buckets.per_batch",
            RelayHistograms::BufferEnvelopesCount => "buffer.envelopes_count",
            RelayHistograms::BufferEnvelopeBodySize => "buffer.envelope_body_size",
            RelayHistograms::BufferEnvelopeSize => "buffer.envelope_size",
            RelayHistograms::BufferEnvelopeSizeCompressed => "buffer.envelope_size.compressed",
            RelayHistograms::ProjectStatePending => "project_state.pending",
            RelayHistograms::ProjectStateAttempts => "project_state.attempts",
            RelayHistograms::ProjectStateRequestBatchSize => "project_state.request.batch_size",
            RelayHistograms::ProjectStateReceived => "project_state.received",
            RelayHistograms::ProjectStateCacheSize => "project_cache.size",
            #[cfg(feature = "processing")]
            RelayHistograms::ProjectStateSizeBytesCompressed => {
                "project_state.size_bytes.compressed"
            }
            #[cfg(feature = "processing")]
            RelayHistograms::ProjectStateSizeBytesDecompressed => {
                "project_state.size_bytes.decompressed"
            }
            RelayHistograms::UpstreamMessageQueueSize => "http_queue.size",
            RelayHistograms::UpstreamRetries => "upstream.retries",
            RelayHistograms::UpstreamQueryBodySize => "upstream.query.body_size",
            RelayHistograms::UpstreamEnvelopeBodySize => "upstream.envelope.body_size",
            RelayHistograms::UpstreamMetricsBodySize => "upstream.metrics.body_size",
            RelayHistograms::PartitionKeys => "metrics.buckets.partition_keys",
            RelayHistograms::PartitionSplits => "partition_splits",
        }
    }
}

/// Timer metrics used by Relay.
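///
/// # Example
///
/// A sketch of the two common ways a timer might be reported, assuming the
/// `relay_statsd::metric!` macro supports both a timed block and an explicit
/// `timer(..) = duration` form; the exact syntax may differ between versions.
///
/// ```ignore
/// // Hypothetical: time a block of work.
/// relay_statsd::metric!(timer(RelayTimers::EventProcessingPii), {
///     scrub_event(&mut event);
/// });
///
/// // Hypothetical: report an already measured `std::time::Duration`.
/// relay_statsd::metric!(timer(RelayTimers::BodyReadDuration) = elapsed);
/// ```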
pub enum RelayTimers {
    /// Time in milliseconds spent deserializing an event from JSON bytes into the native data
    /// structure on which Relay operates.
    EventProcessingDeserialize,
    /// Time in milliseconds spent running normalization on an event. Normalization
    /// happens before envelope filtering and metric extraction.
    EventProcessingNormalization,
    /// Time in milliseconds spent running inbound data filters on an event.
    EventProcessingFiltering,
    /// Time in milliseconds spent checking for organization, project, and DSN rate limits.
    ///
    /// Not all events reach this point. After an event is rate limited for the first time, the rate
    /// limit is cached. Events coming in after this will be discarded earlier in the request queue
    /// and do not reach the processing queue.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of limiter executed, `cached` or `consistent`.
    EventProcessingRateLimiting,
    /// Time in milliseconds spent in data scrubbing for the current event. Data scrubbing happens
    /// last before serializing the event back to JSON.
    EventProcessingPii,
    /// Time spent converting the event from its in-memory representation into a JSON string.
    EventProcessingSerialization,
    /// Time used to extract span metrics from an event.
    EventProcessingSpanMetricsExtraction,
    /// Time spent between the start of request handling and processing of the envelope.
    ///
    /// This includes streaming the request body, scheduling overheads, project config fetching,
    /// batched requests and congestions in the internal processor. This does not include delays in
    /// the incoming request (body upload) and skips all envelopes that are fast-rejected.
    EnvelopeWaitTime,
    /// Time in milliseconds spent in synchronous processing of envelopes.
    ///
    /// This timing covers the end-to-end processing in the CPU pool and comprises:
    ///
    /// - `event_processing.deserialize`
    /// - `event_processing.pii`
    /// - `event_processing.serialization`
    ///
    /// With Relay in processing mode, this also includes the following timings:
    ///
    /// - `event_processing.process`
    /// - `event_processing.filtering`
    /// - `event_processing.rate_limiting`
    EnvelopeProcessingTime,
    /// Total time in milliseconds an envelope spends in Relay from the time it is received until it
    /// finishes processing and has been submitted to the upstream.
    EnvelopeTotalTime,
    /// Latency of project config updates until they reach Relay.
    ///
    /// The metric is calculated by using the creation timestamp of the project config
    /// and when Relay updates its local cache with the new project config.
    ///
    /// No metric is emitted when Relay fetches a project config for the first time.
    ///
    /// This metric is tagged with:
    /// - `delay`: Bucketed amount of seconds passed between fetches.
    ProjectCacheUpdateLatency,
    /// Total time spent from starting to fetch a project config update to completing the fetch.
    ProjectCacheFetchDuration,
    /// Total time in milliseconds spent waiting for queued project configuration update requests
    /// to resolve.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries * cache.batch_size` projects from the upstream. This metric
    /// measures the wall clock time for all concurrent requests in this loop.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequestDuration,
    /// Time in milliseconds required to decompress a project config from redis.
    ///
    /// Note that this also times the cases where project config is uncompressed,
    /// in which case the timer should be very close to zero.
    #[cfg(feature = "processing")]
    ProjectStateDecompression,
    /// Total duration in milliseconds for handling inbound web requests until the HTTP response is
    /// returned to the client.
    ///
    /// This does **not** correspond to the full event ingestion time. Requests for events that are
    /// not immediately rejected due to bad data or cached rate limits always return `200 OK`. Full
    /// validation and normalization occur asynchronously, which is reported by
    /// `event.processing_time`.
    ///
    /// This metric is tagged with:
    /// - `method`: The HTTP method of the request.
    /// - `route`: Unique dashed identifier of the endpoint.
    RequestsDuration,
    /// Time spent on minidump scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the minidump. Even if no PII
    /// scrubbing rules apply, the minidump is still parsed and the rules are evaluated on the
    /// parsed minidump; in that case the duration is reported here with a status of "n/a".
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: Scrubbing status: "ok" means the minidump was scrubbed successfully, "error"
    ///   means there was an error during scrubbing, and "n/a" means scrubbing succeeded but no
    ///   scrubbing rules applied.
    MinidumpScrubbing,
    /// Time spent on view hierarchy scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the view hierarchy JSON file.
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: "ok" means the view hierarchy was scrubbed successfully, "error" means there
    ///   was an error during scrubbing.
    ViewHierarchyScrubbing,
    /// Time spent on attachment scrubbing.
    ///
    /// This represents the total time spent on evaluating the scrubbing rules for an
    /// attachment and the attachment scrubbing itself, regardless of whether any rules were
    /// applied. Note that minidumps which failed to be parsed (status="error" in
    /// scrubbing.minidumps.duration) will be scrubbed as plain attachments and count
    /// towards this.
    ///
    /// This metric is tagged with:
    ///
    /// - `attachment_type`: The type of attachment, e.g. "minidump".
    AttachmentScrubbing,
    /// Total time spent to send a request to the upstream Relay and handle the response.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    ///   * `success`: The request was sent and returned a success code `HTTP 2xx`
    ///   * `response_error`: The request was sent and it returned an HTTP error.
    ///   * `payload_failed`: The request was sent but there was an error in interpreting the response.
    ///   * `send_failed`: Failed to send the request due to a network error.
    ///   * `rate_limited`: The request was rate limited.
    ///   * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    /// - `retries`: Number of retries, bucketed as 0, 1, 2, few (3 - 10), or many (more than 10).
    UpstreamRequestsDuration,
    /// The delay between the timestamp stated in a payload and the receive time.
    ///
    /// SDKs cannot transmit payloads immediately in all cases. Sometimes, crashes require that
    /// events are sent after restarting the application. Similarly, SDKs buffer events during
    /// network downtimes for later transmission. This metric measures the delay between the time of
    /// the event and the time it arrives in Relay. The delay is measured after clock drift
    /// correction is applied.
    ///
    /// Only payloads with a delay of more than 1 minute are captured.
    ///
    /// This metric is tagged with:
    ///
    /// - `category`: The data category of the payload. Can be one of: `event`, `transaction`,
    ///   `security`, or `session`.
    TimestampDelay,
    /// The time it takes the outcome aggregator to flush aggregated outcomes.
    OutcomeAggregatorFlushTime,
    /// Time in milliseconds spent on parsing, normalizing and scrubbing replay recordings.
    ReplayRecordingProcessing,
    /// Total time spent to send a request and receive the response from upstream.
    GlobalConfigRequestDuration,
    /// Timing in milliseconds for processing a message in the internal CPU pool.
    ///
    /// This metric is tagged with:
    ///
    /// - `message`: The type of message that was processed.
    ProcessMessageDuration,
    /// Timing in milliseconds for processing a task in the project cache service.
    ///
    /// This metric is tagged with:
    /// - `task`: The type of the task the project cache does.
    ProjectCacheTaskDuration,
    /// Timing in milliseconds for handling and responding to a health check request.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of the health check, `liveness` or `readiness`.
    HealthCheckDuration,
    /// Temporary timing metric for how much time was spent evaluating span and transaction
    /// rate limits using the `RateLimitBuckets` message in the processor.
    ///
    /// This metric is tagged with:
    /// - `category`: The data category evaluated.
    /// - `limited`: Whether the batch is rate limited.
    /// - `count`: How many items matching the data category are contained in the batch.
    #[cfg(feature = "processing")]
    RateLimitBucketsDuration,
    /// Timing in milliseconds for processing a task in the aggregator service.
    ///
    /// This metric is tagged with:
    /// - `task`: The task being executed by the aggregator.
    /// - `aggregator`: The name of the aggregator.
    AggregatorServiceDuration,
    /// Timing in milliseconds for processing a message in the metric router service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    MetricRouterServiceDuration,
    /// Timing in milliseconds for processing a message in the metric store service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    #[cfg(feature = "processing")]
    StoreServiceDuration,
    /// Timing in milliseconds for the time it takes to initialize the buffer.
    BufferInitialization,
    /// Timing in milliseconds for the time it takes for the buffer to pack & spool a batch.
    ///
    /// Contains the time it takes to pack multiple envelopes into a single memory blob.
    BufferSpool,
    /// Timing in milliseconds for the time it takes for the buffer to spool data to SQLite.
    BufferSqlWrite,
    /// Timing in milliseconds for the time it takes for the buffer to unspool data from disk.
    BufferUnspool,
    /// Timing in milliseconds for the time it takes for the buffer to push.
    BufferPush,
    /// Timing in milliseconds for the time it takes for the buffer to peek.
    BufferPeek,
    /// Timing in milliseconds for the time it takes for the buffer to pop.
    BufferPop,
    /// Timing in milliseconds for the time it takes for the buffer to drain its envelopes.
    BufferDrain,
    /// Timing in milliseconds for the time it takes for an envelope to be serialized.
    BufferEnvelopesSerialization,
    /// Timing in milliseconds for the time it takes for an envelope to be compressed.
    BufferEnvelopeCompression,
    /// Timing in milliseconds for the time it takes for an envelope to be decompressed.
    BufferEnvelopeDecompression,
    /// Timing in milliseconds for the time it takes to read an HTTP body.
    BodyReadDuration,
    /// Timing in milliseconds to count spans in a serialized transaction payload.
    CheckNestedSpans,
    /// The time it takes to create a signature. Includes both the signature used for
    /// trusted relays and the one used for register challenges.
    SignatureCreationDuration,
}

impl TimerMetric for RelayTimers {
    fn name(&self) -> &'static str {
        match self {
            RelayTimers::EventProcessingDeserialize => "event_processing.deserialize",
            RelayTimers::EventProcessingNormalization => "event_processing.normalization",
            RelayTimers::EventProcessingFiltering => "event_processing.filtering",
            RelayTimers::EventProcessingRateLimiting => "event_processing.rate_limiting",
            RelayTimers::EventProcessingPii => "event_processing.pii",
            RelayTimers::EventProcessingSpanMetricsExtraction => {
                "event_processing.span_metrics_extraction"
            }
            RelayTimers::EventProcessingSerialization => "event_processing.serialization",
            RelayTimers::EnvelopeWaitTime => "event.wait_time",
            RelayTimers::EnvelopeProcessingTime => "event.processing_time",
            RelayTimers::EnvelopeTotalTime => "event.total_time",
            RelayTimers::ProjectStateRequestDuration => "project_state.request.duration",
            #[cfg(feature = "processing")]
            RelayTimers::ProjectStateDecompression => "project_state.decompression",
            RelayTimers::ProjectCacheUpdateLatency => "project_cache.latency",
            RelayTimers::ProjectCacheFetchDuration => "project_cache.fetch.duration",
            RelayTimers::RequestsDuration => "requests.duration",
            RelayTimers::MinidumpScrubbing => "scrubbing.minidumps.duration",
            RelayTimers::ViewHierarchyScrubbing => "scrubbing.view_hierarchy_scrubbing.duration",
            RelayTimers::AttachmentScrubbing => "scrubbing.attachments.duration",
            RelayTimers::UpstreamRequestsDuration => "upstream.requests.duration",
            RelayTimers::TimestampDelay => "requests.timestamp_delay",
            RelayTimers::OutcomeAggregatorFlushTime => "outcomes.aggregator.flush_time",
            RelayTimers::ReplayRecordingProcessing => "replay.recording.process",
            RelayTimers::GlobalConfigRequestDuration => "global_config.requests.duration",
            RelayTimers::ProcessMessageDuration => "processor.message.duration",
            RelayTimers::ProjectCacheTaskDuration => "project_cache.task.duration",
            RelayTimers::HealthCheckDuration => "health.message.duration",
            #[cfg(feature = "processing")]
            RelayTimers::RateLimitBucketsDuration => "processor.rate_limit_buckets",
            RelayTimers::AggregatorServiceDuration => "metrics.aggregator.message.duration",
            RelayTimers::MetricRouterServiceDuration => "metrics.router.message.duration",
            #[cfg(feature = "processing")]
            RelayTimers::StoreServiceDuration => "store.message.duration",
            RelayTimers::BufferInitialization => "buffer.initialization.duration",
            RelayTimers::BufferSpool => "buffer.spool.duration",
            RelayTimers::BufferSqlWrite => "buffer.write.duration",
            RelayTimers::BufferUnspool => "buffer.unspool.duration",
            RelayTimers::BufferPush => "buffer.push.duration",
            RelayTimers::BufferPeek => "buffer.peek.duration",
            RelayTimers::BufferPop => "buffer.pop.duration",
            RelayTimers::BufferDrain => "buffer.drain.duration",
            RelayTimers::BufferEnvelopesSerialization => "buffer.envelopes_serialization",
            RelayTimers::BufferEnvelopeCompression => "buffer.envelopes_compression",
            RelayTimers::BufferEnvelopeDecompression => "buffer.envelopes_decompression",
            RelayTimers::BodyReadDuration => "requests.body_read.duration",
            RelayTimers::CheckNestedSpans => "envelope.check_nested_spans",
            RelayTimers::SignatureCreationDuration => "signature.create.duration",
        }
    }
}

/// Counter metrics used by Relay.
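///
/// # Example
///
/// A minimal, illustrative sketch of incrementing a counter with tags, assuming the
/// `relay_statsd::metric!` macro's `counter(..) += value` form; not necessarily an
/// exact call site from this crate.
///
/// ```ignore
/// // Hypothetical: count a completed HTTP request by status code and route.
/// relay_statsd::metric!(
///     counter(RelayCounters::ResponsesStatusCodes) += 1,
///     status_code = "200",
///     route = "store-envelope"
/// );
/// ```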
pub enum RelayCounters {
    /// Tracks the number of tasks driven to completion by the async pool.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolFinishedTasks,
    /// Number of Events that had corrupted (unprintable) event attributes.
    ///
    /// This currently checks for `environment` and `release`, for which we know that
    /// some SDKs may send corrupted values.
    EventCorrupted,
    /// Number of envelopes accepted in the current time slot.
    ///
    /// This represents requests that have successfully passed rate limits and filters, and have
    /// been sent to the upstream.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    ///   there was an error or bug.
    EnvelopeAccepted,
    /// Number of envelopes rejected in the current time slot.
    ///
    /// This includes envelopes rejected because they are malformed, as well as any other errors
    /// during processing (including filtered events, invalid payloads, and rate limits).
    ///
    /// To check the rejection reason, refer to `events.outcomes` instead.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    ///   there was an error or bug.
    EnvelopeRejected,
    /// Total number of envelope items received.
    ///
    /// Note: This does not count raw items; it counts the logical number of items,
    /// e.g. a single item container counts all its contained items.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EnvelopeItems,
    /// Number of bytes processed per envelope item.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EnvelopeItemBytes,
    /// Number of attempts to pop an envelope from the buffer.
    BufferTryPop,
    /// Number of envelopes spooled to disk.
    BufferSpooledEnvelopes,
    /// Number of envelopes unspooled from disk.
    BufferUnspooledEnvelopes,
    /// Number of project changed updates received by the buffer.
    BufferProjectChangedEvent,
    /// Number of times one or more projects of an envelope were pending when trying to pop
    /// their envelope.
    BufferProjectPending,
    /// Number of outcomes and reasons for rejected Envelopes.
    ///
    /// This metric is tagged with:
    /// - `outcome`: The basic cause for rejecting the event.
    /// - `reason`: A more detailed identifier describing the rule or mechanism leading to the
    ///   outcome.
    /// - `to`: Describes the destination of the outcome. Can be either 'kafka' (when in
    ///   processing mode) or 'http' (when outcomes are enabled in an external relay).
    ///
    /// Possible outcomes are:
    /// - `filtered`: Dropped by inbound data filters. The reason specifies the filter that
    ///   matched.
    /// - `rate_limited`: Dropped by organization, project, or DSN rate limit, as well as exceeding
    ///   the Sentry plan quota. The reason contains the rate limit or quota that was exceeded.
    /// - `invalid`: Data was considered invalid and could not be recovered. The reason indicates
    ///   the validation that failed.
    Outcomes,
    /// The number of individual outcomes including their quantity.
    ///
    /// While [`RelayCounters::Outcomes`] tracks the number of times aggregated outcomes
    /// have been emitted, this counter tracks the total quantity of individual outcomes.
    OutcomeQuantity,
    /// Number of project state HTTP requests.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries` batches of `cache.batch_size` projects from the upstream.
    /// The duration of these requests is reported via `project_state.request.duration`.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequest,
    /// Number of times a project state is requested from the central Redis cache.
    ///
    /// This metric is tagged with:
    /// - `hit`: One of:
    ///   - `revision`: the cached version was validated to be up to date using its revision.
    ///   - `project_config`: the request was handled by the cache.
    ///   - `project_config_revision`: the request was handled by the cache and the revision did
    ///     not change.
    ///   - `false`: the request will be sent to the sentry endpoint.
    #[cfg(feature = "processing")]
    ProjectStateRedis,
    /// Number of times a project had a fetch scheduled.
    ProjectCacheSchedule,
    /// Number of times an upstream request for a project config is completed.
    ///
    /// Completion means either a result was returned or the config request was dropped because
    /// there was still no response after a timeout. This metric has tags for `result` and
    /// `attempts`, indicating whether it was successful or a timeout and how many attempts were
    /// made, respectively.
    ProjectUpstreamCompleted,
    /// Number of times an upstream request for a project config failed.
    ///
    /// Failure can happen, for example, when there's a network error. Refer to
    /// [`UpstreamRequestError`](crate::services::upstream::UpstreamRequestError) for all cases.
    ProjectUpstreamFailed,
    /// Number of Relay server starts.
    ///
    /// This can be used to track unwanted restarts due to crashes or termination.
    ServerStarting,
    /// Number of messages placed on the Kafka queues.
    ///
    /// When Relay operates as a Sentry service and an Envelope item is successfully processed,
    /// each Envelope item results in a dedicated message on one of the ingestion topics on Kafka.
    ///
    /// This metric is tagged with:
    /// - `event_type`: The kind of message produced to Kafka.
    /// - `namespace` (only for metrics): The namespace that the metric belongs to.
    /// - `is_segment` (only for event_type span): `true` if the span is the root of a segment.
    /// - `has_parent` (only for event_type span): `false` if the span is the root of a trace.
    /// - `platform` (only for event_type span): The platform from which the span was sent.
    /// - `metric_type` (only for event_type metric): The metric type, counter, distribution,
    ///   gauge or set.
    /// - `metric_encoding` (only for event_type metric): The encoding used for distribution and
    ///   set metrics.
    ///
    /// The message types can be:
    ///
    /// - `event`: An error or transaction event. Error events are sent to `ingest-events`,
    ///   transactions to `ingest-transactions`, and errors with attachments are sent to
    ///   `ingest-attachments`.
    /// - `attachment`: An attachment file associated with an error event, sent to
    ///   `ingest-attachments`.
    /// - `user_report`: A message from the user feedback dialog, sent to `ingest-events`.
    /// - `session`: A release health session update, sent to `ingest-sessions`.
    #[cfg(feature = "processing")]
    ProcessingMessageProduced,
    /// Number of spans produced in the new format.
    #[cfg(feature = "processing")]
    SpanV2Produced,
    /// Number of events that hit any of the store-like endpoints: Envelope, Store, Security,
    /// Minidump, Unreal.
    ///
    /// The events are counted before they are rate limited, filtered, or processed in any way.
    ///
    /// This metric is tagged with:
    /// - `version`: The event protocol version number defaulting to `7`.
    EventProtocol,
    /// The number of transaction events processed by the source of the transaction name.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `source`: The source of the transaction name on the client. See the [transaction source
    ///   documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    ///   for all valid values.
    /// - `contains_slashes`: Whether the transaction name contains `/`. We use this as a heuristic
    ///   to represent URL transactions.
    EventTransaction,
    /// The number of transaction events processed grouped by transaction name modifications.
    ///
    /// This metric is tagged with:
    /// - `source_in`: The source of the transaction name before normalization.
    ///   See the [transaction source
    ///   documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    ///   for all valid values.
    /// - `change`: The mechanism that changed the transaction name.
    ///   Either `"none"`, `"pattern"`, `"rule"`, or `"both"`.
    /// - `source_out`: The source of the transaction name after normalization.
    TransactionNameChanges,
    /// Number of HTTP requests reaching Relay.
    Requests,
    /// Number of completed HTTP requests.
    ///
    /// This metric is tagged with:
    ///
    /// - `status_code`: The HTTP status code number.
    /// - `method`: The HTTP method used in the request in uppercase.
    /// - `route`: Unique dashed identifier of the endpoint.
    ResponsesStatusCodes,
    /// Number of evicted stale projects from the cache.
    ///
    /// Relay scans the in-memory project cache for stale entries in a regular interval configured
    /// by `cache.eviction_interval`.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    ///   automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    ///   be used to ingest events. Once the grace period expires, the cache is evicted and new
    ///   requests wait for an update.
    EvictingStaleProjectCaches,
    /// Number of refreshes for stale projects in the cache.
    RefreshStaleProjectCaches,
    /// Number of times that parsing a metrics bucket item from an envelope failed.
    MetricBucketsParsingFailed,
    /// Counts the extraction of transaction names. Tagged with the decision to drop / replace /
    /// use the original.
    MetricsTransactionNameExtracted,
    /// Number of Events with an OpenTelemetry Context.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    OpenTelemetryEvent,
    /// Number of global config fetches from upstream. Only 2XX responses are
    /// considered; send errors (e.g. auth or network errors) are ignored.
    ///
    /// This metric is tagged with:
    /// - `success`: whether deserializing the global config succeeded.
    GlobalConfigFetched,
    /// The number of attachments processed in the same envelope as a user_report_v2 event.
    FeedbackAttachments,
    /// All COGS tracked values.
    ///
    /// This metric is tagged with:
    /// - `resource_id`: The COGS resource id.
    /// - `app_feature`: The COGS app feature.
    CogsUsage,
    /// The number of times metrics of a project have been flushed without the project being
    /// fetched or available.
    ProjectStateFlushMetricsNoProject,
    /// Incremented every time a bucket is dropped.
    ///
    /// This should only happen when a project state is invalid during graceful shutdown.
    ///
    /// This metric is tagged with:
    /// - `aggregator`: The name of the metrics aggregator (usually `"default"`).
    BucketsDropped,
    /// Incremented every time a segment exceeds the expected limit.
    ReplayExceededSegmentLimit,
    /// Incremented every time the server accepts a new connection.
    ServerSocketAccept,
    /// Incremented every time the server aborts a connection because of an idle timeout.
    ServerConnectionIdleTimeout,
    /// The total delay of metric buckets in seconds.
    ///
    /// The delay is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// Use [`Self::MetricDelayCount`] to calculate the average delay.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelaySum,
    /// The amount of buckets counted for the [`Self::MetricDelaySum`] metric.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayCount,
    /// The number of times PlayStation processing was attempted.
    #[cfg(all(sentry, feature = "processing"))]
    PlaystationProcessing,
    /// The number of times a sampling decision was made.
    ///
    /// This metric is tagged with:
    /// - `item`: what item the decision is taken for (transaction vs span).
    SamplingDecision,
    /// The number of times an upload of an attachment occurs.
    ///
    /// This metric is tagged with:
    /// - `result`: `success` or the failure reason.
    #[cfg(feature = "processing")]
    AttachmentUpload,
}

impl CounterMetric for RelayCounters {
    fn name(&self) -> &'static str {
        match self {
            RelayCounters::AsyncPoolFinishedTasks => "async_pool.finished_tasks",
            RelayCounters::EventCorrupted => "event.corrupted",
            RelayCounters::EnvelopeAccepted => "event.accepted",
            RelayCounters::EnvelopeRejected => "event.rejected",
            RelayCounters::EnvelopeItems => "event.items",
            RelayCounters::EnvelopeItemBytes => "event.item_bytes",
            RelayCounters::BufferTryPop => "buffer.try_pop",
            RelayCounters::BufferSpooledEnvelopes => "buffer.spooled_envelopes",
            RelayCounters::BufferUnspooledEnvelopes => "buffer.unspooled_envelopes",
            RelayCounters::BufferProjectChangedEvent => "buffer.project_changed_event",
            RelayCounters::BufferProjectPending => "buffer.project_pending",
            RelayCounters::Outcomes => "events.outcomes",
            RelayCounters::OutcomeQuantity => "events.outcome_quantity",
            RelayCounters::ProjectStateRequest => "project_state.request",
            #[cfg(feature = "processing")]
            RelayCounters::ProjectStateRedis => "project_state.redis.requests",
            RelayCounters::ProjectUpstreamCompleted => "project_upstream.completed",
            RelayCounters::ProjectUpstreamFailed => "project_upstream.failed",
            RelayCounters::ProjectCacheSchedule => "project_cache.schedule",
            RelayCounters::ServerStarting => "server.starting",
            #[cfg(feature = "processing")]
            RelayCounters::ProcessingMessageProduced => "processing.event.produced",
            #[cfg(feature = "processing")]
            RelayCounters::SpanV2Produced => "store.produced.span_v2",
            RelayCounters::EventProtocol => "event.protocol",
            RelayCounters::EventTransaction => "event.transaction",
            RelayCounters::TransactionNameChanges => "event.transaction_name_changes",
            RelayCounters::Requests => "requests",
            RelayCounters::ResponsesStatusCodes => "responses.status_codes",
            RelayCounters::EvictingStaleProjectCaches => "project_cache.eviction",
            RelayCounters::RefreshStaleProjectCaches => "project_cache.refresh",
            RelayCounters::MetricBucketsParsingFailed => "metrics.buckets.parsing_failed",
            RelayCounters::MetricsTransactionNameExtracted => "metrics.transaction_name",
            RelayCounters::OpenTelemetryEvent => "event.opentelemetry",
            RelayCounters::GlobalConfigFetched => "global_config.fetch",
            RelayCounters::FeedbackAttachments => "processing.feedback_attachments",
            RelayCounters::CogsUsage => "cogs.usage",
            RelayCounters::ProjectStateFlushMetricsNoProject => "project_state.metrics.no_project",
            RelayCounters::BucketsDropped => "metrics.buckets.dropped",
            RelayCounters::ReplayExceededSegmentLimit => "replay.segment_limit_exceeded",
            RelayCounters::ServerSocketAccept => "server.http.accepted",
            RelayCounters::ServerConnectionIdleTimeout => "server.http.idle_timeout",
            #[cfg(feature = "processing")]
            RelayCounters::MetricDelaySum => "metrics.delay.sum",
            #[cfg(feature = "processing")]
            RelayCounters::MetricDelayCount => "metrics.delay.count",
            #[cfg(all(sentry, feature = "processing"))]
            RelayCounters::PlaystationProcessing => "processing.playstation",
            RelayCounters::SamplingDecision => "sampling.decision",
            #[cfg(feature = "processing")]
            RelayCounters::AttachmentUpload => "attachment.upload",
        }
    }
}