// relay_server/statsd.rs
1use relay_statsd::{CounterMetric, DistributionMetric, GaugeMetric, TimerMetric};
2#[cfg(doc)]
3use relay_system::RuntimeMetrics;
4
/// Gauge metrics used by Relay
pub enum RelayGauges {
    /// Tracks the number of futures waiting to be executed in the pool's queue.
    ///
    /// Useful for understanding the backlog of work and identifying potential bottlenecks.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolQueueSize,
    /// Tracks the utilization of the async pool.
    ///
    /// The utilization is a value between 0.0 and 100.0 which determines how busy the pool is doing
    /// CPU-bound work.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolUtilization,
    /// Tracks the activity of the async pool.
    ///
    /// The activity is a value between 0.0 and 100.0 which determines how busy the pool is
    /// w.r.t. its provisioned capacity.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolActivity,
    /// The state of Relay with respect to the upstream connection.
    /// Possible values are `0` for normal operations and `1` for a network outage.
    NetworkOutage,
    /// Number of elements in the envelope buffer across all the stacks.
    ///
    /// This metric is tagged with:
    /// - `storage_type`: The type of storage used in the envelope buffer.
    BufferEnvelopesCount,
    /// The number of individual stacks in the priority queue.
    ///
    /// Per combination of `(own_key, sampling_key)`, a new stack is created.
    BufferStackCount,
    /// The used disk for the buffer.
    BufferDiskUsed,
    /// The currently used memory by the entire system.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryUsed,
    /// The total system memory.
    ///
    /// Relay uses the same value for its memory health check.
    SystemMemoryTotal,
    /// The number of connections currently being managed by the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolConnections,
    /// The number of idle connections in the Redis Pool.
    #[cfg(feature = "processing")]
    RedisPoolIdleConnections,
    /// The maximum number of connections in the Redis pool.
    #[cfg(feature = "processing")]
    RedisPoolMaxConnections,
    /// The number of futures waiting to grab a connection.
    #[cfg(feature = "processing")]
    RedisPoolWaitingForConnection,
    /// The number of notifications in the broadcast channel of the project cache.
    ProjectCacheNotificationChannel,
    /// The number of scheduled and in progress fetches in the project cache.
    ProjectCacheScheduledFetches,
    /// Exposes the amount of currently open and handled connections by the server.
    ServerActiveConnections,
    /// Maximum delay of a metric bucket in seconds.
    ///
    /// The maximum is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayMax,
    /// Estimated percentage [0-100] of how busy Relay's internal services are.
    ///
    /// This metric is tagged with:
    /// - `service`: the service name.
    /// - `instance_id`: an identifier for the running service, unique per service name.
    ServiceUtilization,
}
86
87impl GaugeMetric for RelayGauges {
88 fn name(&self) -> &'static str {
89 match self {
90 Self::AsyncPoolQueueSize => "async_pool.queue_size",
91 Self::AsyncPoolUtilization => "async_pool.utilization",
92 Self::AsyncPoolActivity => "async_pool.activity",
93 Self::NetworkOutage => "upstream.network_outage",
94 Self::BufferEnvelopesCount => "buffer.envelopes_count",
95 Self::BufferStackCount => "buffer.stack_count",
96 Self::BufferDiskUsed => "buffer.disk_used",
97 Self::SystemMemoryUsed => "health.system_memory.used",
98 Self::SystemMemoryTotal => "health.system_memory.total",
99 #[cfg(feature = "processing")]
100 Self::RedisPoolConnections => "redis.pool.connections",
101 #[cfg(feature = "processing")]
102 Self::RedisPoolIdleConnections => "redis.pool.idle_connections",
103 #[cfg(feature = "processing")]
104 Self::RedisPoolMaxConnections => "redis.pool.max_connections",
105 #[cfg(feature = "processing")]
106 Self::RedisPoolWaitingForConnection => "redis.pool.waiting_for_connection",
107 Self::ProjectCacheNotificationChannel => "project_cache.notification_channel.size",
108 Self::ProjectCacheScheduledFetches => "project_cache.fetches.size",
109 Self::ServerActiveConnections => "server.http.connections",
110 #[cfg(feature = "processing")]
111 Self::MetricDelayMax => "metrics.delay.max",
112 Self::ServiceUtilization => "service.utilization",
113 }
114 }
115}
116
/// Gauge metrics collected from the Runtime.
///
/// Each variant exposes the equally named value from [`RuntimeMetrics`].
pub enum RuntimeGauges {
    /// Exposes [`RuntimeMetrics::num_idle_threads`].
    NumIdleThreads,
    /// Exposes [`RuntimeMetrics::num_alive_tasks`].
    NumAliveTasks,
    /// Exposes [`RuntimeMetrics::blocking_queue_depth`].
    BlockingQueueDepth,
    /// Exposes [`RuntimeMetrics::num_blocking_threads`].
    NumBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_idle_blocking_threads`].
    NumIdleBlockingThreads,
    /// Exposes [`RuntimeMetrics::num_workers`].
    NumWorkers,
    /// Exposes [`RuntimeMetrics::worker_local_queue_depth`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalQueueDepth,
    /// Exposes [`RuntimeMetrics::worker_mean_poll_time`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerMeanPollTime,
}
142
143impl GaugeMetric for RuntimeGauges {
144 fn name(&self) -> &'static str {
145 match self {
146 RuntimeGauges::NumIdleThreads => "runtime.idle_threads",
147 RuntimeGauges::NumAliveTasks => "runtime.alive_tasks",
148 RuntimeGauges::BlockingQueueDepth => "runtime.blocking_queue_depth",
149 RuntimeGauges::NumBlockingThreads => "runtime.num_blocking_threads",
150 RuntimeGauges::NumIdleBlockingThreads => "runtime.num_idle_blocking_threads",
151 RuntimeGauges::NumWorkers => "runtime.num_workers",
152 RuntimeGauges::WorkerLocalQueueDepth => "runtime.worker_local_queue_depth",
153 RuntimeGauges::WorkerMeanPollTime => "runtime.worker_mean_poll_time",
154 }
155 }
156}
157
/// Counter metrics collected from the Runtime.
///
/// Each variant exposes the equally named value from [`RuntimeMetrics`].
pub enum RuntimeCounters {
    /// Exposes [`RuntimeMetrics::budget_forced_yield_count`].
    BudgetForcedYieldCount,
    /// Exposes [`RuntimeMetrics::worker_local_schedule_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerLocalScheduleCount,
    /// Exposes [`RuntimeMetrics::worker_noop_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerNoopCount,
    /// Exposes [`RuntimeMetrics::worker_overflow_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerOverflowCount,
    /// Exposes [`RuntimeMetrics::worker_park_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerParkCount,
    /// Exposes [`RuntimeMetrics::worker_poll_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerPollCount,
    /// Exposes [`RuntimeMetrics::worker_steal_count`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealCount,
    /// Exposes [`RuntimeMetrics::worker_steal_operations`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerStealOperations,
    /// Exposes [`RuntimeMetrics::worker_total_busy_duration`].
    ///
    /// This metric is tagged with:
    /// - `worker`: the worker id.
    WorkerTotalBusyDuration,
}
203
204impl CounterMetric for RuntimeCounters {
205 fn name(&self) -> &'static str {
206 match self {
207 RuntimeCounters::BudgetForcedYieldCount => "runtime.budget_forced_yield_count",
208 RuntimeCounters::WorkerLocalScheduleCount => "runtime.worker_local_schedule_count",
209 RuntimeCounters::WorkerNoopCount => "runtime.worker_noop_count",
210 RuntimeCounters::WorkerOverflowCount => "runtime.worker_overflow_count",
211 RuntimeCounters::WorkerParkCount => "runtime.worker_park_count",
212 RuntimeCounters::WorkerPollCount => "runtime.worker_poll_count",
213 RuntimeCounters::WorkerStealCount => "runtime.worker_steal_count",
214 RuntimeCounters::WorkerStealOperations => "runtime.worker_steal_operations",
215 RuntimeCounters::WorkerTotalBusyDuration => "runtime.worker_total_busy_duration",
216 }
217 }
218}
219
/// Distribution metrics used by Relay.
pub enum RelayDistributions {
    /// The number of bytes received by Relay for each individual envelope item type.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    EnvelopeItemSize,
    /// The amount of bytes in the item payloads of an envelope pushed to the envelope buffer.
    ///
    /// This is not quite the same as the actual size of a serialized envelope, because it ignores
    /// the envelope header and item headers.
    BufferEnvelopeBodySize,
    /// Size of a serialized envelope pushed to the envelope buffer.
    BufferEnvelopeSize,
    /// Size of a compressed envelope pushed to the envelope buffer.
    BufferEnvelopeSizeCompressed,
    /// The number of batches emitted per partition.
    BatchesPerPartition,
    /// The number of buckets in a batch emitted.
    ///
    /// This corresponds to the number of buckets that will end up in an envelope.
    BucketsPerBatch,
    /// The number of spans per processed transaction event.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EventSpans,
    /// Number of projects in the in-memory project cache that are waiting for their state to be
    /// updated.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStatePending,
    /// Number of project states **requested** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// The batch size can be configured with `cache.batch_size`. See `project_cache.size` for more
    /// description of the project cache.
    ProjectStateRequestBatchSize,
    /// Number of project states **returned** from the upstream for each batch request.
    ///
    /// If multiple batches are updated concurrently, this metric is reported multiple times.
    ///
    /// See `project_cache.size` for more description of the project cache.
    ProjectStateReceived,
    /// Number of attempts required to fetch the config for a given project key.
    ProjectStateAttempts,
    /// Number of project states currently held in the in-memory project cache.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    ///   automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    ///   be used to ingest events. Once the grace period expires, the cache is evicted and new
    ///   requests wait for an update.
    ///
    /// There is no limit to the number of cached projects.
    ProjectStateCacheSize,
    /// The size of the compressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesCompressed,
    /// The size of the uncompressed project config in the redis cache, in bytes.
    #[cfg(feature = "processing")]
    ProjectStateSizeBytesDecompressed,
    /// The number of upstream requests queued up for sending.
    ///
    /// Relay employs connection keep-alive whenever possible. Connections are kept open for _15_
    /// seconds of inactivity or _75_ seconds of activity. If all connections are busy, they are
    /// queued, which is reflected in this metric.
    ///
    /// This metric is tagged with:
    /// - `priority`: The queueing priority of the request, either `"high"` or `"low"`. The
    ///   priority determines precedence in executing requests.
    ///
    /// The number of concurrent connections can be configured with:
    /// - `limits.max_concurrent_requests` for the overall number of connections
    /// - `limits.max_concurrent_queries` for the number of concurrent high-priority requests
    UpstreamMessageQueueSize,
    /// Counts the number of retries for each upstream http request.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    ///   * `success`: The request was sent and returned a success code `HTTP 2xx`
    ///   * `response_error`: The request was sent and it returned an HTTP error.
    ///   * `payload_failed`: The request was sent but there was an error in interpreting the response.
    ///   * `send_failed`: Failed to send the request due to a network error.
    ///   * `rate_limited`: The request was rate limited.
    ///   * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    UpstreamRetries,
    /// Size of queries (projectconfig queries, i.e. the request payload, not the response) sent by
    /// Relay over HTTP in bytes.
    UpstreamQueryBodySize,
    /// Size of envelopes sent over HTTP in bytes.
    UpstreamEnvelopeBodySize,
    /// Size of batched global metrics requests sent by Relay over HTTP in bytes.
    UpstreamMetricsBodySize,
    /// Distribution of flush buckets over partition keys.
    ///
    /// The distribution of buckets should be even.
    /// If it is not, this metric should expose it.
    PartitionKeys,
    /// Measures how many splits were performed when sending out a partition.
    PartitionSplits,
    /// Canonical size of a Trace Item.
    ///
    /// This is not the size in bytes, this is using the same algorithm we're using for the logs
    /// billing category.
    ///
    /// This metric is tagged with:
    /// - `item`: the trace item type.
    /// - `too_large`: `true` or `false`, whether the item is bigger than the allowed size limit.
    TraceItemCanonicalSize,
    /// The Content-Length of incoming HTTP requests in bytes.
    ///
    /// This metric is tagged with:
    /// - `has_content_length`: Whether the Content-Length header was present ("true"/"false").
    /// - `route`: The matched route pattern.
    /// - `status_code`: The HTTP response status code.
    ContentLength,
    /// Size of individual standalone attachments in bytes.
    ///
    /// This metric is tagged with:
    /// - `sdk`: The name of the Sentry SDK sending the attachments.
    /// - `attachment_type`: The attachment type, if any.
    StandaloneAttachmentSize,
    /// Number of standalone attachments per envelope.
    ///
    /// This metric is tagged with:
    /// - `sdk`: The name of the Sentry SDK sending the attachment.
    StandaloneAttachmentCount,
}
358
359impl DistributionMetric for RelayDistributions {
360 fn name(&self) -> &'static str {
361 match self {
362 Self::EnvelopeItemSize => "event.item_size",
363 Self::EventSpans => "event.spans",
364 Self::BatchesPerPartition => "metrics.buckets.batches_per_partition",
365 Self::BucketsPerBatch => "metrics.buckets.per_batch",
366 Self::BufferEnvelopeBodySize => "buffer.envelope_body_size",
367 Self::BufferEnvelopeSize => "buffer.envelope_size",
368 Self::BufferEnvelopeSizeCompressed => "buffer.envelope_size.compressed",
369 Self::ProjectStatePending => "project_state.pending",
370 Self::ProjectStateAttempts => "project_state.attempts",
371 Self::ProjectStateRequestBatchSize => "project_state.request.batch_size",
372 Self::ProjectStateReceived => "project_state.received",
373 Self::ProjectStateCacheSize => "project_cache.size",
374 #[cfg(feature = "processing")]
375 Self::ProjectStateSizeBytesCompressed => "project_state.size_bytes.compressed",
376 #[cfg(feature = "processing")]
377 Self::ProjectStateSizeBytesDecompressed => "project_state.size_bytes.decompressed",
378 Self::UpstreamMessageQueueSize => "http_queue.size",
379 Self::UpstreamRetries => "upstream.retries",
380 Self::UpstreamQueryBodySize => "upstream.query.body_size",
381 Self::UpstreamEnvelopeBodySize => "upstream.envelope.body_size",
382 Self::UpstreamMetricsBodySize => "upstream.metrics.body_size",
383 Self::PartitionKeys => "metrics.buckets.partition_keys",
384 Self::PartitionSplits => "partition_splits",
385 Self::TraceItemCanonicalSize => "trace_item.canonical_size",
386 Self::ContentLength => "requests.content_length",
387 Self::StandaloneAttachmentSize => "processing.standalone_attachment_size",
388 Self::StandaloneAttachmentCount => "processing.standalone_attachment_count",
389 }
390 }
391}
392
/// Timer metrics used by Relay
pub enum RelayTimers {
    /// Time in milliseconds spent deserializing an event from JSON bytes into the native data
    /// structure on which Relay operates.
    EventProcessingDeserialize,
    /// Time in milliseconds spent running normalization on an event. Normalization
    /// happens before envelope filtering and metric extraction.
    EventProcessingNormalization,
    /// Time in milliseconds spent running inbound data filters on an event.
    EventProcessingFiltering,
    /// Time in milliseconds spent checking for organization, project, and DSN rate limits.
    ///
    /// Not all events reach this point. After an event is rate limited for the first time, the rate
    /// limit is cached. Events coming in after this will be discarded earlier in the request queue
    /// and do not reach the processing queue.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of limiter executed, `cached` or `consistent`.
    /// - `unit`: The item/unit of work which is being rate limited, only available for new
    ///   processing pipelines.
    EventProcessingRateLimiting,
    /// Time in milliseconds spent in data scrubbing for the current event. Data scrubbing happens
    /// last before serializing the event back to JSON.
    EventProcessingPii,
    /// Time spent converting the event from its in-memory representation into a JSON string.
    EventProcessingSerialization,
    /// Time used to extract span metrics from an event.
    EventProcessingSpanMetricsExtraction,
    /// Time spent between the start of request handling and processing of the envelope.
    ///
    /// This includes streaming the request body, scheduling overheads, project config fetching,
    /// batched requests and congestions in the internal processor. This does not include delays in
    /// the incoming request (body upload) and skips all envelopes that are fast-rejected.
    EnvelopeWaitTime,
    /// Time in milliseconds spent in synchronous processing of envelopes.
    ///
    /// This timing covers the end-to-end processing in the CPU pool and comprises:
    ///
    /// - `event_processing.deserialize`
    /// - `event_processing.pii`
    /// - `event_processing.serialization`
    ///
    /// With Relay in processing mode, this also includes the following timings:
    ///
    /// - `event_processing.process`
    /// - `event_processing.filtering`
    /// - `event_processing.rate_limiting`
    EnvelopeProcessingTime,
    /// Total time in milliseconds an envelope spends in Relay from the time it is received until it
    /// finishes processing and has been submitted to the upstream.
    EnvelopeTotalTime,
    /// Latency of project config updates until they reach Relay.
    ///
    /// The metric is calculated by using the creation timestamp of the project config
    /// and when Relay updates its local cache with the new project config.
    ///
    /// No metric is emitted when Relay fetches a project config for the first time.
    ///
    /// This metric is tagged with:
    /// - `delay`: Bucketed amount of seconds passed between fetches.
    ProjectCacheUpdateLatency,
    /// Total time spent from starting to fetch a project config update to completing the fetch.
    ProjectCacheFetchDuration,
    /// Total time in milliseconds spent fetching queued project configuration updates requests to
    /// resolve.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries * cache.batch_size` projects from the upstream. This metric
    /// measures the wall clock time for all concurrent requests in this loop.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequestDuration,
    /// Time in milliseconds required to decompress a project config from redis.
    ///
    /// Note that this also times the cases where project config is uncompressed,
    /// in which case the timer should be very close to zero.
    #[cfg(feature = "processing")]
    ProjectStateDecompression,
    /// Total duration in milliseconds for handling inbound web requests until the HTTP response is
    /// returned to the client.
    ///
    /// This does **not** correspond to the full event ingestion time. Requests for events that are
    /// not immediately rejected due to bad data or cached rate limits always return `200 OK`. Full
    /// validation and normalization occur asynchronously, which is reported by
    /// `event.processing_time`.
    ///
    /// This metric is tagged with:
    /// - `method`: The HTTP method of the request.
    /// - `route`: Unique dashed identifier of the endpoint.
    RequestsDuration,
    /// Time spent on minidump scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the minidump. Even if no PII
    /// scrubbing rules applied the minidump will still be parsed and the rules evaluated on
    /// the parsed minidump, this duration is reported here with status of "n/a".
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: Scrubbing status: "ok" means successfully scrubbed, "error" means there
    ///   was an error during scrubbing and finally "n/a" means scrubbing was successful
    ///   but no scrubbing rules applied.
    MinidumpScrubbing,
    /// Time spent on view hierarchy scrubbing.
    ///
    /// This is the total time spent on parsing and scrubbing the view hierarchy json file.
    ///
    /// This metric is tagged with:
    ///
    /// - `status`: "ok" means successfully scrubbed, "error" means there was an error during
    ///   scrubbing
    ViewHierarchyScrubbing,
    /// Time spent on attachment scrubbing.
    ///
    /// This represents the total time spent on evaluating the scrubbing rules for an
    /// attachment and the attachment scrubbing itself, regardless of whether any rules were
    /// applied. Note that minidumps which failed to be parsed (status="error" in
    /// scrubbing.minidumps.duration) will be scrubbed as plain attachments and count
    /// towards this.
    ///
    /// This metric is tagged with:
    ///
    /// - `attachment_type`: The type of attachment, e.g. "minidump".
    AttachmentScrubbing,
    /// Total time spent to send request to upstream Relay and handle the response.
    ///
    /// This metric is tagged with:
    ///
    /// - `result`: What happened to the request, an enumeration with the following values:
    ///   * `success`: The request was sent and returned a success code `HTTP 2xx`
    ///   * `response_error`: The request was sent and it returned an HTTP error.
    ///   * `payload_failed`: The request was sent but there was an error in interpreting the response.
    ///   * `send_failed`: Failed to send the request due to a network error.
    ///   * `rate_limited`: The request was rate limited.
    ///   * `invalid_json`: The response could not be parsed back into JSON.
    /// - `route`: The endpoint that was called on the upstream.
    /// - `status-code`: The status code of the request when available, otherwise "-".
    /// - `retries`: Number of retries bucket 0, 1, 2, few (3 - 10), many (more than 10).
    UpstreamRequestsDuration,
    /// The delay between the timestamp stated in a payload and the receive time.
    ///
    /// SDKs cannot transmit payloads immediately in all cases. Sometimes, crashes require that
    /// events are sent after restarting the application. Similarly, SDKs buffer events during
    /// network downtimes for later transmission. This metric measures the delay between the time of
    /// the event and the time it arrives in Relay. The delay is measured after clock drift
    /// correction is applied.
    ///
    /// Only payloads with a delay of more than 1 minute are captured.
    ///
    /// This metric is tagged with:
    ///
    /// - `category`: The data category of the payload. Can be one of: `event`, `transaction`,
    ///   `security`, or `session`.
    TimestampDelay,
    /// The time it takes the outcome aggregator to flush aggregated outcomes.
    OutcomeAggregatorFlushTime,
    /// Time in milliseconds spent on parsing, normalizing and scrubbing replay recordings.
    ReplayRecordingProcessing,
    /// Total time spent to send a request and receive the response from upstream.
    GlobalConfigRequestDuration,
    /// Timing in milliseconds for processing a message in the internal CPU pool.
    ///
    /// This metric is tagged with:
    ///
    /// - `message`: The type of message that was processed.
    ProcessMessageDuration,
    /// Timing in milliseconds for processing a task in the project cache service.
    ///
    /// This metric is tagged with:
    /// - `task`: The type of the task the project cache does.
    ProjectCacheTaskDuration,
    /// Timing in milliseconds for handling and responding to a health check request.
    ///
    /// This metric is tagged with:
    /// - `type`: The type of the health check, `liveness` or `readiness`.
    HealthCheckDuration,
    /// Temporary timing metric for how much time was spent evaluating span and transaction
    /// rate limits using the `RateLimitBuckets` message in the processor.
    ///
    /// This metric is tagged with:
    /// - `category`: The data category evaluated.
    /// - `limited`: Whether the batch is rate limited.
    /// - `count`: How many items matching the data category are contained in the batch.
    #[cfg(feature = "processing")]
    RateLimitBucketsDuration,
    /// Timing in milliseconds for processing a task in the aggregator service.
    ///
    /// This metric is tagged with:
    /// - `task`: The task being executed by the aggregator.
    /// - `aggregator`: The name of the aggregator.
    AggregatorServiceDuration,
    /// Timing in milliseconds for processing a message in the metric router service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    MetricRouterServiceDuration,
    /// Timing in milliseconds for processing a message in the metric store service.
    ///
    /// This metric is tagged with:
    /// - `message`: The type of message that was processed.
    #[cfg(feature = "processing")]
    StoreServiceDuration,
    /// Timing in milliseconds for the time it takes to initialize the buffer.
    BufferInitialization,
    /// Timing in milliseconds for the time it takes for the buffer to pack & spool a batch.
    ///
    /// Contains the time it takes to pack multiple envelopes into a single memory blob.
    BufferSpool,
    /// Timing in milliseconds for the time it takes for the buffer to spool data to SQLite.
    BufferSqlWrite,
    /// Timing in milliseconds for the time it takes for the buffer to unspool data from disk.
    BufferUnspool,
    /// Timing in milliseconds for the time it takes for the buffer to push.
    BufferPush,
    /// Timing in milliseconds for the time it takes for the buffer to peek.
    BufferPeek,
    /// Timing in milliseconds for the time it takes for the buffer to pop.
    BufferPop,
    /// Timing in milliseconds for the time it takes for the buffer to drain its envelopes.
    BufferDrain,
    /// Timing in milliseconds for the time it takes for an envelope to be serialized.
    BufferEnvelopesSerialization,
    /// Timing in milliseconds for the time it takes for an envelope to be compressed.
    BufferEnvelopeCompression,
    /// Timing in milliseconds for the time it takes for an envelope to be decompressed.
    BufferEnvelopeDecompression,
    /// Timing in milliseconds to count spans in a serialized transaction payload.
    CheckNestedSpans,
    /// The time it needs to create a signature. Includes both the signature used for
    /// trusted relays and for register challenges.
    SignatureCreationDuration,
    /// Time needed to upload an attachment to objectstore.
    ///
    /// Tagged by:
    /// - `type`: "envelope" or "attachment_v2".
    #[cfg(feature = "processing")]
    AttachmentUploadDuration,
    /// Time spent waiting for the producer of an async stream.
    ///
    /// Tagged by:
    /// - `name`: Name of the stream, for example "upload".
    StreamProducerLatency,
    /// Time spent waiting for the consumer of an async stream.
    ///
    /// Tagged by:
    /// - `name`: Name of the stream, for example "upload".
    StreamConsumerLatency,
}
642
643impl TimerMetric for RelayTimers {
644 fn name(&self) -> &'static str {
645 match self {
646 RelayTimers::EventProcessingDeserialize => "event_processing.deserialize",
647 RelayTimers::EventProcessingNormalization => "event_processing.normalization",
648 RelayTimers::EventProcessingFiltering => "event_processing.filtering",
649 RelayTimers::EventProcessingRateLimiting => "event_processing.rate_limiting",
650 RelayTimers::EventProcessingPii => "event_processing.pii",
651 RelayTimers::EventProcessingSpanMetricsExtraction => {
652 "event_processing.span_metrics_extraction"
653 }
654 RelayTimers::EventProcessingSerialization => "event_processing.serialization",
655 RelayTimers::EnvelopeWaitTime => "event.wait_time",
656 RelayTimers::EnvelopeProcessingTime => "event.processing_time",
657 RelayTimers::EnvelopeTotalTime => "event.total_time",
658 RelayTimers::ProjectStateRequestDuration => "project_state.request.duration",
659 #[cfg(feature = "processing")]
660 RelayTimers::ProjectStateDecompression => "project_state.decompression",
661 RelayTimers::ProjectCacheUpdateLatency => "project_cache.latency",
662 RelayTimers::ProjectCacheFetchDuration => "project_cache.fetch.duration",
663 RelayTimers::RequestsDuration => "requests.duration",
664 RelayTimers::MinidumpScrubbing => "scrubbing.minidumps.duration",
665 RelayTimers::ViewHierarchyScrubbing => "scrubbing.view_hierarchy_scrubbing.duration",
666 RelayTimers::AttachmentScrubbing => "scrubbing.attachments.duration",
667 RelayTimers::UpstreamRequestsDuration => "upstream.requests.duration",
668 RelayTimers::TimestampDelay => "requests.timestamp_delay",
669 RelayTimers::OutcomeAggregatorFlushTime => "outcomes.aggregator.flush_time",
670 RelayTimers::ReplayRecordingProcessing => "replay.recording.process",
671 RelayTimers::GlobalConfigRequestDuration => "global_config.requests.duration",
672 RelayTimers::ProcessMessageDuration => "processor.message.duration",
673 RelayTimers::ProjectCacheTaskDuration => "project_cache.task.duration",
674 RelayTimers::HealthCheckDuration => "health.message.duration",
675 #[cfg(feature = "processing")]
676 RelayTimers::RateLimitBucketsDuration => "processor.rate_limit_buckets",
677 RelayTimers::AggregatorServiceDuration => "metrics.aggregator.message.duration",
678 RelayTimers::MetricRouterServiceDuration => "metrics.router.message.duration",
679 #[cfg(feature = "processing")]
680 RelayTimers::StoreServiceDuration => "store.message.duration",
681 RelayTimers::BufferInitialization => "buffer.initialization.duration",
682 RelayTimers::BufferSpool => "buffer.spool.duration",
683 RelayTimers::BufferSqlWrite => "buffer.write.duration",
684 RelayTimers::BufferUnspool => "buffer.unspool.duration",
685 RelayTimers::BufferPush => "buffer.push.duration",
686 RelayTimers::BufferPeek => "buffer.peek.duration",
687 RelayTimers::BufferPop => "buffer.pop.duration",
688 RelayTimers::BufferDrain => "buffer.drain.duration",
689 RelayTimers::BufferEnvelopesSerialization => "buffer.envelopes_serialization",
690 RelayTimers::BufferEnvelopeCompression => "buffer.envelopes_compression",
691 RelayTimers::BufferEnvelopeDecompression => "buffer.envelopes_decompression",
692 RelayTimers::CheckNestedSpans => "envelope.check_nested_spans",
693 RelayTimers::SignatureCreationDuration => "signature.create.duration",
694 #[cfg(feature = "processing")]
695 RelayTimers::AttachmentUploadDuration => "attachment.upload.duration",
696 RelayTimers::StreamProducerLatency => "stream.producer.latency",
697 RelayTimers::StreamConsumerLatency => "stream.consumer.latency",
698 }
699 }
700}
701
/// Counter metrics used by Relay
pub enum RelayCounters {
    /// Tracks the number of tasks driven to completion by the async pool.
    ///
    /// This metric is tagged with:
    /// - `pool`: the name of the pool.
    AsyncPoolFinishedTasks,
    /// Number of Events that had corrupted (unprintable) event attributes.
    ///
    /// This currently checks for `environment` and `release`, for which we know that
    /// some SDKs may send corrupted values.
    EventCorrupted,
    /// Number of envelopes accepted in the current time slot.
    ///
    /// This represents requests that have successfully passed rate limits and filters, and have
    /// been sent to the upstream.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    ///   there was an error or bug.
    EnvelopeAccepted,
    /// Number of envelopes rejected in the current time slot.
    ///
    /// This includes envelopes being rejected because they are malformed or any other errors during
    /// processing (including filtered events, invalid payloads, and rate limits).
    ///
    /// To check the rejection reason, check `events.outcomes`, instead.
    ///
    /// This metric is tagged with:
    /// - `handling`: Either `"success"` if the envelope was handled correctly, or `"failure"` if
    ///   there was an error or bug.
    EnvelopeRejected,
    /// Number of total envelope items we received.
    ///
    /// Note: This does not count raw items, it counts the logical amount of items,
    /// e.g. a single item container counts all its contained items.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EnvelopeItems,
    /// Number of bytes we processed per envelope item.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the items being counted.
    /// - `is_container`: Whether this item is a container holding multiple items.
    /// - `sdk`: The name of the Sentry SDK sending the envelope. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    EnvelopeItemBytes,
    /// Number of times an envelope from the buffer is trying to be popped.
    BufferTryPop,
    /// Number of envelopes spooled to disk.
    BufferSpooledEnvelopes,
    /// Number of envelopes unspooled from disk.
    BufferUnspooledEnvelopes,
    /// Number of project changed updates received by the buffer.
    BufferProjectChangedEvent,
    /// Number of times one or more projects of an envelope were pending when trying to pop
    /// their envelope.
    BufferProjectPending,
    /// Number of iterations of the envelope buffer service loop.
    BufferServiceLoopIteration,
    /// Number of outcomes and reasons for rejected Envelopes.
    ///
    /// This metric is tagged with:
    /// - `outcome`: The basic cause for rejecting the event.
    /// - `reason`: A more detailed identifier describing the rule or mechanism leading to the
    ///   outcome.
    /// - `to`: Describes the destination of the outcome. Can be either 'kafka' (when in
    ///   processing mode) or 'http' (when outcomes are enabled in an external relay).
    ///
    /// Possible outcomes are:
    /// - `filtered`: Dropped by inbound data filters. The reason specifies the filter that
    ///   matched.
    /// - `rate_limited`: Dropped by organization, project, or DSN rate limit, as well as exceeding
    ///   the Sentry plan quota. The reason contains the rate limit or quota that was exceeded.
    /// - `invalid`: Data was considered invalid and could not be recovered. The reason indicates
    ///   the validation that failed.
    Outcomes,
    /// The number of individual outcomes including their quantity.
    ///
    /// While [`RelayCounters::Outcomes`] tracks the number of times aggregated outcomes
    /// have been emitted, this counter tracks the total quantity of individual outcomes.
    OutcomeQuantity,
    /// Number of project state HTTP requests.
    ///
    /// Relay updates projects in batches. Every update cycle, Relay requests
    /// `limits.max_concurrent_queries` batches of `cache.batch_size` projects from the upstream.
    /// The duration of these requests is reported via `project_state.request.duration`.
    ///
    /// Note that after an update loop has completed, there may be more projects pending updates.
    /// This is indicated by `project_state.pending`.
    ProjectStateRequest,
    /// Number of times a project state is requested from the central Redis cache.
    ///
    /// This metric is tagged with:
    /// - `hit`: One of:
    ///   - `revision`: the cached version was validated to be up to date using its revision.
    ///   - `project_config`: the request was handled by the cache.
    ///   - `project_config_revision`: the request was handled by the cache and the revision did
    ///     not change.
    ///   - `false`: the request will be sent to the sentry endpoint.
    #[cfg(feature = "processing")]
    ProjectStateRedis,
    /// Number of times a project had a fetch scheduled.
    ProjectCacheSchedule,
    /// Number of times an upstream request for a project config is completed.
    ///
    /// Completion can be because a result was returned or because the config request was
    /// dropped after there still was no response after a timeout. This metric has tags
    /// for `result` and `attempts` indicating whether it was successful or a timeout and how
    /// many attempts were made respectively.
    ProjectUpstreamCompleted,
    /// Number of times an upstream request for a project config failed.
    ///
    /// Failure can happen, for example, when there's a network error. Refer to
    /// [`UpstreamRequestError`](crate::services::upstream::UpstreamRequestError) for all cases.
    ProjectUpstreamFailed,
    /// Number of Relay server starts.
    ///
    /// This can be used to track unwanted restarts due to crashes or termination.
    ServerStarting,
    /// Number of messages placed on the Kafka queues.
    ///
    /// When Relay operates as Sentry service and an Envelope item is successfully processed, each
    /// Envelope item results in a dedicated message on one of the ingestion topics on Kafka.
    ///
    /// This metric is tagged with:
    /// - `event_type`: The kind of message produced to Kafka.
    /// - `namespace` (only for metrics): The namespace that the metric belongs to.
    /// - `is_segment` (only for event_type span): `true` the span is the root of a segment.
    /// - `has_parent` (only for event_type span): `false` if the span is the root of a trace.
    /// - `platform` (only for event_type span): The platform from which the span was sent.
    /// - `metric_type` (only for event_type metric): The metric type, counter, distribution,
    ///   gauge or set.
    /// - `metric_encoding` (only for event_type metric): The encoding used for distribution and
    ///   set metrics.
    ///
    /// The message types can be:
    ///
    /// - `event`: An error or transaction event. Error events are sent to `ingest-events`,
    ///   transactions to `ingest-transactions`, and errors with attachments are sent to
    ///   `ingest-attachments`.
    /// - `attachment`: An attachment file associated with an error event, sent to
    ///   `ingest-attachments`.
    /// - `user_report`: A message from the user feedback dialog, sent to `ingest-events`.
    /// - `session`: A release health session update, sent to `ingest-sessions`.
    #[cfg(feature = "processing")]
    ProcessingMessageProduced,
    /// Number of spans produced in the new format.
    #[cfg(feature = "processing")]
    SpanV2Produced,
    /// Number of events that hit any of the store-like endpoints: Envelope, Store, Security,
    /// Minidump, Unreal.
    ///
    /// The events are counted before they are rate limited, filtered, or processed in any way.
    ///
    /// This metric is tagged with:
    /// - `version`: The event protocol version number defaulting to `7`.
    EventProtocol,
    /// The number of transaction events processed by the source of the transaction name.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `source`: The source of the transaction name on the client. See the [transaction source
    ///   documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    ///   for all valid values.
    /// - `contains_slashes`: Whether the transaction name contains `/`. We use this as a heuristic
    ///   to represent URL transactions.
    EventTransaction,
    /// The number of transaction events processed grouped by transaction name modifications.
    /// This metric is tagged with:
    /// - `source_in`: The source of the transaction name before normalization.
    ///   See the [transaction source
    ///   documentation](https://develop.sentry.dev/sdk/event-payloads/properties/transaction_info/)
    ///   for all valid values.
    /// - `change`: The mechanism that changed the transaction name.
    ///   Either `"none"`, `"pattern"`, `"rule"`, or `"both"`.
    /// - `source_out`: The source of the transaction name after normalization.
    TransactionNameChanges,
    /// Number of HTTP requests reaching Relay.
    Requests,
    /// Number of completed HTTP requests.
    ///
    /// This metric is tagged with:
    ///
    /// - `status_code`: The HTTP status code number.
    /// - `method`: The HTTP method used in the request in uppercase.
    /// - `route`: Unique dashed identifier of the endpoint.
    ResponsesStatusCodes,
    /// Number of evicted stale projects from the cache.
    ///
    /// Relay scans the in-memory project cache for stale entries in a regular interval configured
    /// by `cache.eviction_interval`.
    ///
    /// The cache duration for project states can be configured with the following options:
    ///
    /// - `cache.project_expiry`: The time after which a project state counts as expired. It is
    ///   automatically refreshed if a request references the project after it has expired.
    /// - `cache.project_grace_period`: The time after expiry at which the project state will still
    ///   be used to ingest events. Once the grace period expires, the cache is evicted and new
    ///   requests wait for an update.
    EvictingStaleProjectCaches,
    /// Number of refreshes for stale projects in the cache.
    RefreshStaleProjectCaches,
    /// Number of times that parsing a metrics bucket item from an envelope failed.
    MetricBucketsParsingFailed,
    /// Number of Events with an OpenTelemetry Context.
    ///
    /// This metric is tagged with:
    /// - `platform`: The event's platform, such as `"javascript"`.
    /// - `sdk`: The name of the Sentry SDK sending the transaction. This tag is only set for
    ///   Sentry's SDKs and defaults to "proprietary".
    OpenTelemetryEvent,
    /// Number of global config fetches from upstream. Only 2XX responses are
    /// considered and ignores send errors (e.g. auth or network errors).
    ///
    /// This metric is tagged with:
    /// - `success`: whether deserializing the global config succeeded.
    GlobalConfigFetched,
    /// The number of attachments processed in the same envelope as a user_report_v2 event.
    FeedbackAttachments,
    /// All COGS tracked values.
    ///
    /// This metric is tagged with:
    /// - `resource_id`: The COGS resource id.
    /// - `app_feature`: The COGS app feature.
    CogsUsage,
    /// The amount of times metrics of a project have been flushed without the project being
    /// fetched/available.
    ProjectStateFlushMetricsNoProject,
    /// Incremented every time a bucket is dropped.
    ///
    /// This should only happen when a project state is invalid during graceful shutdown.
    ///
    /// This metric is tagged with:
    /// - `aggregator`: The name of the metrics aggregator (usually `"default"`).
    BucketsDropped,
    /// Incremented every time a segment exceeds the expected limit.
    ReplayExceededSegmentLimit,
    /// Incremented every time the server accepts a new connection.
    ServerSocketAccept,
    /// Incremented every time the server aborts a connection because of an idle timeout.
    ServerConnectionIdleTimeout,
    /// The total delay of metric buckets in seconds.
    ///
    /// The delay is measured from initial creation of the bucket in an internal Relay
    /// until it is produced to Kafka.
    ///
    /// Use [`Self::MetricDelayCount`] to calculate the average delay.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelaySum,
    /// The amount of buckets counted for the [`Self::MetricDelaySum`] metric.
    ///
    /// This metric is tagged with:
    /// - `namespace`: the metric namespace.
    #[cfg(feature = "processing")]
    MetricDelayCount,
    /// The amount of times PlayStation processing was attempted.
    #[cfg(all(sentry, feature = "processing"))]
    PlaystationProcessing,
    /// The number of times a sampling decision was made.
    ///
    /// This metric is tagged with:
    /// - `item`: what item the decision is taken for (transaction vs span).
    SamplingDecision,
    /// The number of times an upload of an attachment occurs.
    ///
    /// This metric is tagged with:
    /// - `result`: `success` or the failure reason.
    /// - `type`: `envelope` or `attachment_v2`
    #[cfg(feature = "processing")]
    AttachmentUpload,
    /// Whether a logs envelope has a trace context header or not.
    ///
    /// This metric is tagged with:
    /// - `dsc`: yes or no
    /// - `sdk`: low-cardinality client name
    EnvelopeWithLogs,
    /// Amount of profile chunks without a platform item header.
    ///
    /// The metric is emitted when processing profile chunks, profile chunks which are fast path
    /// rate limited are not counted in this metric.
    ProfileChunksWithoutPlatform,
    /// Number of errors that have been processed by the error processing pipeline.
    ///
    /// This metric is tagged with:
    /// - `expansion`: What expansion was used to expand the error (e.g. unreal).
    ErrorProcessed,
    /// Number of 'standalone' items.
    ///
    /// This metric is tagged with:
    /// - `item_type`: The type of the item.
    /// - `attachment_type`: The attachment type of the item, if any.
    StandaloneItem,
}
1003
1004impl CounterMetric for RelayCounters {
1005 fn name(&self) -> &'static str {
1006 match self {
1007 RelayCounters::AsyncPoolFinishedTasks => "async_pool.finished_tasks",
1008 RelayCounters::EventCorrupted => "event.corrupted",
1009 RelayCounters::EnvelopeAccepted => "event.accepted",
1010 RelayCounters::EnvelopeRejected => "event.rejected",
1011 RelayCounters::EnvelopeItems => "event.items",
1012 RelayCounters::EnvelopeItemBytes => "event.item_bytes",
1013 RelayCounters::BufferTryPop => "buffer.try_pop",
1014 RelayCounters::BufferSpooledEnvelopes => "buffer.spooled_envelopes",
1015 RelayCounters::BufferUnspooledEnvelopes => "buffer.unspooled_envelopes",
1016 RelayCounters::BufferProjectChangedEvent => "buffer.project_changed_event",
1017 RelayCounters::BufferProjectPending => "buffer.project_pending",
1018 RelayCounters::BufferServiceLoopIteration => "buffer.service_loop_iteration",
1019 RelayCounters::Outcomes => "events.outcomes",
1020 RelayCounters::OutcomeQuantity => "events.outcome_quantity",
1021 RelayCounters::ProjectStateRequest => "project_state.request",
1022 #[cfg(feature = "processing")]
1023 RelayCounters::ProjectStateRedis => "project_state.redis.requests",
1024 RelayCounters::ProjectUpstreamCompleted => "project_upstream.completed",
1025 RelayCounters::ProjectUpstreamFailed => "project_upstream.failed",
1026 RelayCounters::ProjectCacheSchedule => "project_cache.schedule",
1027 RelayCounters::ServerStarting => "server.starting",
1028 #[cfg(feature = "processing")]
1029 RelayCounters::ProcessingMessageProduced => "processing.event.produced",
1030 #[cfg(feature = "processing")]
1031 RelayCounters::SpanV2Produced => "store.produced.span_v2",
1032 RelayCounters::EventProtocol => "event.protocol",
1033 RelayCounters::EventTransaction => "event.transaction",
1034 RelayCounters::TransactionNameChanges => "event.transaction_name_changes",
1035 RelayCounters::Requests => "requests",
1036 RelayCounters::ResponsesStatusCodes => "responses.status_codes",
1037 RelayCounters::EvictingStaleProjectCaches => "project_cache.eviction",
1038 RelayCounters::RefreshStaleProjectCaches => "project_cache.refresh",
1039 RelayCounters::MetricBucketsParsingFailed => "metrics.buckets.parsing_failed",
1040 RelayCounters::OpenTelemetryEvent => "event.opentelemetry",
1041 RelayCounters::GlobalConfigFetched => "global_config.fetch",
1042 RelayCounters::FeedbackAttachments => "processing.feedback_attachments",
1043 RelayCounters::CogsUsage => "cogs.usage",
1044 RelayCounters::ProjectStateFlushMetricsNoProject => "project_state.metrics.no_project",
1045 RelayCounters::BucketsDropped => "metrics.buckets.dropped",
1046 RelayCounters::ReplayExceededSegmentLimit => "replay.segment_limit_exceeded",
1047 RelayCounters::ServerSocketAccept => "server.http.accepted",
1048 RelayCounters::ServerConnectionIdleTimeout => "server.http.idle_timeout",
1049 #[cfg(feature = "processing")]
1050 RelayCounters::MetricDelaySum => "metrics.delay.sum",
1051 #[cfg(feature = "processing")]
1052 RelayCounters::MetricDelayCount => "metrics.delay.count",
1053 #[cfg(all(sentry, feature = "processing"))]
1054 RelayCounters::PlaystationProcessing => "processing.playstation",
1055 RelayCounters::SamplingDecision => "sampling.decision",
1056 #[cfg(feature = "processing")]
1057 RelayCounters::AttachmentUpload => "attachment.upload",
1058 RelayCounters::EnvelopeWithLogs => "logs.envelope",
1059 RelayCounters::ProfileChunksWithoutPlatform => "profile_chunk.no_platform",
1060 RelayCounters::ErrorProcessed => "event.error.processed",
1061 RelayCounters::StandaloneItem => "processing.standalone_item",
1062 }
1063 }
1064}