use crate::statsd::{Counters, map_origin_to_integration, platform_tag};
use crate::{ModelCostV2, ModelCosts};
use relay_event_schema::protocol::{
    Event, Measurements, OperationType, Span, SpanData, TraceContext,
};
use relay_protocol::{Annotated, Getter, Value};

/// Token counts extracted from a span's `gen_ai.usage.*` data fields.
#[derive(Debug, Copy, Clone)]
pub struct UsedTokens {
    /// Total input tokens, including cached input tokens.
    pub input_tokens: f64,
    /// Input tokens that were served from the provider's prompt cache.
    pub input_cached_tokens: f64,
    /// Input tokens that were written to the provider's prompt cache.
    pub input_cache_write_tokens: f64,
    /// Total output tokens, including reasoning tokens.
    pub output_tokens: f64,
    /// Output tokens spent on reasoning.
    pub output_reasoning_tokens: f64,
}

impl UsedTokens {
    /// Extracts token usage from the span's `gen_ai.usage.*` fields, treating
    /// missing or non-numeric values as zero.
    pub fn from_span_data(data: &SpanData) -> Self {
        macro_rules! get_value {
            ($e:expr) => {
                $e.value().and_then(Value::as_f64).unwrap_or(0.0)
            };
        }

        Self {
            input_tokens: get_value!(data.gen_ai_usage_input_tokens),
            output_tokens: get_value!(data.gen_ai_usage_output_tokens),
            output_reasoning_tokens: get_value!(data.gen_ai_usage_output_tokens_reasoning),
            input_cached_tokens: get_value!(data.gen_ai_usage_input_tokens_cached),
            input_cache_write_tokens: get_value!(data.gen_ai_usage_input_tokens_cache_write),
        }
    }

    /// Returns `true` if the span reported any input or output tokens.
    pub fn has_usage(&self) -> bool {
        self.input_tokens > 0.0 || self.output_tokens > 0.0
    }

    /// Input tokens excluding cached input tokens, which are priced separately.
    pub fn raw_input_tokens(&self) -> f64 {
        self.input_tokens - self.input_cached_tokens
    }

    /// Output tokens excluding reasoning tokens, which may be priced separately.
    pub fn raw_output_tokens(&self) -> f64 {
        self.output_tokens - self.output_reasoning_tokens
    }
}

/// Calculated cost of an AI span, split into input and output components.
#[derive(Debug, Copy, Clone)]
pub struct CalculatedCost {
    /// Cost attributed to input (prompt) tokens.
    pub input: f64,
    /// Cost attributed to output (completion) tokens.
    pub output: f64,
}

impl CalculatedCost {
    /// Total cost: the sum of the input and output components.
    pub fn total(&self) -> f64 {
        self.input + self.output
    }
}

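/// Calculates the cost of an AI span from its token usage and the model's
/// per-token prices.
///
/// A sketch of the arithmetic implemented below (this restates the function
/// body, it is not an external API):
///
/// ```text
/// input  = (input_tokens - cached) * input_per_token
///        + cached * input_cached_per_token
///        + cache_writes * input_cache_write_per_token
///
/// output = (output_tokens - reasoning) * output_per_token
///        + reasoning * output_reasoning_per_token
/// // output_reasoning_per_token falls back to output_per_token when <= 0
/// ```
///
/// Returns `None` (and counts a `calculation_no_tokens` metric) when the span
/// reported no token usage at all.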
pub fn calculate_costs(
    model_cost: &ModelCostV2,
    tokens: UsedTokens,
    integration: &str,
    platform: &str,
) -> Option<CalculatedCost> {
    if !tokens.has_usage() {
        relay_statsd::metric!(
            counter(Counters::GenAiCostCalculationResult) += 1,
            result = "calculation_no_tokens",
            integration = integration,
            platform = platform,
        );
        return None;
    }

    let input = (tokens.raw_input_tokens() * model_cost.input_per_token)
        + (tokens.input_cached_tokens * model_cost.input_cached_per_token)
        + (tokens.input_cache_write_tokens * model_cost.input_cache_write_per_token);

    // Reasoning tokens are billed at the dedicated reasoning price if one is
    // configured, otherwise at the regular output price.
    let reasoning_cost = match model_cost.output_reasoning_per_token {
        reasoning_cost if reasoning_cost > 0.0 => reasoning_cost,
        _ => model_cost.output_per_token,
    };

    let output = (tokens.raw_output_tokens() * model_cost.output_per_token)
        + (tokens.output_reasoning_tokens * reasoning_cost);

    // Negative costs occur when cached tokens exceed input tokens or reasoning
    // tokens exceed output tokens; the outcome is recorded in a metric.
    let metric_label = match (input, output) {
        (x, y) if x < 0.0 || y < 0.0 => "calculation_negative",
        (0.0, 0.0) => "calculation_zero",
        _ => "calculation_positive",
    };

    relay_statsd::metric!(
        counter(Counters::GenAiCostCalculationResult) += 1,
        result = metric_label,
        integration = integration,
        platform = platform,
    );

    Some(CalculatedCost { input, output })
}

/// Default operation type for AI spans whose operation cannot be inferred.
pub const DEFAULT_AI_OPERATION: &str = "ai_client";

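/// Infers a coarse operation type (`"agent"`, `"tool"`, `"handoff"`,
/// `"ai_client"`, or `"other"`) from a raw operation name.
///
/// Illustrative mappings, taken from the match arms below (marked `ignore`
/// because this is a sketch rather than a wired-up doctest):
///
/// ```ignore
/// assert_eq!(infer_ai_operation_type("gen_ai.execute_tool"), Some("tool"));
/// assert_eq!(infer_ai_operation_type("ai.streamText.doStream"), Some("ai_client"));
/// assert_eq!(infer_ai_operation_type("ai.streamText"), Some("agent"));
/// assert_eq!(infer_ai_operation_type("http.server"), None);
/// ```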
pub fn infer_ai_operation_type(op_name: &str) -> Option<&'static str> {
    let ai_op = match op_name {
        "ai.run.generateText"
        | "ai.run.generateObject"
        | "gen_ai.invoke_agent"
        | "ai.pipeline.generate_text"
        | "ai.pipeline.generate_object"
        | "ai.pipeline.stream_text"
        | "ai.pipeline.stream_object"
        | "gen_ai.create_agent"
        | "invoke_agent"
        | "create_agent" => "agent",
        "gen_ai.execute_tool" | "execute_tool" => "tool",
        "gen_ai.handoff" | "handoff" => "handoff",
        "ai.processor" | "processor_run" => "other",
        // For the prefixed operations below, the longer `doStream`/`doGenerate`
        // prefixes must be checked first: those spans are the actual model
        // calls, while the surrounding pipeline spans are treated as agents.
        op if op.starts_with("ai.streamText.doStream") => "ai_client",
        op if op.starts_with("ai.streamText") => "agent",

        op if op.starts_with("ai.generateText.doGenerate") => "ai_client",
        op if op.starts_with("ai.generateText") => "agent",

        op if op.starts_with("ai.generateObject.doGenerate") => "ai_client",
        op if op.starts_with("ai.generateObject") => "agent",

        op if op.starts_with("ai.toolCall") => "tool",
        _ => return None,
    };

    Some(ai_op)
}

/// Computes token costs for the span and writes them to the `gen_ai.cost.*`
/// data fields, emitting a metric when no model cost is available.
fn extract_ai_model_cost_data(
    model_cost: Option<&ModelCostV2>,
    data: &mut SpanData,
    origin: Option<&str>,
    platform: Option<&str>,
) {
    let integration = map_origin_to_integration(origin);
    let platform = platform_tag(platform);

    let Some(model_cost) = model_cost else {
        relay_statsd::metric!(
            counter(Counters::GenAiCostCalculationResult) += 1,
            result = "calculation_no_model_cost_available",
            integration = integration,
            platform = platform,
        );
        return;
    };

    let used_tokens = UsedTokens::from_span_data(&*data);
    let Some(costs) = calculate_costs(model_cost, used_tokens, integration, platform) else {
        return;
    };

    data.gen_ai_cost_total_tokens
        .set_value(Value::F64(costs.total()).into());
    data.gen_ai_cost_input_tokens
        .set_value(Value::F64(costs.input).into());
    data.gen_ai_cost_output_tokens
        .set_value(Value::F64(costs.output).into());
}

/// Backfills the `gen_ai.usage.*` data fields from the legacy
/// `ai_*_tokens_used` measurements, without overwriting existing values.
fn map_ai_measurements_to_data(data: &mut SpanData, measurements: Option<&Measurements>) {
    let set_field_from_measurement =
        |target_field: &mut Annotated<Value>, measurement_key: &str| {
            if let Some(measurements) = measurements
                && target_field.value().is_none()
                && let Some(value) = measurements.get_value(measurement_key)
            {
                target_field.set_value(Value::F64(value.to_f64()).into());
            }
        };

    set_field_from_measurement(&mut data.gen_ai_usage_total_tokens, "ai_total_tokens_used");
    set_field_from_measurement(&mut data.gen_ai_usage_input_tokens, "ai_prompt_tokens_used");
    set_field_from_measurement(
        &mut data.gen_ai_usage_output_tokens,
        "ai_completion_tokens_used",
    );
}

/// Computes `gen_ai.usage.total_tokens` as the sum of input and output tokens
/// when the total is not already set. Does nothing if neither side is present.
fn set_total_tokens(data: &mut SpanData) {
    if data.gen_ai_usage_total_tokens.value().is_none() {
        let input_tokens = data
            .gen_ai_usage_input_tokens
            .value()
            .and_then(Value::as_f64);
        let output_tokens = data
            .gen_ai_usage_output_tokens
            .value()
            .and_then(Value::as_f64);

        if input_tokens.is_none() && output_tokens.is_none() {
            return;
        }

        data.gen_ai_usage_total_tokens.set_value(
            Value::F64(input_tokens.unwrap_or(0.0) + output_tokens.unwrap_or(0.0)).into(),
        );
    }
}

/// Derives additional AI span data: the response rate in tokens per second
/// and, when a model ID is available, the token costs.
fn extract_ai_data(
    data: &mut SpanData,
    duration: f64,
    ai_model_costs: &ModelCosts,
    origin: Option<&str>,
    platform: Option<&str>,
) {
    // `duration` is in milliseconds; convert to seconds for the rate.
    if data.gen_ai_response_tokens_per_second.value().is_none()
        && duration > 0.0
        && let Some(output_tokens) = data
            .gen_ai_usage_output_tokens
            .value()
            .and_then(Value::as_f64)
    {
        data.gen_ai_response_tokens_per_second
            .set_value(Value::F64(output_tokens / (duration / 1000.0)).into());
    }

    // Prefer the requested model; fall back to the model reported in the response.
    if let Some(model_id) = data
        .gen_ai_request_model
        .value()
        .and_then(|val| val.as_str())
        .or_else(|| {
            data.gen_ai_response_model
                .value()
                .and_then(|val| val.as_str())
        })
    {
        extract_ai_model_cost_data(
            ai_model_costs.cost_per_token(model_id),
            data,
            origin,
            platform,
        )
    } else {
        relay_statsd::metric!(
            counter(Counters::GenAiCostCalculationResult) += 1,
            result = "calculation_no_model_id_available",
            integration = map_origin_to_integration(origin),
            platform = platform_tag(platform),
        );
    }
}

/// Enriches AI span data in place: maps legacy measurements, fills in total
/// tokens, extracts costs, and sets the operation type.
fn enrich_ai_span_data(
    span_data: &mut Annotated<SpanData>,
    span_op: &Annotated<OperationType>,
    measurements: &Annotated<Measurements>,
    duration: f64,
    model_costs: Option<&ModelCosts>,
    origin: Option<&str>,
    platform: Option<&str>,
) {
    if !is_ai_span(span_data, span_op.value()) {
        return;
    }

    let data = span_data.get_or_insert_with(SpanData::default);

    map_ai_measurements_to_data(data, measurements.value());

    set_total_tokens(data);

    if let Some(model_costs) = model_costs {
        extract_ai_data(data, duration, model_costs, origin, platform);
    } else {
        relay_statsd::metric!(
            counter(Counters::GenAiCostCalculationResult) += 1,
            result = "calculation_no_model_cost_available",
            integration = map_origin_to_integration(origin),
            platform = platform_tag(platform),
        );
    }

    let ai_op_type = data
        .gen_ai_operation_name
        .value()
        .or(span_op.value())
        .and_then(|op| infer_ai_operation_type(op))
        .unwrap_or(DEFAULT_AI_OPERATION);

    data.gen_ai_operation_type
        .set_value(Some(ai_op_type.to_owned()));
}

/// Enriches a standalone AI span with derived data fields.
pub fn enrich_ai_span(span: &mut Span, model_costs: Option<&ModelCosts>) {
    let duration = span
        .get_value("span.duration")
        .and_then(|v| v.as_f64())
        .unwrap_or(0.0);

    enrich_ai_span_data(
        &mut span.data,
        &span.op,
        &span.measurements,
        duration,
        model_costs,
        span.origin.as_str(),
        span.platform.as_str(),
    );
}

/// Enriches AI span data on a transaction event: the trace context as well as
/// all child spans.
pub fn enrich_ai_event_data(event: &mut Event, model_costs: Option<&ModelCosts>) {
    let event_duration = event
        .get_value("event.duration")
        .and_then(|v| v.as_f64())
        .unwrap_or(0.0);

    if let Some(trace_context) = event
        .contexts
        .value_mut()
        .as_mut()
        .and_then(|c| c.get_mut::<TraceContext>())
    {
        enrich_ai_span_data(
            &mut trace_context.data,
            &trace_context.op,
            &event.measurements,
            event_duration,
            model_costs,
            trace_context.origin.as_str(),
            event.platform.as_str(),
        );
    }

    let spans = event.spans.value_mut().iter_mut().flatten();
    let spans = spans.filter_map(|span| span.value_mut().as_mut());

    for span in spans {
        let span_duration = span
            .get_value("span.duration")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0);
        // Fall back to the event's platform when the span does not set its own.
        let span_platform = span.platform.as_str().or_else(|| event.platform.as_str());

        enrich_ai_span_data(
            &mut span.data,
            &span.op,
            &span.measurements,
            span_duration,
            model_costs,
            span.origin.as_str(),
            span_platform,
        );
    }
}

/// Returns `true` if the span is an AI span, i.e. it carries a
/// `gen_ai.operation.name` or its op starts with `ai.` or `gen_ai.`.
fn is_ai_span(span_data: &Annotated<SpanData>, span_op: Option<&OperationType>) -> bool {
    let has_ai_op_name = span_data
        .value()
        .and_then(|data| data.gen_ai_operation_name.value())
        .is_some();

    let is_ai_span_op =
        span_op.is_some_and(|op| op.starts_with("ai.") || op.starts_with("gen_ai."));

    has_ai_op_name || is_ai_span_op
}

#[cfg(test)]
mod tests {
    use relay_protocol::{FromValue, assert_annotated_snapshot};
    use serde_json::json;

    use super::*;

    /// Builds a minimal AI span with the given JSON `data`.
    fn ai_span_with_data(data: serde_json::Value) -> Span {
        Span {
            op: "gen_ai.test".to_owned().into(),
            data: SpanData::from_value(data.into()),
            ..Default::default()
        }
    }

    #[test]
    fn test_calculate_cost_no_tokens() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 1.0,
                output_reasoning_per_token: 1.0,
                input_cached_per_token: 1.0,
                input_cache_write_per_token: 1.0,
            },
            UsedTokens::from_span_data(&SpanData::default()),
            "test",
            "test",
        );
        assert!(cost.is_none());
    }

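    // Added example: demonstrates the raw-token arithmetic `calculate_costs`
    // builds on: cached tokens are subtracted from the input total, reasoning
    // tokens from the output total.
    #[test]
    fn test_used_tokens_raw_counts() {
        let tokens = UsedTokens {
            input_tokens: 10.0,
            input_cached_tokens: 4.0,
            input_cache_write_tokens: 2.0,
            output_tokens: 7.0,
            output_reasoning_tokens: 3.0,
        };

        assert!(tokens.has_usage());
        assert_eq!(tokens.raw_input_tokens(), 6.0);
        assert_eq!(tokens.raw_output_tokens(), 4.0);
    }
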
    #[test]
    fn test_calculate_cost_full() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 3.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            UsedTokens {
                input_tokens: 8.0,
                input_cached_tokens: 5.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 15.0,
                output_reasoning_tokens: 9.0,
            },
            "test",
            "test",
        )
        .unwrap();

        // input: (8 - 5) * 1.0 + 5 * 0.5 = 5.5
        // output: (15 - 9) * 2.0 + 9 * 3.0 = 39.0
        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 5.5,
            output: 39.0,
        }
        ");
    }

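    // Added example: `CalculatedCost::total` is the sum of both components.
    #[test]
    fn test_calculated_cost_total() {
        let cost = CalculatedCost {
            input: 5.5,
            output: 39.0,
        };
        assert_eq!(cost.total(), 44.5);
    }
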
    #[test]
    fn test_calculate_cost_no_reasoning_cost() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                // No dedicated reasoning price: falls back to `output_per_token`.
                output_reasoning_per_token: 0.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.0,
            },
            UsedTokens {
                input_tokens: 8.0,
                input_cached_tokens: 5.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 15.0,
                output_reasoning_tokens: 9.0,
            },
            "test",
            "test",
        )
        .unwrap();

        // output: (15 - 9) * 2.0 + 9 * 2.0 = 30.0
        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 5.5,
            output: 30.0,
        }
        ");
    }

    // Cached tokens exceeding the reported input tokens (and reasoning tokens
    // exceeding the output tokens) produce negative raw counts; the cost is
    // computed as-is and flagged via the `calculation_negative` metric.
    #[test]
    fn test_calculate_cost_negative() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 2.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 1.0,
                input_cached_per_token: 1.0,
                input_cache_write_per_token: 1.5,
            },
            UsedTokens {
                input_tokens: 1.0,
                input_cached_tokens: 11.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 1.0,
                output_reasoning_tokens: 9.0,
            },
            "test",
            "test",
        )
        .unwrap();

        // input: (1 - 11) * 2.0 + 11 * 1.0 = -9.0
        // output: (1 - 9) * 2.0 + 9 * 1.0 = -7.0
        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: -9.0,
            output: -7.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_with_cache_writes() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 3.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            UsedTokens {
                input_tokens: 100.0,
                input_cached_tokens: 20.0,
                input_cache_write_tokens: 30.0,
                output_tokens: 50.0,
                output_reasoning_tokens: 10.0,
            },
            "test",
            "test",
        )
        .unwrap();

        // input: (100 - 20) * 1.0 + 20 * 0.5 + 30 * 0.75 = 112.5
        // output: (50 - 10) * 2.0 + 10 * 3.0 = 110.0
        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 112.5,
            output: 110.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_backward_compatibility_no_cache_write() {
        // Span data without the cache-write field, as sent by older SDKs.
        let span_data = SpanData {
            gen_ai_usage_input_tokens: Annotated::new(100.0.into()),
            gen_ai_usage_input_tokens_cached: Annotated::new(20.0.into()),
            gen_ai_usage_output_tokens: Annotated::new(50.0.into()),
            ..Default::default()
        };

        let tokens = UsedTokens::from_span_data(&span_data);

        // The missing field defaults to zero and contributes no cost.
        assert_eq!(tokens.input_cache_write_tokens, 0.0);

        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 0.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            tokens,
            "test",
            "test",
        )
        .unwrap();

        // input: (100 - 20) * 1.0 + 20 * 0.5 = 90.0
        // output: 50 * 2.0 = 100.0
        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 90.0,
            output: 100.0,
        }
        ");
    }

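    // Added example: exercises `infer_ai_operation_type` directly on a few
    // representative operation names from its match table.
    #[test]
    fn test_infer_ai_operation_type_direct() {
        assert_eq!(infer_ai_operation_type("gen_ai.execute_tool"), Some("tool"));
        assert_eq!(infer_ai_operation_type("gen_ai.handoff"), Some("handoff"));
        assert_eq!(infer_ai_operation_type("ai.streamText"), Some("agent"));
        assert_eq!(
            infer_ai_operation_type("ai.streamText.doStream"),
            Some("ai_client")
        );
        assert_eq!(infer_ai_operation_type("http.server"), None);
    }
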
    #[test]
    fn test_infer_ai_operation_type_from_gen_ai_operation_name() {
        let mut span = ai_span_with_data(json!({
            "gen_ai.operation.name": "invoke_agent"
        }));

        enrich_ai_span(&mut span, None);

        assert_annotated_snapshot!(&span.data, @r#"
        {
          "gen_ai.operation.name": "invoke_agent",
          "gen_ai.operation.type": "agent"
        }
        "#);
    }

    #[test]
    fn test_infer_ai_operation_type_from_span_op() {
        let mut span = Span {
            op: "gen_ai.invoke_agent".to_owned().into(),
            ..Default::default()
        };

        enrich_ai_span(&mut span, None);

        assert_annotated_snapshot!(span.data, @r#"
        {
          "gen_ai.operation.type": "agent"
        }
        "#);
    }

    #[test]
    fn test_infer_ai_operation_type_from_fallback() {
        let mut span = ai_span_with_data(json!({
            "gen_ai.operation.name": "embeddings"
        }));

        enrich_ai_span(&mut span, None);

        assert_annotated_snapshot!(&span.data, @r#"
        {
          "gen_ai.operation.name": "embeddings",
          "gen_ai.operation.type": "ai_client"
        }
        "#);
    }

    #[test]
    fn test_is_ai_span_from_gen_ai_operation_name() {
        let mut span_data = Annotated::default();
        span_data
            .get_or_insert_with(SpanData::default)
            .gen_ai_operation_name
            .set_value(Some("chat".into()));
        assert!(is_ai_span(&span_data, None));
    }

    #[test]
    fn test_is_ai_span_from_span_op_ai() {
        let span_op: OperationType = "ai.chat".into();
        assert!(is_ai_span(&Annotated::default(), Some(&span_op)));
    }

    #[test]
    fn test_is_ai_span_from_span_op_gen_ai() {
        let span_op: OperationType = "gen_ai.chat".into();
        assert!(is_ai_span(&Annotated::default(), Some(&span_op)));
    }

    #[test]
    fn test_is_ai_span_negative() {
        assert!(!is_ai_span(&Annotated::default(), None));
    }

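    // Added example: `set_total_tokens` sums input and output tokens when the
    // total is missing, treating an absent side as zero.
    #[test]
    fn test_set_total_tokens_sums_input_and_output() {
        let mut data = SpanData {
            gen_ai_usage_input_tokens: Annotated::new(3.0.into()),
            gen_ai_usage_output_tokens: Annotated::new(4.0.into()),
            ..Default::default()
        };

        set_total_tokens(&mut data);

        assert_eq!(
            data.gen_ai_usage_total_tokens.value(),
            Some(&Value::F64(7.0))
        );
    }
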
    #[test]
    fn test_enrich_ai_event_data_invoke_agent_trace_with_chat_span() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "gen_ai.invoke_agent",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456",
                    "data": {
                        "gen_ai.operation.name": "gen_ai.invoke_agent",
                        "gen_ai.usage.input_tokens": 500,
                        "gen_ai.usage.output_tokens": 200
                    }
                }
            },
            "spans": [
                {
                    "op": "gen_ai.chat.completions",
                    "span_id": "1234567890123457",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567890.5,
                    "data": {
                        "gen_ai.operation.name": "chat",
                        "gen_ai.usage.input_tokens": 100,
                        "gen_ai.usage.output_tokens": 50
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        enrich_ai_event_data(event, None);

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "gen_ai.invoke_agent",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500,
                "gen_ai.usage.output_tokens": 200,
                "gen_ai.operation.name": "gen_ai.invoke_agent",
                "gen_ai.operation.type": "agent"
              },
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567890.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.chat.completions",
              "span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100,
                "gen_ai.usage.output_tokens": 50,
                "gen_ai.operation.name": "chat",
                "gen_ai.operation.type": "ai_client"
              }
            }
          ]
        }
        "#);
    }

    #[test]
    fn test_enrich_ai_event_data_nested_agent_and_chat_spans() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "http.server",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456"
                }
            },
            "spans": [
                {
                    "op": "gen_ai.invoke_agent",
                    "span_id": "1234567890123457",
                    "parent_span_id": "1234567890123456",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567891.5,
                    "data": {
                        "gen_ai.operation.name": "invoke_agent",
                        "gen_ai.usage.input_tokens": 500,
                        "gen_ai.usage.output_tokens": 200
                    }
                },
                {
                    "op": "gen_ai.chat.completions",
                    "span_id": "1234567890123458",
                    "parent_span_id": "1234567890123457",
                    "start_timestamp": 1234567890.0,
                    "timestamp": 1234567891.0,
                    "data": {
                        "gen_ai.operation.name": "chat",
                        "gen_ai.usage.input_tokens": 100,
                        "gen_ai.usage.output_tokens": 50
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        enrich_ai_event_data(event, None);

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "http.server",
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567891.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.invoke_agent",
              "span_id": "1234567890123457",
              "parent_span_id": "1234567890123456",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500,
                "gen_ai.usage.output_tokens": 200,
                "gen_ai.operation.name": "invoke_agent",
                "gen_ai.operation.type": "agent"
              }
            },
            {
              "timestamp": 1234567891.0,
              "start_timestamp": 1234567890.0,
              "op": "gen_ai.chat.completions",
              "span_id": "1234567890123458",
              "parent_span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100,
                "gen_ai.usage.output_tokens": 50,
                "gen_ai.operation.name": "chat",
                "gen_ai.operation.type": "ai_client"
              }
            }
          ]
        }
        "#);
    }

    #[test]
    fn test_enrich_ai_event_data_legacy_measurements_and_span_op() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "http.server",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456"
                }
            },
            "spans": [
                {
                    "op": "gen_ai.invoke_agent",
                    "span_id": "1234567890123457",
                    "parent_span_id": "1234567890123456",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567891.5,
                    "measurements": {
                        "ai_prompt_tokens_used": {"value": 500.0},
                        "ai_completion_tokens_used": {"value": 200.0}
                    }
                },
                {
                    "op": "ai.chat_completions.create.langchain.ChatOpenAI",
                    "span_id": "1234567890123458",
                    "parent_span_id": "1234567890123457",
                    "start_timestamp": 1234567890.0,
                    "timestamp": 1234567891.0,
                    "measurements": {
                        "ai_prompt_tokens_used": {"value": 100.0},
                        "ai_completion_tokens_used": {"value": 50.0}
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        enrich_ai_event_data(event, None);

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "http.server",
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567891.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.invoke_agent",
              "span_id": "1234567890123457",
              "parent_span_id": "1234567890123456",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500.0,
                "gen_ai.usage.output_tokens": 200.0,
                "gen_ai.operation.type": "agent"
              },
              "measurements": {
                "ai_completion_tokens_used": {
                  "value": 200.0
                },
                "ai_prompt_tokens_used": {
                  "value": 500.0
                }
              }
            },
            {
              "timestamp": 1234567891.0,
              "start_timestamp": 1234567890.0,
              "op": "ai.chat_completions.create.langchain.ChatOpenAI",
              "span_id": "1234567890123458",
              "parent_span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100.0,
                "gen_ai.usage.output_tokens": 50.0,
                "gen_ai.operation.type": "ai_client"
              },
              "measurements": {
                "ai_completion_tokens_used": {
                  "value": 50.0
                },
                "ai_prompt_tokens_used": {
                  "value": 100.0
                }
              }
            }
          ]
        }
        "#);
    }
}