use crate::normalize::AiOperationTypeMap;
use crate::{ModelCostV2, ModelCosts};
use relay_event_schema::protocol::{
    Event, Measurements, OperationType, Span, SpanData, TraceContext,
};
use relay_protocol::{Annotated, Getter, Value};

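/// Token counts extracted from the `gen_ai.usage.*` fields of a span's data.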
#[derive(Debug, Copy, Clone)]
pub struct UsedTokens {
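    /// Total number of input (prompt) tokens, including cached input tokens.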
    pub input_tokens: f64,
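    /// Number of input tokens that were read from the cache.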
    pub input_cached_tokens: f64,
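    /// Number of input tokens that were written to the cache.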
    pub input_cache_write_tokens: f64,
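    /// Total number of output (completion) tokens, including reasoning tokens.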
    pub output_tokens: f64,
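    /// Number of output tokens used for reasoning.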
    pub output_reasoning_tokens: f64,
}

impl UsedTokens {
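    /// Reads token usage from the `gen_ai.usage.*` span data fields, treating missing values as `0.0`.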
    pub fn from_span_data(data: &SpanData) -> Self {
        macro_rules! get_value {
            ($e:expr) => {
                $e.value().and_then(Value::as_f64).unwrap_or(0.0)
            };
        }

        Self {
            input_tokens: get_value!(data.gen_ai_usage_input_tokens),
            output_tokens: get_value!(data.gen_ai_usage_output_tokens),
            output_reasoning_tokens: get_value!(data.gen_ai_usage_output_tokens_reasoning),
            input_cached_tokens: get_value!(data.gen_ai_usage_input_tokens_cached),
            input_cache_write_tokens: get_value!(data.gen_ai_usage_input_tokens_cache_write),
        }
    }

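    /// Returns `true` if the span recorded any input or output tokens.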
    pub fn has_usage(&self) -> bool {
        self.input_tokens > 0.0 || self.output_tokens > 0.0
    }

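    /// Input tokens billed at the regular input rate, i.e. excluding cached input tokens.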
    pub fn raw_input_tokens(&self) -> f64 {
        self.input_tokens - self.input_cached_tokens
    }

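    /// Output tokens excluding reasoning tokens.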
    pub fn raw_output_tokens(&self) -> f64 {
        self.output_tokens - self.output_reasoning_tokens
    }
}

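/// Cost of an AI model call, calculated from token usage and per-token model prices.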
#[derive(Debug, Copy, Clone)]
pub struct CalculatedCost {
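    /// Cost attributed to input tokens, including cache reads and writes.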
    pub input: f64,
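    /// Cost attributed to output tokens, including reasoning tokens.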
    pub output: f64,
}

impl CalculatedCost {
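    /// Total cost: input plus output.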
    pub fn total(&self) -> f64 {
        self.input + self.output
    }
}

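/// Calculates the cost of an AI model call from its token usage and the model's per-token prices.
///
/// Returns `None` if the span recorded neither input nor output tokens.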
pub fn calculate_costs(model_cost: &ModelCostV2, tokens: UsedTokens) -> Option<CalculatedCost> {
    if !tokens.has_usage() {
        return None;
    }

    let input = (tokens.raw_input_tokens() * model_cost.input_per_token)
        + (tokens.input_cached_tokens * model_cost.input_cached_per_token)
        + (tokens.input_cache_write_tokens * model_cost.input_cache_write_per_token);

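    // If no dedicated reasoning price is configured, bill reasoning tokens at the regular
    // output rate.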
    let reasoning_cost = match model_cost.output_reasoning_per_token {
        reasoning_cost if reasoning_cost > 0.0 => reasoning_cost,
        _ => model_cost.output_per_token,
    };

    let output = (tokens.raw_output_tokens() * model_cost.output_per_token)
        + (tokens.output_reasoning_tokens * reasoning_cost);

    Some(CalculatedCost { input, output })
}

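/// Calculates the total, input, and output token costs for the span and writes them to the
/// `gen_ai.cost.*` data fields.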
fn extract_ai_model_cost_data(model_cost: Option<&ModelCostV2>, data: &mut SpanData) {
    let Some(model_cost) = model_cost else { return };

    let used_tokens = UsedTokens::from_span_data(&*data);
    let Some(costs) = calculate_costs(model_cost, used_tokens) else {
        return;
    };

    data.gen_ai_cost_total_tokens
        .set_value(Value::F64(costs.total()).into());

    data.gen_ai_cost_input_tokens
        .set_value(Value::F64(costs.input).into());
    data.gen_ai_cost_output_tokens
        .set_value(Value::F64(costs.output).into());
}

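/// Maps the legacy `ai_*_tokens_used` measurements to the corresponding `gen_ai.usage.*` data
/// fields if those fields are not already set.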
fn map_ai_measurements_to_data(data: &mut SpanData, measurements: Option<&Measurements>) {
    let set_field_from_measurement = |target_field: &mut Annotated<Value>,
                                      measurement_key: &str| {
        if let Some(measurements) = measurements
            && target_field.value().is_none()
            && let Some(value) = measurements.get_value(measurement_key)
        {
            target_field.set_value(Value::F64(value.to_f64()).into());
        }
    };

    set_field_from_measurement(&mut data.gen_ai_usage_total_tokens, "ai_total_tokens_used");
    set_field_from_measurement(&mut data.gen_ai_usage_input_tokens, "ai_prompt_tokens_used");
    set_field_from_measurement(
        &mut data.gen_ai_usage_output_tokens,
        "ai_completion_tokens_used",
    );
}

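/// Sets `gen_ai.usage.total_tokens` to the sum of input and output tokens if it is not already
/// set and at least one of the two is present.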
fn set_total_tokens(data: &mut SpanData) {
    if data.gen_ai_usage_total_tokens.value().is_none() {
        let input_tokens = data
            .gen_ai_usage_input_tokens
            .value()
            .and_then(Value::as_f64);
        let output_tokens = data
            .gen_ai_usage_output_tokens
            .value()
            .and_then(Value::as_f64);

        if input_tokens.is_none() && output_tokens.is_none() {
            return;
        }

        data.gen_ai_usage_total_tokens.set_value(
            Value::F64(input_tokens.unwrap_or(0.0) + output_tokens.unwrap_or(0.0)).into(),
        );
    }
}

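/// Derives AI metrics on the span data: output tokens per second (when the span has a positive
/// duration) and, when a model id is present, the cost fields based on the configured model costs.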
fn extract_ai_data(data: &mut SpanData, duration: f64, ai_model_costs: &ModelCosts) {
    if data.gen_ai_response_tokens_per_second.value().is_none()
        && duration > 0.0
        && let Some(output_tokens) = data
            .gen_ai_usage_output_tokens
            .value()
            .and_then(Value::as_f64)
    {
        data.gen_ai_response_tokens_per_second
            .set_value(Value::F64(output_tokens / (duration / 1000.0)).into());
    }

    if let Some(model_id) = data
        .gen_ai_request_model
        .value()
        .and_then(|val| val.as_str())
        .or_else(|| {
            data.gen_ai_response_model
                .value()
                .and_then(|val| val.as_str())
        })
    {
        extract_ai_model_cost_data(ai_model_costs.cost_per_token(model_id), data)
    }
}

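/// Enriches the data of a single AI span or trace context: maps legacy measurements, fills in
/// total tokens, and extracts cost and operation type information.
///
/// Does nothing if the span is not an AI span.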
fn enrich_ai_span_data(
    span_data: &mut Annotated<SpanData>,
    span_op: &Annotated<OperationType>,
    measurements: &Annotated<Measurements>,
    duration: f64,
    model_costs: Option<&ModelCosts>,
    operation_type_map: Option<&AiOperationTypeMap>,
) {
    if !is_ai_span(span_data, span_op.value()) {
        return;
    }

    let data = span_data.get_or_insert_with(SpanData::default);

    map_ai_measurements_to_data(data, measurements.value());

    set_total_tokens(data);

    if let Some(model_costs) = model_costs {
        extract_ai_data(data, duration, model_costs);
    }
    if let Some(operation_type_map) = operation_type_map {
        infer_ai_operation_type(data, span_op.value(), operation_type_map);
    }
}

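/// Extracts and enriches AI data for a single span.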
pub fn enrich_ai_span(
    span: &mut Span,
    model_costs: Option<&ModelCosts>,
    operation_type_map: Option<&AiOperationTypeMap>,
) {
    let duration = span
        .get_value("span.duration")
        .and_then(|v| v.as_f64())
        .unwrap_or(0.0);

    enrich_ai_span_data(
        &mut span.data,
        &span.op,
        &span.measurements,
        duration,
        model_costs,
        operation_type_map,
    );
}

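/// Extracts and enriches AI data for a transaction event: the trace context and all of its spans.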
pub fn enrich_ai_event_data(
    event: &mut Event,
    model_costs: Option<&ModelCosts>,
    operation_type_map: Option<&AiOperationTypeMap>,
) {
    let event_duration = event
        .get_value("event.duration")
        .and_then(|v| v.as_f64())
        .unwrap_or(0.0);

    if let Some(trace_context) = event
        .contexts
        .value_mut()
        .as_mut()
        .and_then(|c| c.get_mut::<TraceContext>())
    {
        enrich_ai_span_data(
            &mut trace_context.data,
            &trace_context.op,
            &event.measurements,
            event_duration,
            model_costs,
            operation_type_map,
        );
    }
    let spans = event.spans.value_mut().iter_mut().flatten();
    let spans = spans.filter_map(|span| span.value_mut().as_mut());

    for span in spans {
        let span_duration = span
            .get_value("span.duration")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0);

        enrich_ai_span_data(
            &mut span.data,
            &span.op,
            &span.measurements,
            span_duration,
            model_costs,
            operation_type_map,
        );
    }
}

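/// Infers the `gen_ai.operation.type` data field from `gen_ai.operation.name`, falling back to
/// the span op, using the configured operation type mapping.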
fn infer_ai_operation_type(
    data: &mut SpanData,
    span_op: Option<&OperationType>,
    operation_type_map: &AiOperationTypeMap,
) {
    let op_type = data
        .gen_ai_operation_name
        .value()
        .or(span_op)
        .and_then(|op| operation_type_map.get_operation_type(op));

    if let Some(operation_type) = op_type {
        data.gen_ai_operation_type
            .set_value(Some(operation_type.to_owned()));
    }
}

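/// Returns `true` if the span is an AI span: it has a `gen_ai.operation.name` data field or its
/// op starts with `ai.` or `gen_ai.`.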
fn is_ai_span(span_data: &Annotated<SpanData>, span_op: Option<&OperationType>) -> bool {
    let has_ai_op = span_data
        .value()
        .and_then(|data| data.gen_ai_operation_name.value())
        .is_some();

    let is_ai_span_op =
        span_op.is_some_and(|op| op.starts_with("ai.") || op.starts_with("gen_ai."));

    has_ai_op || is_ai_span_op
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use relay_pattern::Pattern;
    use relay_protocol::assert_annotated_snapshot;

    use super::*;

    #[test]
    fn test_calculate_cost_no_tokens() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 1.0,
                output_reasoning_per_token: 1.0,
                input_cached_per_token: 1.0,
                input_cache_write_per_token: 1.0,
            },
            UsedTokens::from_span_data(&SpanData::default()),
        );
        assert!(cost.is_none());
    }

    #[test]
    fn test_calculate_cost_full() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 3.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            UsedTokens {
                input_tokens: 8.0,
                input_cached_tokens: 5.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 15.0,
                output_reasoning_tokens: 9.0,
            },
        )
        .unwrap();

        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 5.5,
            output: 39.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_no_reasoning_cost() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 0.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.0,
            },
            UsedTokens {
                input_tokens: 8.0,
                input_cached_tokens: 5.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 15.0,
                output_reasoning_tokens: 9.0,
            },
        )
        .unwrap();

        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 5.5,
            output: 30.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_negative() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 2.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 1.0,
                input_cached_per_token: 1.0,
                input_cache_write_per_token: 1.5,
            },
            UsedTokens {
                input_tokens: 1.0,
                input_cached_tokens: 11.0,
                input_cache_write_tokens: 0.0,
                output_tokens: 1.0,
                output_reasoning_tokens: 9.0,
            },
        )
        .unwrap();

        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: -9.0,
            output: -7.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_with_cache_writes() {
        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 3.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            UsedTokens {
                input_tokens: 100.0,
                input_cached_tokens: 20.0,
                input_cache_write_tokens: 30.0,
                output_tokens: 50.0,
                output_reasoning_tokens: 10.0,
            },
        )
        .unwrap();

        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 112.5,
            output: 110.0,
        }
        ");
    }

    #[test]
    fn test_calculate_cost_backward_compatibility_no_cache_write() {
        let span_data = SpanData {
            gen_ai_usage_input_tokens: Annotated::new(100.0.into()),
            gen_ai_usage_input_tokens_cached: Annotated::new(20.0.into()),
            gen_ai_usage_output_tokens: Annotated::new(50.0.into()),
            ..Default::default()
        };

        let tokens = UsedTokens::from_span_data(&span_data);

        assert_eq!(tokens.input_cache_write_tokens, 0.0);

        let cost = calculate_costs(
            &ModelCostV2 {
                input_per_token: 1.0,
                output_per_token: 2.0,
                output_reasoning_per_token: 0.0,
                input_cached_per_token: 0.5,
                input_cache_write_per_token: 0.75,
            },
            tokens,
        )
        .unwrap();

        insta::assert_debug_snapshot!(cost, @r"
        CalculatedCost {
            input: 90.0,
            output: 100.0,
        }
        ");
    }

    #[test]
    fn test_infer_ai_operation_type_from_gen_ai_operation_name() {
        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);

        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        let span_data = r#"{
            "gen_ai.operation.name": "invoke_agent"
        }"#;
        let mut span_data: Annotated<SpanData> = Annotated::from_json(span_data).unwrap();

        infer_ai_operation_type(
            span_data.value_mut().as_mut().unwrap(),
            None,
            &operation_type_map,
        );

        assert_annotated_snapshot!(&span_data, @r#"
        {
          "gen_ai.operation.name": "invoke_agent",
          "gen_ai.operation.type": "agent"
        }
        "#);
    }

    #[test]
    fn test_infer_ai_operation_type_from_span_op() {
        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);
        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        let mut span_data = SpanData::default();
        let span_op: OperationType = "gen_ai.invoke_agent".into();
        infer_ai_operation_type(&mut span_data, Some(&span_op), &operation_type_map);

        assert_annotated_snapshot!(Annotated::new(span_data), @r#"
        {
          "gen_ai.operation.type": "agent"
        }
        "#);
    }

    #[test]
    fn test_infer_ai_operation_type_from_fallback() {
        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);

        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        let span_data = r#"{
            "gen_ai.operation.name": "embeddings"
        }"#;
        let mut span_data: Annotated<SpanData> = Annotated::from_json(span_data).unwrap();

        infer_ai_operation_type(
            span_data.value_mut().as_mut().unwrap(),
            None,
            &operation_type_map,
        );

        assert_annotated_snapshot!(&span_data, @r#"
        {
          "gen_ai.operation.name": "embeddings",
          "gen_ai.operation.type": "ai_client"
        }
        "#);
    }

    #[test]
    fn test_is_ai_span_from_gen_ai_operation_name() {
        let mut span_data = Annotated::default();
        span_data
            .get_or_insert_with(SpanData::default)
            .gen_ai_operation_name
            .set_value(Some("chat".into()));
        assert!(is_ai_span(&span_data, None));
    }

    #[test]
    fn test_is_ai_span_from_span_op_ai() {
        let span_op: OperationType = "ai.chat".into();
        assert!(is_ai_span(&Annotated::default(), Some(&span_op)));
    }

    #[test]
    fn test_is_ai_span_from_span_op_gen_ai() {
        let span_op: OperationType = "gen_ai.chat".into();
        assert!(is_ai_span(&Annotated::default(), Some(&span_op)));
    }

    #[test]
    fn test_is_ai_span_negative() {
        assert!(!is_ai_span(&Annotated::default(), None));
    }

    #[test]
    fn test_enrich_ai_event_data_invoke_agent_trace_with_chat_span() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "gen_ai.invoke_agent",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456",
                    "data": {
                        "gen_ai.operation.name": "gen_ai.invoke_agent",
                        "gen_ai.usage.input_tokens": 500,
                        "gen_ai.usage.output_tokens": 200
                    }
                }
            },
            "spans": [
                {
                    "op": "gen_ai.chat.completions",
                    "span_id": "1234567890123457",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567890.5,
                    "data": {
                        "gen_ai.operation.name": "chat",
                        "gen_ai.usage.input_tokens": 100,
                        "gen_ai.usage.output_tokens": 50
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);
        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        enrich_ai_event_data(event, None, Some(&operation_type_map));

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "gen_ai.invoke_agent",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500,
                "gen_ai.usage.output_tokens": 200,
                "gen_ai.operation.name": "gen_ai.invoke_agent",
                "gen_ai.operation.type": "agent"
              },
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567890.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.chat.completions",
              "span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100,
                "gen_ai.usage.output_tokens": 50,
                "gen_ai.operation.name": "chat",
                "gen_ai.operation.type": "ai_client"
              }
            }
          ]
        }
        "#);
    }

    #[test]
    fn test_enrich_ai_event_data_nested_agent_and_chat_spans() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "http.server",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456"
                }
            },
            "spans": [
                {
                    "op": "gen_ai.invoke_agent",
                    "span_id": "1234567890123457",
                    "parent_span_id": "1234567890123456",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567891.5,
                    "data": {
                        "gen_ai.operation.name": "invoke_agent",
                        "gen_ai.usage.input_tokens": 500,
                        "gen_ai.usage.output_tokens": 200
                    }
                },
                {
                    "op": "gen_ai.chat.completions",
                    "span_id": "1234567890123458",
                    "parent_span_id": "1234567890123457",
                    "start_timestamp": 1234567890.0,
                    "timestamp": 1234567891.0,
                    "data": {
                        "gen_ai.operation.name": "chat",
                        "gen_ai.usage.input_tokens": 100,
                        "gen_ai.usage.output_tokens": 50
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);
        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        enrich_ai_event_data(event, None, Some(&operation_type_map));

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "http.server",
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567891.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.invoke_agent",
              "span_id": "1234567890123457",
              "parent_span_id": "1234567890123456",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500,
                "gen_ai.usage.output_tokens": 200,
                "gen_ai.operation.name": "invoke_agent",
                "gen_ai.operation.type": "agent"
              }
            },
            {
              "timestamp": 1234567891.0,
              "start_timestamp": 1234567890.0,
              "op": "gen_ai.chat.completions",
              "span_id": "1234567890123458",
              "parent_span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100,
                "gen_ai.usage.output_tokens": 50,
                "gen_ai.operation.name": "chat",
                "gen_ai.operation.type": "ai_client"
              }
            }
          ]
        }
        "#);
    }

    #[test]
    fn test_enrich_ai_event_data_legacy_measurements_and_span_op() {
        let event_json = r#"{
            "type": "transaction",
            "timestamp": 1234567892.0,
            "start_timestamp": 1234567889.0,
            "contexts": {
                "trace": {
                    "op": "http.server",
                    "trace_id": "12345678901234567890123456789012",
                    "span_id": "1234567890123456"
                }
            },
            "spans": [
                {
                    "op": "gen_ai.invoke_agent",
                    "span_id": "1234567890123457",
                    "parent_span_id": "1234567890123456",
                    "start_timestamp": 1234567889.5,
                    "timestamp": 1234567891.5,
                    "measurements": {
                        "ai_prompt_tokens_used": {"value": 500.0},
                        "ai_completion_tokens_used": {"value": 200.0}
                    }
                },
                {
                    "op": "ai.chat_completions.create.langchain.ChatOpenAI",
                    "span_id": "1234567890123458",
                    "parent_span_id": "1234567890123457",
                    "start_timestamp": 1234567890.0,
                    "timestamp": 1234567891.0,
                    "measurements": {
                        "ai_prompt_tokens_used": {"value": 100.0},
                        "ai_completion_tokens_used": {"value": 50.0}
                    }
                }
            ]
        }"#;

        let mut annotated_event: Annotated<Event> = Annotated::from_json(event_json).unwrap();
        let event = annotated_event.value_mut().as_mut().unwrap();

        let operation_types = HashMap::from([
            (Pattern::new("*").unwrap(), "ai_client".to_owned()),
            (Pattern::new("invoke_agent").unwrap(), "agent".to_owned()),
            (
                Pattern::new("gen_ai.invoke_agent").unwrap(),
                "agent".to_owned(),
            ),
        ]);
        let operation_type_map = AiOperationTypeMap {
            version: 1,
            operation_types,
        };

        enrich_ai_event_data(event, None, Some(&operation_type_map));

        assert_annotated_snapshot!(&annotated_event, @r#"
        {
          "type": "transaction",
          "timestamp": 1234567892.0,
          "start_timestamp": 1234567889.0,
          "contexts": {
            "trace": {
              "trace_id": "12345678901234567890123456789012",
              "span_id": "1234567890123456",
              "op": "http.server",
              "type": "trace"
            }
          },
          "spans": [
            {
              "timestamp": 1234567891.5,
              "start_timestamp": 1234567889.5,
              "op": "gen_ai.invoke_agent",
              "span_id": "1234567890123457",
              "parent_span_id": "1234567890123456",
              "data": {
                "gen_ai.usage.total_tokens": 700.0,
                "gen_ai.usage.input_tokens": 500.0,
                "gen_ai.usage.output_tokens": 200.0,
                "gen_ai.operation.type": "agent"
              },
              "measurements": {
                "ai_completion_tokens_used": {
                  "value": 200.0
                },
                "ai_prompt_tokens_used": {
                  "value": 500.0
                }
              }
            },
            {
              "timestamp": 1234567891.0,
              "start_timestamp": 1234567890.0,
              "op": "ai.chat_completions.create.langchain.ChatOpenAI",
              "span_id": "1234567890123458",
              "parent_span_id": "1234567890123457",
              "data": {
                "gen_ai.usage.total_tokens": 150.0,
                "gen_ai.usage.input_tokens": 100.0,
                "gen_ai.usage.output_tokens": 50.0,
                "gen_ai.operation.type": "ai_client"
              },
              "measurements": {
                "ai_completion_tokens_used": {
                  "value": 50.0
                },
                "ai_prompt_tokens_used": {
                  "value": 100.0
                }
              }
            }
          ]
        }
        "#);
    }
}