// relay_event_normalization/eap/ai.rs
1use std::time::Duration;
2
3use relay_conventions::consts::*;
4use relay_event_schema::protocol::Attributes;
5use relay_protocol::Annotated;
6
7use crate::ModelMetadata;
8use crate::span::ai;
9use crate::statsd::{Counters, map_origin_to_integration, platform_tag};
10
11/// Normalizes AI attributes.
12///
13/// This aggressively overwrites existing AI attributes, in order to guarantee a consistent data
14/// set for the AI product module.
15///
16/// As an example, an OTeL user may be manually instrumenting AI request costs on spans but in a
17/// local currency. Sentry's AI model requires a consistent cost value, independent of local
18/// currencies.
19///
20/// Callers may choose to only run this normalization in processing mode to not have the
21/// normalization run multiple times.
22pub fn normalize_ai(
23    attributes: &mut Annotated<Attributes>,
24    duration: Option<Duration>,
25    model_metadata: Option<&ModelMetadata>,
26) {
27    let Some(attributes) = attributes.value_mut() else {
28        return;
29    };
30
31    // Specifically only apply normalizations if the item is recognized as an AI item by the
32    // product.
33    if !is_ai_item(attributes) {
34        return;
35    }
36
37    normalize_model(attributes);
38    normalize_ai_type(attributes);
39    normalize_total_tokens(attributes);
40    normalize_tokens_per_second(attributes, duration);
41    normalize_context_utilization(attributes, model_metadata);
42    normalize_ai_costs(attributes, model_metadata);
43}
44
45/// Returns whether the item is should have AI normalizations applied.
46fn is_ai_item(attributes: &mut Attributes) -> bool {
47    // The product indicator whether we consider an item to be an EAP item.
48    if attributes.get_value(GEN_AI_OPERATION_TYPE).is_some() {
49        return true;
50    }
51
52    // We use the operation name to infer the operation type.
53    if attributes.get_value(GEN_AI_OPERATION_NAME).is_some() {
54        return true;
55    }
56
57    // Older SDKs may only send a (span) op which we also use to infer the operation type.
58    let op = attributes.get_value(OP).and_then(|op| op.as_str());
59    if op.is_some_and(|op| op.starts_with("gen_ai.") || op.starts_with("ai.")) {
60        return true;
61    }
62
63    false
64}
65
66/// Normalizes the [`GEN_AI_RESPONSE_MODEL`] attribute by defaulting to the [`GEN_AI_REQUEST_MODEL`] if it is missing.
67fn normalize_model(attributes: &mut Attributes) {
68    if attributes.contains_key(GEN_AI_RESPONSE_MODEL) {
69        return;
70    }
71    let Some(model) = attributes
72        .get_value(GEN_AI_REQUEST_MODEL)
73        .and_then(|v| v.as_str())
74    else {
75        return;
76    };
77    attributes.insert(GEN_AI_RESPONSE_MODEL, model.to_owned());
78}
79
80/// Normalizes the [`GEN_AI_OPERATION_TYPE`] and infers it from the AI operation if it is missing.
81fn normalize_ai_type(attributes: &mut Attributes) {
82    let op_name = attributes
83        .get_value(GEN_AI_OPERATION_NAME)
84        .or_else(|| attributes.get_value(OP))
85        .and_then(|op| op.as_str())
86        .and_then(|op| ai::infer_ai_operation_type(op))
87        // This is fine, this normalization only happens for known AI spans.
88        .unwrap_or(ai::DEFAULT_AI_OPERATION);
89
90    attributes.insert(GEN_AI_OPERATION_TYPE, op_name.to_owned());
91}
92
93/// Calculates the [`GEN_AI_USAGE_TOTAL_TOKENS`] attribute.
94fn normalize_total_tokens(attributes: &mut Attributes) {
95    let input_tokens = attributes
96        .get_value(GEN_AI_USAGE_INPUT_TOKENS)
97        .and_then(|v| v.as_f64());
98
99    let output_tokens = attributes
100        .get_value(GEN_AI_USAGE_OUTPUT_TOKENS)
101        .and_then(|v| v.as_f64());
102
103    if input_tokens.is_none() && output_tokens.is_none() {
104        return;
105    }
106
107    let total_tokens = input_tokens.unwrap_or(0.0) + output_tokens.unwrap_or(0.0);
108    attributes.insert(GEN_AI_USAGE_TOTAL_TOKENS, total_tokens);
109}
110
111/// Calculates the [`GEN_AI_RESPONSE_TPS`] attribute.
112fn normalize_tokens_per_second(attributes: &mut Attributes, duration: Option<Duration>) {
113    let Some(duration) = duration.filter(|d| !d.is_zero()) else {
114        return;
115    };
116
117    let output_tokens = attributes
118        .get_value(GEN_AI_USAGE_OUTPUT_TOKENS)
119        .and_then(|v| v.as_f64())
120        .filter(|v| *v > 0.0);
121
122    if let Some(output_tokens) = output_tokens {
123        let tps = output_tokens / duration.as_secs_f64();
124        attributes.insert(GEN_AI_RESPONSE_TPS, tps);
125    }
126}
127
128/// Sets the context window size and utilization for the model.
129fn normalize_context_utilization(
130    attributes: &mut Attributes,
131    model_metadata: Option<&ModelMetadata>,
132) {
133    let model_id = attributes
134        .get_value(GEN_AI_RESPONSE_MODEL)
135        .and_then(|v| v.as_str());
136
137    let context_size = model_id.and_then(|id| model_metadata.and_then(|m| m.context_size(id)));
138
139    let Some(context_size) = context_size else {
140        return;
141    };
142
143    attributes.insert(GEN_AI_CONTEXT_WINDOW_SIZE, context_size as i64);
144
145    let total_tokens = attributes
146        .get_value(GEN_AI_USAGE_TOTAL_TOKENS)
147        .and_then(|v| v.as_f64());
148
149    if let Some(total_tokens) = total_tokens {
150        attributes.insert(
151            GEN_AI_CONTEXT_UTILIZATION,
152            total_tokens / context_size as f64,
153        );
154    }
155}
156
157/// Calculates model costs and serializes them into attributes.
158fn normalize_ai_costs(attributes: &mut Attributes, model_metadata: Option<&ModelMetadata>) {
159    let origin = extract_string_value(attributes, ORIGIN);
160    let platform = extract_string_value(attributes, PLATFORM);
161
162    let integration = map_origin_to_integration(origin);
163    let platform_tag = platform_tag(platform);
164
165    let Some(model_id) = attributes
166        .get_value(GEN_AI_RESPONSE_MODEL)
167        .and_then(|v| v.as_str())
168    else {
169        relay_statsd::metric!(
170            counter(Counters::GenAiCostCalculationResult) += 1,
171            result = "calculation_no_model_id_available",
172            integration = integration,
173            platform = platform_tag,
174        );
175        return;
176    };
177
178    let Some(model_cost) = model_metadata.and_then(|m| m.cost_per_token(model_id)) else {
179        relay_statsd::metric!(
180            counter(Counters::GenAiCostCalculationResult) += 1,
181            result = "calculation_no_model_cost_available",
182            integration = integration,
183            platform = platform_tag,
184        );
185        return;
186    };
187
188    let get_tokens = |key| {
189        attributes
190            .get_value(key)
191            .and_then(|v| v.as_f64())
192            .unwrap_or(0.0)
193    };
194
195    let tokens = ai::UsedTokens {
196        input_tokens: get_tokens(GEN_AI_USAGE_INPUT_TOKENS),
197        input_cached_tokens: get_tokens(GEN_AI_USAGE_INPUT_CACHED_TOKENS),
198        input_cache_write_tokens: get_tokens(GEN_AI_USAGE_INPUT_CACHE_WRITE_TOKENS),
199        output_tokens: get_tokens(GEN_AI_USAGE_OUTPUT_TOKENS),
200        output_reasoning_tokens: get_tokens(GEN_AI_USAGE_OUTPUT_REASONING_TOKENS),
201    };
202
203    let Some(costs) = ai::calculate_costs(model_cost, tokens, integration, platform_tag) else {
204        return;
205    };
206
207    // Overwrite all values, the attributes should reflect the values we used to calculate the total.
208    attributes.insert(GEN_AI_COST_INPUT_TOKENS, costs.input);
209    attributes.insert(GEN_AI_COST_OUTPUT_TOKENS, costs.output);
210    attributes.insert(GEN_AI_COST_TOTAL_TOKENS, costs.total());
211}
212
213fn extract_string_value<'a>(attributes: &'a Attributes, key: &str) -> Option<&'a str> {
214    attributes.get_value(key).and_then(|v| v.as_str())
215}
216
#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use relay_pattern::Pattern;
    use relay_protocol::{Empty, assert_annotated_snapshot};

    use crate::{ModelCostV2, ModelMetadataEntry};

    use super::*;

    /// Builds an [`Attributes`] map from `key => value` pairs for tests.
    macro_rules! attributes {
        ($($key:expr => $value:expr),* $(,)?) => {
            Attributes::from([
                $(($key.into(), Annotated::new($value.into())),)*
            ])
        };
    }

    /// Metadata with per-token costs for two models; neither has a context size.
    fn model_metadata() -> ModelMetadata {
        ModelMetadata {
            version: 1,
            models: HashMap::from([
                (
                    Pattern::new("claude-2.1").unwrap(),
                    ModelMetadataEntry {
                        costs: Some(ModelCostV2 {
                            input_per_token: 0.01,
                            output_per_token: 0.02,
                            output_reasoning_per_token: 0.03,
                            input_cached_per_token: 0.04,
                            input_cache_write_per_token: 0.0,
                        }),
                        context_size: None,
                    },
                ),
                (
                    Pattern::new("gpt4-21-04").unwrap(),
                    ModelMetadataEntry {
                        costs: Some(ModelCostV2 {
                            input_per_token: 0.09,
                            output_per_token: 0.05,
                            output_reasoning_per_token: 0.0,
                            input_cached_per_token: 0.0,
                            input_cache_write_per_token: 0.0,
                        }),
                        context_size: None,
                    },
                ),
            ]),
        }
    }

    /// Metadata for a single model that also carries a context window size.
    fn model_metadata_with_context_size() -> ModelMetadata {
        ModelMetadata {
            version: 1,
            models: HashMap::from([(
                Pattern::new("claude-2.1").unwrap(),
                ModelMetadataEntry {
                    costs: Some(ModelCostV2 {
                        input_per_token: 0.01,
                        output_per_token: 0.02,
                        output_reasoning_per_token: 0.03,
                        input_cached_per_token: 0.04,
                        input_cache_write_per_token: 0.0,
                    }),
                    context_size: Some(100_000),
                },
            )]),
        }
    }

    /// All token kinds present: totals, TPS and costs are all derived.
    #[test]
    fn test_normalize_ai_all_tokens() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.usage.output_tokens.reasoning" => 1000,
            "gen_ai.usage.input_tokens.cached" => 500,
            "gen_ai.request.model" => "claude-2.1".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_secs(1)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.cost.input_tokens": {
            "type": "double",
            "value": 25.0
          },
          "gen_ai.cost.output_tokens": {
            "type": "double",
            "value": 50.0
          },
          "gen_ai.cost.total_tokens": {
            "type": "double",
            "value": 75.0
          },
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "claude-2.1"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "claude-2.1"
          },
          "gen_ai.response.tokens_per_second": {
            "type": "double",
            "value": 2000.0
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.input_tokens.cached": {
            "type": "integer",
            "value": 500
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          },
          "gen_ai.usage.output_tokens.reasoning": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 3000.0
          }
        }
        "#);
    }

    /// Only plain input/output tokens; TPS derived from a 500ms duration.
    #[test]
    fn test_normalize_ai_basic_tokens() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "gpt4-21-04".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.cost.input_tokens": {
            "type": "double",
            "value": 90.0
          },
          "gen_ai.cost.output_tokens": {
            "type": "double",
            "value": 100.0
          },
          "gen_ai.cost.total_tokens": {
            "type": "double",
            "value": 190.0
          },
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "gpt4-21-04"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "gpt4-21-04"
          },
          "gen_ai.response.tokens_per_second": {
            "type": "double",
            "value": 4000.0
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 3000.0
          }
        }
        "#);
    }

    /// A zero duration yields no TPS and an unknown model yields no costs.
    #[test]
    fn test_normalize_ai_basic_tokens_no_duration_no_cost() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "unknown".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::ZERO),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "unknown"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "unknown"
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 3000.0
          }
        }
        "#);
    }

    /// An existing response model is kept; costs are recalculated from it,
    /// overwriting the pre-existing cost attribute.
    #[test]
    fn test_normalize_ai_does_not_overwrite() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "gpt4".to_owned(),
            "gen_ai.response.model" => "gpt4-21-04".to_owned(),

            "gen_ai.cost.input_tokens" => 999.0,
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.cost.input_tokens": {
            "type": "double",
            "value": 90.0
          },
          "gen_ai.cost.output_tokens": {
            "type": "double",
            "value": 100.0
          },
          "gen_ai.cost.total_tokens": {
            "type": "double",
            "value": 190.0
          },
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "gpt4"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "gpt4-21-04"
          },
          "gen_ai.response.tokens_per_second": {
            "type": "double",
            "value": 4000.0
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 3000.0
          }
        }
        "#);
    }

    /// Pre-existing derived values (totals, costs, TPS) are aggressively overwritten.
    #[test]
    fn test_normalize_ai_overwrite_costs() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "gpt4-21-04".to_owned(),

            "gen_ai.usage.total_tokens" => 1337,

            "gen_ai.cost.input_tokens" => 99.0,
            "gen_ai.cost.output_tokens" => 99.0,
            "gen_ai.cost.total_tokens" => 123.0,

            "gen_ai.response.tokens_per_second" => 42.0,
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.cost.input_tokens": {
            "type": "double",
            "value": 90.0
          },
          "gen_ai.cost.output_tokens": {
            "type": "double",
            "value": 100.0
          },
          "gen_ai.cost.total_tokens": {
            "type": "double",
            "value": 190.0
          },
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "gpt4-21-04"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "gpt4-21-04"
          },
          "gen_ai.response.tokens_per_second": {
            "type": "double",
            "value": 4000.0
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 3000.0
          }
        }
        "#);
    }

    /// Token attributes alone do not mark an item as AI: no normalization runs.
    #[test]
    fn test_normalize_ai_no_ai_attributes() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(&mut attributes, @r#"
        {
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 1000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 2000
          }
        }
        "#);
    }

    /// Entirely unrelated attributes stay untouched.
    #[test]
    fn test_normalize_ai_no_ai_indicator_attribute() {
        let mut attributes = Annotated::new(attributes! {
            "foo" => 123,
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert_annotated_snapshot!(&mut attributes, @r#"
        {
          "foo": {
            "type": "integer",
            "value": 123
          }
        }
        "#);
    }

    /// An empty `Annotated` is left empty (no value is synthesized).
    #[test]
    fn test_normalize_ai_empty() {
        let mut attributes = Annotated::empty();

        normalize_ai(
            &mut attributes,
            Some(Duration::from_millis(500)),
            Some(&model_metadata()),
        );

        assert!(attributes.is_empty());
    }

    /// With a known context size, both window size and utilization are emitted.
    #[test]
    fn test_context_utilization_with_total_tokens() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 30000,
            "gen_ai.usage.output_tokens" => 12000,
            "gen_ai.request.model" => "claude-2.1".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_secs(1)),
            Some(&model_metadata_with_context_size()),
        );

        assert_annotated_snapshot!(attributes, @r#"
        {
          "gen_ai.context.utilization": {
            "type": "double",
            "value": 0.42
          },
          "gen_ai.context.window_size": {
            "type": "integer",
            "value": 100000
          },
          "gen_ai.cost.input_tokens": {
            "type": "double",
            "value": 300.0
          },
          "gen_ai.cost.output_tokens": {
            "type": "double",
            "value": 240.0
          },
          "gen_ai.cost.total_tokens": {
            "type": "double",
            "value": 540.0
          },
          "gen_ai.operation.type": {
            "type": "string",
            "value": "ai_client"
          },
          "gen_ai.request.model": {
            "type": "string",
            "value": "claude-2.1"
          },
          "gen_ai.response.model": {
            "type": "string",
            "value": "claude-2.1"
          },
          "gen_ai.response.tokens_per_second": {
            "type": "double",
            "value": 12000.0
          },
          "gen_ai.usage.input_tokens": {
            "type": "integer",
            "value": 30000
          },
          "gen_ai.usage.output_tokens": {
            "type": "integer",
            "value": 12000
          },
          "gen_ai.usage.total_tokens": {
            "type": "double",
            "value": 42000.0
          }
        }
        "#);
    }

    /// No context size in the metadata: neither window size nor utilization is set.
    #[test]
    fn test_context_utilization_no_context_size() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "claude-2.1".to_owned(),
        });

        // model_metadata() has no context_size set.
        normalize_ai(
            &mut attributes,
            Some(Duration::from_secs(1)),
            Some(&model_metadata()),
        );

        let attrs = attributes.value().unwrap();
        assert!(attrs.get_value("gen_ai.context.window_size").is_none());
        assert!(attrs.get_value("gen_ai.context.utilization").is_none());
    }

    /// Window size is emitted on its own; utilization requires total tokens.
    #[test]
    fn test_context_utilization_no_total_tokens() {
        // Only context_size is available, but no token counts at all.
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.request.model" => "claude-2.1".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_secs(1)),
            Some(&model_metadata_with_context_size()),
        );

        let attrs = attributes.value().unwrap();
        // window_size should still be set even without tokens.
        assert_eq!(
            attrs
                .get_value("gen_ai.context.window_size")
                .unwrap()
                .as_f64(),
            Some(100_000.0)
        );
        // But utilization cannot be computed without total_tokens.
        assert!(attrs.get_value("gen_ai.context.utilization").is_none());
    }

    /// A model absent from the metadata produces no context attributes.
    #[test]
    fn test_context_utilization_unknown_model() {
        let mut attributes = Annotated::new(attributes! {
            "gen_ai.operation.type" => "ai_client".to_owned(),
            "gen_ai.usage.input_tokens" => 1000,
            "gen_ai.usage.output_tokens" => 2000,
            "gen_ai.request.model" => "unknown-model".to_owned(),
        });

        normalize_ai(
            &mut attributes,
            Some(Duration::from_secs(1)),
            Some(&model_metadata_with_context_size()),
        );

        let attrs = attributes.value().unwrap();
        assert!(attrs.get_value("gen_ai.context.window_size").is_none());
        assert!(attrs.get_value("gen_ai.context.utilization").is_none());
    }
}
797}