1+ import json
12import pytest
23
34from sentry_sdk .utils import package_version
67 from openai import NOT_GIVEN
78except ImportError :
89 NOT_GIVEN = None
9-
1010try :
1111 from openai import omit
1212except ImportError :
4444 OpenAIIntegration ,
4545 _calculate_token_usage ,
4646)
47+ from sentry_sdk .ai .utils import MAX_GEN_AI_MESSAGE_BYTES
48+ from sentry_sdk ._types import AnnotatedValue
49+ from sentry_sdk .serializer import serialize
4750
4851from unittest import mock # python 3.3 and above
4952
@@ -1456,6 +1459,7 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools):
14561459
14571460def test_openai_message_role_mapping (sentry_init , capture_events ):
14581461 """Test that OpenAI integration properly maps message roles like 'ai' to 'assistant'"""
1462+
14591463 sentry_init (
14601464 integrations = [OpenAIIntegration (include_prompts = True )],
14611465 traces_sample_rate = 1.0 ,
@@ -1465,7 +1469,6 @@ def test_openai_message_role_mapping(sentry_init, capture_events):
14651469
14661470 client = OpenAI (api_key = "z" )
14671471 client .chat .completions ._post = mock .Mock (return_value = EXAMPLE_CHAT_COMPLETION )
1468-
14691472 # Test messages with mixed roles including "ai" that should be mapped to "assistant"
14701473 test_messages = [
14711474 {"role" : "system" , "content" : "You are helpful." },
@@ -1476,11 +1479,9 @@ def test_openai_message_role_mapping(sentry_init, capture_events):
14761479
14771480 with start_transaction (name = "openai tx" ):
14781481 client .chat .completions .create (model = "test-model" , messages = test_messages )
1479-
1482+ # Verify that the span was created correctly
14801483 (event ,) = events
14811484 span = event ["spans" ][0 ]
1482-
1483- # Verify that the span was created correctly
14841485 assert span ["op" ] == "gen_ai.chat"
14851486 assert SPANDATA .GEN_AI_REQUEST_MESSAGES in span ["data" ]
14861487
@@ -1505,3 +1506,55 @@ def test_openai_message_role_mapping(sentry_init, capture_events):
15051506 # Verify no "ai" roles remain
15061507 roles = [msg ["role" ] for msg in stored_messages ]
15071508 assert "ai" not in roles
1509+
1510+
def test_openai_message_truncation(sentry_init, capture_events):
    """Oversized prompt messages must be truncated before landing on the span.

    Sends several messages whose content far exceeds the integration's size
    budget and verifies that whatever is stored under
    GEN_AI_REQUEST_MESSAGES is valid JSON, is a list, and is no longer than
    the original message list.  If messages were actually dropped, the
    serializer's ``_meta`` annotation should record the original length.
    """
    sentry_init(
        integrations=[OpenAIIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    client = OpenAI(api_key="z")
    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)

    # One oversized payload, reused across several messages to blow past the limit.
    oversized = (
        "This is a very long message that will exceed our size limits. " * 1000
    )
    request_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": oversized},
        {"role": "assistant", "content": oversized},
        {"role": "user", "content": oversized},
    ]

    with start_transaction(name="openai tx"):
        client.chat.completions.create(
            model="some-model",
            messages=request_messages,
        )

    (event,) = events
    span = event["spans"][0]
    assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]

    stored = span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
    assert isinstance(stored, str)

    decoded = json.loads(stored)
    assert isinstance(decoded, list)
    assert len(decoded) <= len(request_messages)

    # Truncation is not guaranteed (it depends on the configured byte budget),
    # so only check the _meta annotation when messages were actually dropped.
    if "_meta" in event and len(decoded) < len(request_messages):
        data_meta = event["_meta"].get("spans", {}).get("0", {}).get("data", {})
        if SPANDATA.GEN_AI_REQUEST_MESSAGES in data_meta:
            messages_meta = data_meta[SPANDATA.GEN_AI_REQUEST_MESSAGES]
            assert "len" in messages_meta.get("", {})
0 commit comments