Skip to content

Commit e844b30

Browse files
authored
fix: Handle "prompt is too long" from Anthropic (#1137)
PR #1078 mentioned that context overflows were not handled, but I wasn't able to reproduce the issue using the code changes in it. However, in testing (using @DEA's suggested test) I was able to reproduce it and consistently got a "prompt is too long:" error. Co-authored-by: Mackenzie Zastrow <zastrowm@users.noreply.github.com>
1 parent 89bab98 commit e844b30

File tree

2 files changed

+31
-0
lines changed

2 files changed

+31
-0
lines changed

src/strands/models/anthropic.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ class AnthropicModel(Model):
3939
}
4040

4141
OVERFLOW_MESSAGES = {
42+
"prompt is too long:",
4243
"input is too long",
4344
"input length exceeds context window",
4445
"input and output tokens exceed your context limit",

tests_integ/models/test_model_anthropic.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,10 @@
55

66
import strands
77
from strands import Agent
8+
from strands.agent import NullConversationManager
89
from strands.models.anthropic import AnthropicModel
10+
from strands.types.content import ContentBlock, Message
11+
from strands.types.exceptions import ContextWindowOverflowException
912

1013
"""
1114
These tests only run if we have the anthropic api key
@@ -152,3 +155,30 @@ def test_structured_output_multi_modal_input(agent, yellow_img, yellow_color):
152155
tru_color = agent.structured_output(type(yellow_color), content)
153156
exp_color = yellow_color
154157
assert tru_color == exp_color
158+
159+
160+
# NOTE(review): @pytest.mark.asyncio on a synchronous def — confirm the
# project's pytest-asyncio mode tolerates this (strict mode would reject it).
@pytest.mark.asyncio
def test_input_and_max_tokens_exceed_context_limit():
    """Test that triggers 'input length and max_tokens exceed context limit' error."""
    # Note that this test is written specifically in a style that allows us to swap out conversation_manager and
    # verify behavior
    anthropic_model = AnthropicModel(
        model_id="claude-sonnet-4-20250514",
        max_tokens=64000,
    )

    # Repeat a short sentence until the combined history overflows the model's context window.
    long_text = "This is a very long text. " * 10000

    # Three oversized turns: user -> assistant -> user.
    messages = [
        Message(role=role, content=[ContentBlock(text=long_text)])
        for role in ("user", "assistant", "user")
    ]

    # NullConversationManager will propagate ContextWindowOverflowException directly instead of handling it
    agent = Agent(model=anthropic_model, conversation_manager=NullConversationManager())

    with pytest.raises(ContextWindowOverflowException):
        agent(messages)

0 commit comments

Comments
 (0)