Skip to content

Commit 52cdb9d

Browse files
authored
fix: change token counting fallback log from warning to debug (#2220)
1 parent bab08db commit 52cdb9d

10 files changed

Lines changed: 15 additions & 15 deletions

File tree

src/strands/models/anthropic.py

Lines changed: 1 addition & 1 deletion
@@ -412,7 +412,7 @@ async def count_tokens(
             )
             return total_tokens
         except Exception as e:
-            logger.warning(
+            logger.debug(
                 "model_id=<%s>, error=<%s> | native token counting failed, falling back to estimation",
                 self.config["model_id"],
                 e,

src/strands/models/bedrock.py

Lines changed: 1 addition & 1 deletion
@@ -798,7 +798,7 @@ async def count_tokens(
             logger.debug("model_id=<%s>, total_tokens=<%d> | native token count", self.config["model_id"], total_tokens)
             return total_tokens
         except Exception as e:
-            logger.warning(
+            logger.debug(
                 "model_id=<%s>, error=<%s> | native token counting failed, falling back to estimation",
                 self.config["model_id"],
                 e,

src/strands/models/gemini.py

Lines changed: 1 addition & 1 deletion
@@ -486,7 +486,7 @@ async def count_tokens(
             )
             return total_tokens
         except Exception as e:
-            logger.warning(
+            logger.debug(
                 "model_id=<%s>, error=<%s> | native token counting failed, falling back to estimation",
                 self.config["model_id"],
                 e,

src/strands/models/llamacpp.py

Lines changed: 1 addition & 1 deletion
@@ -555,7 +555,7 @@ async def count_tokens(
             )
             return total_tokens
         except Exception as e:
-            logger.warning(
+            logger.debug(
                 "model_id=<%s>, error=<%s> | native token counting failed, falling back to estimation",
                 self.config.get("model_id", "default"),
                 e,

src/strands/models/openai_responses.py

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@ async def count_tokens(
             )
             return total_tokens
         except Exception as e:
-            logger.warning(
+            logger.debug(
                 "model_id=<%s>, error=<%s> | native token counting failed, falling back to estimation",
                 self.config["model_id"],
                 e,

tests/strands/models/test_anthropic.py

Lines changed: 2 additions & 2 deletions
@@ -1131,10 +1131,10 @@ async def test_fallback_on_generic_exception(self, model_with_client, anthropic_
         assert result >= 0
 
     @pytest.mark.asyncio
-    async def test_fallback_logs_warning(self, model_with_client, anthropic_client, messages, caplog):
+    async def test_fallback_logs_debug(self, model_with_client, anthropic_client, messages, caplog):
         anthropic_client.messages.count_tokens = unittest.mock.AsyncMock(side_effect=RuntimeError("API down"))
 
-        with caplog.at_level(logging.WARNING):
+        with caplog.at_level(logging.DEBUG, logger="strands.models.anthropic"):
            await model_with_client.count_tokens(messages=messages)
 
         assert any("native token counting failed" in record.message for record in caplog.records)

tests/strands/models/test_bedrock.py

Lines changed: 2 additions & 2 deletions
@@ -3220,10 +3220,10 @@ async def test_fallback_on_none_input_tokens(self, model_with_client, bedrock_cl
         assert result >= 0
 
     @pytest.mark.asyncio
-    async def test_fallback_logs_warning(self, model_with_client, bedrock_client, messages, caplog):
+    async def test_fallback_logs_debug(self, model_with_client, bedrock_client, messages, caplog):
         bedrock_client.count_tokens.side_effect = RuntimeError("API down")
 
-        with caplog.at_level(logging.WARNING):
+        with caplog.at_level(logging.DEBUG, logger="strands.models.bedrock"):
            await model_with_client.count_tokens(messages=messages)
 
         assert any("native token counting failed" in record.message for record in caplog.records)

tests/strands/models/test_gemini.py

Lines changed: 2 additions & 2 deletions
@@ -1197,10 +1197,10 @@ async def test_fallback_on_generic_exception(self, model, gemini_client, message
         assert result >= 0
 
     @pytest.mark.asyncio
-    async def test_fallback_logs_warning(self, model, gemini_client, messages, caplog):
+    async def test_fallback_logs_debug(self, model, gemini_client, messages, caplog):
         gemini_client.aio.models.count_tokens.side_effect = RuntimeError("API down")
 
-        with caplog.at_level(logging.WARNING):
+        with caplog.at_level(logging.DEBUG, logger="strands.models.gemini"):
            await model.count_tokens(messages=messages)
 
         assert any("native token counting failed" in record.message for record in caplog.records)

tests/strands/models/test_llamacpp.py

Lines changed: 2 additions & 2 deletions
@@ -796,10 +796,10 @@ async def test_fallback_on_connection_error(self, model, messages):
         assert result >= 0
 
     @pytest.mark.asyncio
-    async def test_fallback_logs_warning(self, model, messages, caplog):
+    async def test_fallback_logs_debug(self, model, messages, caplog):
         model.client.post = AsyncMock(side_effect=RuntimeError("Server down"))
 
-        with caplog.at_level(logging.WARNING):
+        with caplog.at_level(logging.DEBUG, logger="strands.models.llamacpp"):
            await model.count_tokens(messages=messages)
 
         assert any("native token counting failed" in record.message for record in caplog.records)

tests/strands/models/test_openai_responses.py

Lines changed: 2 additions & 2 deletions
@@ -1289,12 +1289,12 @@ async def test_fallback_on_generic_exception(self, model, openai_client, message
         assert result >= 0
 
     @pytest.mark.asyncio
-    async def test_fallback_logs_warning(self, model, openai_client, messages, caplog):
+    async def test_fallback_logs_debug(self, model, openai_client, messages, caplog):
         import logging
 
         openai_client.responses.input_tokens.count.side_effect = RuntimeError("API down")
 
-        with caplog.at_level(logging.WARNING):
+        with caplog.at_level(logging.DEBUG, logger="strands.models.openai_responses"):
            await model.count_tokens(messages=messages)
 
         assert any("native token counting failed" in record.message for record in caplog.records)

0 commit comments

Comments (0)