@@ -1139,9 +1139,11 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
11391139 and "modality" in item
11401140 and "token_count" in item
11411141 ):
1142-                usage_model[f"input_modality_{item['modality']}"] = item[
1143-                    "token_count"
1144-                ]
1142+                value = item["token_count"]
1143+                usage_model[f"input_modality_{item['modality']}"] = value
1144+
1145+                if "input" in usage_model:
1146+                    usage_model["input"] = max(0, usage_model["input"] - value)
11451147
11461148 # Vertex AI
11471149 if "candidates_tokens_details" in usage_model and isinstance (
@@ -1155,9 +1157,11 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
11551157 and "modality" in item
11561158 and "token_count" in item
11571159 ):
1158-                usage_model[f"output_modality_{item['modality']}"] = item[
1159-                    "token_count"
1160-                ]
1160+                value = item["token_count"]
1161+                usage_model[f"output_modality_{item['modality']}"] = value
1162+
1163+                if "output" in usage_model:
1164+                    usage_model["output"] = max(0, usage_model["output"] - value)
11611165
11621166 # Vertex AI
11631167 if "cache_tokens_details" in usage_model and isinstance (
@@ -1171,9 +1175,11 @@ def _parse_usage_model(usage: typing.Union[pydantic.BaseModel, dict]):
11711175 and "modality" in item
11721176 and "token_count" in item
11731177 ):
1174-                usage_model[f"cached_modality_{item['modality']}"] = item[
1175-                    "token_count"
1176-                ]
1178+                value = item["token_count"]
1179+                usage_model[f"cached_modality_{item['modality']}"] = value
1180+
1181+                if "input" in usage_model:
1182+                    usage_model["input"] = max(0, usage_model["input"] - value)
11771183
11781184 usage_model = (
11791185 {
0 commit comments