We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 95050df commit bf14c38 (Copy full SHA for bf14c38)
1 file changed
graphgen/models/llm/local/vllm_wrapper.py
@@ -89,8 +89,7 @@ async def generate_answer(
89
result_text = final_output.outputs[0].text
90
return result_text
91
92
- except Exception as e:
93
- print(f"Error in generate_answer: {e}")
+ except (Exception, asyncio.CancelledError):
94
await self.engine.abort(request_id)
95
raise
96
@@ -142,8 +141,7 @@ async def generate_topk_per_token(
142
141
return [main_token]
143
return []
144
145
146
- print(f"Error in generate_topk_per_token: {e}")
147
148
149
0 commit comments