Skip to content

Commit afa518c

Browse files
author
Keren Finkelstein
committed
fix PR review comments
1 parent 0394586 commit afa518c

2 files changed

Lines changed: 2 additions & 3 deletions

File tree

extensions/evals/templates/deepeval-metric-template.py

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -165,13 +165,11 @@ def _sync_measure(self, test_case: LLMTestCase, _show_indicator: bool = True) ->
165165

166166
{{#if_evaluator_type_llm_judge}}
167167
# Convert async LLM call to sync
168-
import asyncio
169168
result = asyncio.run(self._llm_judge_evaluation(test_case))
170169
{{/if_evaluator_type_llm_judge}}
171170

172171
{{#if_evaluator_type_hybrid}}
173172
# Convert async hybrid call to sync
174-
import asyncio
175173
result = asyncio.run(self._hybrid_evaluation(test_case))
176174
{{/if_evaluator_type_hybrid}}
177175

run_evaluators.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -99,6 +99,7 @@ def run_grader(
9999
example_input = example.get("input", "")
100100
expected_output = example.get("expected_output")
101101
expected_pass = example.get("expected_pass", True)
102+
context = example.get("context")
102103

103104
input_str = self.normalize_input(example_input)
104105

@@ -108,7 +109,7 @@ def run_grader(
108109

109110
# Signature 1: grade(output, context) - promptfoo style
110111
try:
111-
result = grader_fn(input_str, None)
112+
result = grader_fn(input_str, context)
112113
except TypeError:
113114
pass
114115

0 commit comments

Comments (0)