import { LDAIJudgeConfig } from '../src/api/config/types';
import { Evaluator } from '../src/api/judge/Evaluator';
import { Judge } from '../src/api/judge/Judge';
import { LDJudgeResult } from '../src/api/judge/types';
import { AIProvider } from '../src/api/providers/AIProvider';

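// Builds a minimal enabled judge config; createTracker is stubbed because these tests never exercise tracking.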
function makeJudgeConfig(key: string): LDAIJudgeConfig {
  return {
    key,
    enabled: true,
    evaluationMetricKey: '$ld:ai:judge:quality',
    messages: [{ role: 'system', content: 'You are a judge.' }],
    createTracker: () => ({}) as any,
  };
}

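// Jest-mocked provider; every test stubs Judge.evaluate directly, so these model methods are never invoked.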
function makeProvider(): jest.Mocked<AIProvider> {
  return {
    invokeModel: jest.fn(),
    invokeStructuredModel: jest.fn(),
  } as any;
}

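// Unit tests covering Evaluator.noop() and evaluate(): judge fan-out, missing-judge handling, and error capture.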
describe('Evaluator', () => {
  describe('noop()', () => {
    it('returns an empty result array', async () => {
      const evaluator = Evaluator.noop();
      const results = await evaluator.evaluate('input', 'output');
      expect(results).toEqual([]);
    });

    it('has empty judges map', () => {
      const evaluator = Evaluator.noop();
      expect(evaluator.judges.size).toBe(0);
    });

    it('has empty judge configuration', () => {
      const evaluator = Evaluator.noop();
      expect(evaluator.judgeConfiguration.judges).toEqual([]);
    });
  });

  describe('evaluate()', () => {
    it('calls each configured judge and returns results', async () => {
      const mockProvider = makeProvider();
      const judgeConfig = makeJudgeConfig('judge-1');

      const mockResult: LDJudgeResult = {
        success: true,
        sampled: true,
        score: 0.9,
        reasoning: 'Good response',
        metricKey: '$ld:ai:judge:quality',
        judgeConfigKey: 'judge-1',
      };

      const judge = new Judge(judgeConfig, mockProvider);
      jest.spyOn(judge, 'evaluate').mockResolvedValue(mockResult);

      const judges = new Map([['judge-1', judge]]);
      const evaluator = new Evaluator(judges, { judges: [{ key: 'judge-1', samplingRate: 1.0 }] });

      const results = await evaluator.evaluate('user input', 'ai output');

      expect(results).toHaveLength(1);
      expect(results[0]).toEqual(mockResult);
      expect(judge.evaluate).toHaveBeenCalledWith('user input', 'ai output', 1.0);
    });

    it('warns and skips when judge key is not found in judges map', async () => {
      const mockLogger = { warn: jest.fn(), debug: jest.fn(), info: jest.fn(), error: jest.fn() };
      const judges = new Map<string, Judge>();
      const evaluator = new Evaluator(
        judges,
        { judges: [{ key: 'missing-judge', samplingRate: 1.0 }] },
        mockLogger,
      );

      const results = await evaluator.evaluate('input', 'output');

      expect(mockLogger.warn).toHaveBeenCalledWith(expect.stringContaining('missing-judge'));
      // Missing judge is skipped (not an error result), so results array is empty
      expect(results).toEqual([]);
    });

    it('returns error result when judge throws', async () => {
      const mockProvider = makeProvider();
      const judgeConfig = makeJudgeConfig('judge-err');

      const judge = new Judge(judgeConfig, mockProvider);
      jest.spyOn(judge, 'evaluate').mockRejectedValue(new Error('evaluation error'));

      const judges = new Map([['judge-err', judge]]);
      const evaluator = new Evaluator(judges, {
        judges: [{ key: 'judge-err', samplingRate: 1.0 }],
      });

      const results = await evaluator.evaluate('input', 'output');

      expect(results).toHaveLength(1);
      expect(results[0].success).toBe(false);
      expect(results[0].sampled).toBe(true);
      expect(results[0].errorMessage).toBe('evaluation error');
    });

    it('does NOT call tracker.trackJudgeResult', async () => {
      const mockProvider = makeProvider();
      const judgeConfig = makeJudgeConfig('judge-1');

      const mockResult: LDJudgeResult = {
        success: true,
        sampled: true,
        score: 0.8,
        reasoning: 'ok',
        metricKey: '$ld:ai:judge:quality',
      };

      const judge = new Judge(judgeConfig, mockProvider);
      jest.spyOn(judge, 'evaluate').mockResolvedValue(mockResult);

      const judges = new Map([['judge-1', judge]]);
      const evaluator = new Evaluator(judges, { judges: [{ key: 'judge-1', samplingRate: 1.0 }] });

      // No tracker is provided; if Evaluator tried to call trackJudgeResult this would throw or fail
      const results = await evaluator.evaluate('input', 'output');

      // Evaluation completing and returning the stubbed result shows no tracker was involved
      expect(results).toEqual([mockResult]);
    });

    it('runs multiple judges in parallel and returns all results', async () => {
      const makeJudge = (key: string, score: number): Judge => {
        const mockProvider = makeProvider();
        const jc = makeJudgeConfig(key);
        const j = new Judge(jc, mockProvider);
        jest.spyOn(j, 'evaluate').mockResolvedValue({
          success: true,
          sampled: true,
          score,
          reasoning: 'ok',
          metricKey: '$ld:ai:judge:quality',
        });
        return j;
      };

      const judges = new Map([
        ['judge-a', makeJudge('judge-a', 0.5)],
        ['judge-b', makeJudge('judge-b', 0.9)],
      ]);
      const evaluator = new Evaluator(judges, {
        judges: [
          { key: 'judge-a', samplingRate: 1.0 },
          { key: 'judge-b', samplingRate: 1.0 },
        ],
      });

      const results = await evaluator.evaluate('input', 'output');

      expect(results).toHaveLength(2);
      // Use a numeric comparator: the default Array.prototype.sort compares values as strings
      const scores = results.map((r) => r.score).sort((a, b) => (a ?? 0) - (b ?? 0));
      expect(scores).toEqual([0.5, 0.9]);
    });
  });
});