|
1 | 1 | import json |
2 | | -import os |
3 | 2 | import time |
4 | 3 | from concurrent.futures import ThreadPoolExecutor |
5 | 4 | from typing import Sequence |
6 | 5 |
|
7 | | -from langchain import OpenAI, PromptTemplate |
| 6 | +from langchain import PromptTemplate |
| 7 | +from langchain_openai import OpenAI |
8 | 8 |
|
9 | 9 | from langfuse import Langfuse, observe |
10 | 10 | from langfuse.api.resources.commons.types.dataset_status import DatasetStatus |
@@ -253,7 +253,7 @@ def test_langchain_dataset(): |
253 | 253 | dataset_item_id = item.id |
254 | 254 | final_trace_id = span.trace_id |
255 | 255 |
|
256 | | - llm = OpenAI(openai_api_key=os.environ.get("OPENAI_API_KEY")) |
| 256 | + llm = OpenAI() |
257 | 257 | template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title. |
258 | 258 | Title: {title} |
259 | 259 | Playwright: This is a synopsis for the above play:""" |
@@ -284,6 +284,11 @@ def test_langchain_dataset(): |
284 | 284 | def sorted_dependencies_from_trace(trace): |
285 | 285 | parent_to_observation = {} |
286 | 286 | for obs in trace.observations: |
| 287 | + # Filter out the generation that might leak in due to the monkey patching OpenAI integration |
| 288 | + # that might have run in the previous test suite. TODO: fix this hack |
| 289 | + if obs.name == "OpenAI-generation": |
| 290 | + continue |
| 291 | + |
287 | 292 | parent_to_observation[obs.parent_observation_id] = obs |
288 | 293 |
|
289 | 294 | # Start with the root observation (parent_observation_id is None) |
|
0 commit comments