@@ -75,6 +75,7 @@ async def load_recording(self, name: str) -> None:
             binary_data = await response.bytes()
             self.show_flamegraph(binary_data)
             self.design = Design(self.flamegraph.calls)
+            self.show_analysis(recording.analysis)
         except pyodide.http.AbortError as e:
             self.flamegraph.show_message(f"Cannot reach the Microlog server: {e}")
         except Exception as e:  # pylint: disable=broad-except
@@ -380,16 +381,17 @@ def post():
             ltk.post(
                 "/analysis/",
                 f"{name}\n{prompt}",
-                ltk.proxy(lambda response: self.show_ai_response(response)),  # pylint: disable=unnecessary-lambda
+                ltk.proxy(lambda response: self.show_analysis(response)),  # pylint: disable=unnecessary-lambda
                 "text"
             )
         ltk.schedule(post, "allow windmill to load before doing a post")

-    def show_ai_response(self, response: str) -> None:
-        """Display the response from the LLM in the analysis tab."""
-        _, response = response.split("\n", 1)
-        ltk.find("#analysis").html(markdown.markdown(f"{response}<br><br><h1>The prompt that was used:</h1>{self.get_prompt()}"))
-        ltk.find("#ask-ai").attr("disabled", False)
+    def show_analysis(self, name_and_analysis: str) -> None:
+        """Display the analysis from the LLM."""
+        name, analysis = name_and_analysis.split("\n", 1)
+        if name == self.get_recording_from_url():
+            ltk.find("#analysis").html(markdown.markdown(f"{analysis}<br><br><h1>The prompt that was used:</h1>{self.get_prompt()}"))
+            ltk.find("#ask-ai").attr("disabled", False)

     def create_sidebar(self) -> None:
         """Create and render the sidebar with log filter and list."""