Skip to content

Commit 2307535

Browse files
dpage and claude committed
Address remaining CodeRabbit review feedback for streaming and rendering.
- Use distinct 3-tuple ('complete', text, messages) for completion events to avoid ambiguity with ('tool_use', [...]) 2-tuples in chat streaming. - Pass conversation history from request into chat_with_database_stream() so follow-up NLQ turns retain context. - Add re.IGNORECASE to SQL fence regex for case-insensitive matching. - Render MarkdownContent as block element instead of span to avoid invalid DOM when response contains paragraphs, lists, or tables. - Keep stop notice as a separate message instead of appending to partial markdown, preventing it from being swallowed by open code fences. - Snapshot streamingIdRef before setMessages in error handler to avoid race condition where ref is cleared before React executes the updater. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent b210c08 commit 2307535

File tree

4 files changed

+48
-21
lines changed

4 files changed

+48
-21
lines changed

web/pgadmin/llm/chat.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -174,8 +174,8 @@ def chat_with_database_stream(
174174
Yields:
175175
str: Text content chunks from the final LLM response.
176176
177-
The last item yielded is a tuple of
178-
(final_response_text, updated_conversation_history).
177+
The last item yielded is a 3-tuple of
178+
('complete', final_response_text, updated_conversation_history).
179179
180180
Raises:
181181
LLMClientError: If the LLM request fails.
@@ -220,8 +220,9 @@ def chat_with_database_stream(
220220
messages.append(response.to_message())
221221

222222
if response.stop_reason != StopReason.TOOL_USE:
223-
# Final response - yield the completion tuple
224-
yield (response.content, messages)
223+
# Final response - yield a 3-tuple to distinguish from
224+
# the 2-tuple tool_use event
225+
yield ('complete', response.content, messages)
225226
return
226227

227228
# Signal that tools are being executed so the caller can

web/pgadmin/tools/sqleditor/__init__.py

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2875,6 +2875,7 @@ def nlq_chat_stream(trans_id):
28752875
data = request.get_json(silent=True) or {}
28762876
user_message = data.get('message', '').strip()
28772877
conversation_id = data.get('conversation_id')
2878+
history_data = data.get('history', [])
28782879

28792880
if not user_message:
28802881
return make_json_response(
@@ -2885,6 +2886,10 @@ def nlq_chat_stream(trans_id):
28852886
def generate():
28862887
"""Generator for SSE events."""
28872888
import secrets as py_secrets
2889+
from pgadmin.llm.compaction import (
2890+
deserialize_history, compact_history
2891+
)
2892+
from pgadmin.llm.utils import get_default_provider
28882893

28892894
try:
28902895
# Send thinking status
@@ -2893,13 +2898,24 @@ def generate():
28932898
'message': gettext('Analyzing your request...')
28942899
})
28952900

2901+
# Deserialize and compact conversation history
2902+
conversation_history = None
2903+
if history_data:
2904+
conversation_history = deserialize_history(history_data)
2905+
provider = get_default_provider() or 'openai'
2906+
conversation_history = compact_history(
2907+
conversation_history,
2908+
provider=provider
2909+
)
2910+
28962911
# Stream the LLM response with database tools
28972912
response_text = ''
28982913
for item in chat_with_database_stream(
28992914
user_message=user_message,
29002915
sid=trans_obj.sid,
29012916
did=trans_obj.did,
2902-
system_prompt=NLQ_SYSTEM_PROMPT
2917+
system_prompt=NLQ_SYSTEM_PROMPT,
2918+
conversation_history=conversation_history
29032919
):
29042920
if isinstance(item, str):
29052921
# Text chunk from streaming LLM response
@@ -2916,15 +2932,16 @@ def generate():
29162932
'Querying the database...'
29172933
)
29182934
})
2919-
elif isinstance(item, tuple):
2920-
# Final result: (response_text, messages)
2921-
response_text = item[0]
2935+
elif isinstance(item, tuple) and \
2936+
item[0] == 'complete':
2937+
# Final result: ('complete', response_text, messages)
2938+
response_text = item[1]
29222939

29232940
# Extract SQL from markdown code fences
29242941
sql_blocks = re.findall(
29252942
r'```(?:sql|pgsql|postgresql)\s*\n(.*?)```',
29262943
response_text,
2927-
re.DOTALL
2944+
re.DOTALL | re.IGNORECASE
29282945
)
29292946
sql = ';\n\n'.join(
29302947
block.strip().rstrip(';') for block in sql_blocks

web/pgadmin/tools/sqleditor/static/js/components/sections/NLQChatPanel.jsx

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -527,9 +527,8 @@ function ChatMessage({ message, onInsertSQL, onReplaceSQL, textColors, cmKey, fo
527527
const content = seg.content?.trim();
528528
if (!content && !cursor) return null;
529529
return (
530-
<Box key={idx} sx={{ marginTop: idx > 0 ? 1 : 0, display: 'inline' }}>
530+
<Box key={idx} sx={{ marginTop: idx > 0 ? 1 : 0 }}>
531531
<MarkdownContent
532-
component="span"
533532
dangerouslySetInnerHTML={{ __html: renderMarkdownText(content || '') }}
534533
/>
535534
{cursor}
@@ -859,13 +858,18 @@ export function NLQChatPanel() {
859858
// Check if user manually stopped
860859
if (stoppedRef.current) {
861860
const streamId = streamingIdRef.current;
862-
// If we have partial streaming content, show it as-is
861+
// If we have partial streaming content, show it separately
862+
// from the stop notice to avoid breaking open markdown fences
863863
if (streamingTextRef.current) {
864864
setMessages((prev) => [
865865
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
866866
{
867867
type: MESSAGE_TYPES.ASSISTANT,
868-
content: streamingTextRef.current + '\n\n' + gettext('(Generation stopped)'),
868+
content: streamingTextRef.current,
869+
},
870+
{
871+
type: MESSAGE_TYPES.ASSISTANT,
872+
content: gettext('Generation stopped.'),
869873
},
870874
]);
871875
} else {
@@ -889,13 +893,17 @@ export function NLQChatPanel() {
889893
if (error.name === 'AbortError') {
890894
// Check if this was a user-initiated stop or a timeout
891895
if (stoppedRef.current) {
892-
// User manually stopped - show partial content if any
896+
// User manually stopped - show partial content separately
893897
if (streamingTextRef.current) {
894898
setMessages((prev) => [
895899
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
896900
{
897901
type: MESSAGE_TYPES.ASSISTANT,
898-
content: streamingTextRef.current + '\n\n' + gettext('(Generation stopped)'),
902+
content: streamingTextRef.current,
903+
},
904+
{
905+
type: MESSAGE_TYPES.ASSISTANT,
906+
content: gettext('Generation stopped.'),
899907
},
900908
]);
901909
} else {
@@ -1020,9 +1028,10 @@ export function NLQChatPanel() {
10201028
break;
10211029
}
10221030

1023-
case 'error':
1031+
case 'error': {
1032+
const streamId = streamingIdRef.current;
10241033
setMessages((prev) => [
1025-
...prev.filter((m) => m.id !== thinkingId && m.id !== streamingIdRef.current),
1034+
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
10261035
{
10271036
type: MESSAGE_TYPES.ERROR,
10281037
content: event.message,

web/pgadmin/tools/sqleditor/tests/test_nlq_chat.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def runTest(self):
9898
if hasattr(self, 'mock_response'):
9999
def mock_stream_gen(*args, **kwargs):
100100
yield self.mock_response
101-
yield (self.mock_response, [])
101+
yield ('complete', self.mock_response, [])
102102
mock_chat = patch(
103103
'pgadmin.llm.chat.chat_with_database_stream',
104104
side_effect=mock_stream_gen
@@ -254,7 +254,7 @@ def runTest(self):
254254
sql_blocks = re.findall(
255255
r'```(?:sql|pgsql|postgresql)\s*\n(.*?)```',
256256
response_text,
257-
re.DOTALL
257+
re.DOTALL | re.IGNORECASE
258258
)
259259
sql = ';\n\n'.join(
260260
block.strip() for block in sql_blocks
@@ -328,8 +328,8 @@ def mock_stream_gen(*args, **kwargs):
328328
for chunk in [self.mock_response[i:i + 10]
329329
for i in range(0, len(self.mock_response), 10)]:
330330
yield chunk
331-
# Yield final tuple
332-
yield (self.mock_response, [])
331+
# Yield final 3-tuple
332+
yield ('complete', self.mock_response, [])
333333

334334
mock_chat = patch(
335335
'pgadmin.llm.chat.chat_with_database_stream',

0 commit comments

Comments (0)