Skip to content

Commit 93b1014

Browse files
Wang-Daoji, hijzy, CaralHsi, yuan.wang
authored
feat: repair bug 2.0.10 (#1282)
* chore: change version number to v2.0.9
* fix: llm rsp content error fix
* fix: add sources in tool and skill memory
---------
Co-authored-by: jiang <fdjzy@qq.com>
Co-authored-by: Jiang <33757498+hijzy@users.noreply.github.com>
Co-authored-by: CaralHsi <caralhsi@gmail.com>
Co-authored-by: yuan.wang <yuan.wang@yuanwangdebijibendiannao.local>
1 parent cca4745 commit 93b1014

6 files changed

Lines changed: 41 additions & 8 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
##############################################################################
55

66
name = "MemoryOS"
7-
version = "2.0.8"
7+
version = "2.0.9"
88
description = "Intelligence Begins with Memory"
99
license = {text = "Apache-2.0"}
1010
readme = "README.md"

src/memos/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__version__ = "2.0.8"
1+
__version__ = "2.0.9"
22

33
from memos.configs.mem_cube import GeneralMemCubeConfig
44
from memos.configs.mem_os import MOSConfig

src/memos/llms/ollama.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ def generate(self, messages: MessageList, **kwargs) -> Any:
8888
)
8989
str_response = response.message.content
9090
if self.config.remove_think_prefix:
91-
return remove_thinking_tags(str_response)
91+
return remove_thinking_tags(str_response or "")
9292
else:
9393
return str_thinking + str_response
9494

src/memos/llms/openai.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def _parse_response(self, response) -> str:
5656
if isinstance(reasoning_content, str) and reasoning_content:
5757
reasoning_content = f"<think>{reasoning_content}</think>"
5858
if self.config.remove_think_prefix:
59-
return remove_thinking_tags(response_content)
59+
return remove_thinking_tags(response_content or "")
6060
if reasoning_content:
6161
return reasoning_content + (response_content or "")
6262
return response_content or ""
@@ -202,7 +202,7 @@ def generate(self, messages: MessageList, **kwargs) -> str:
202202
return self.tool_call_parser(response.choices[0].message.tool_calls)
203203
response_content = response.choices[0].message.content
204204
if self.config.remove_think_prefix:
205-
return remove_thinking_tags(response_content)
205+
return remove_thinking_tags(response_content or "")
206206
else:
207207
return response_content or ""
208208

src/memos/mem_reader/multi_modal_struct.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -927,6 +927,10 @@ def _process_tool_trajectory_fine(
927927
project_id = user_context.project_id if user_context else None
928928

929929
for fast_item in fast_memory_items:
930+
sources = fast_item.metadata.sources or []
931+
if not isinstance(sources, list):
932+
sources = [sources]
933+
930934
# Extract memory text (string content)
931935
mem_str = fast_item.memory or ""
932936
if not mem_str.strip() or (
@@ -954,6 +958,7 @@ def _process_tool_trajectory_fine(
954958
tool_used_status=m.get("tool_used_status", []),
955959
manager_user_id=manager_user_id,
956960
project_id=project_id,
961+
sources=sources,
957962
)
958963
fine_memory_items.append(node)
959964
except Exception as e:

src/memos/mem_reader/read_skill_memory/process_skill_memory.py

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,11 @@
1919
from memos.llms.base import BaseLLM
2020
from memos.log import get_logger
2121
from memos.mem_reader.read_multi_modal import detect_lang
22-
from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata
22+
from memos.memories.textual.item import (
23+
SourceMessage,
24+
TextualMemoryItem,
25+
TreeNodeTextualMemoryMetadata,
26+
)
2327
from memos.memories.textual.tree_text_memory.retrieve.searcher import Searcher
2428
from memos.templates.skill_mem_prompt import (
2529
OTHERS_GENERATION_PROMPT,
@@ -91,6 +95,7 @@ def _batch_extract_skills(
9195
try:
9296
skill_memory = future.result()
9397
if skill_memory:
98+
skill_memory["_task_type"] = task_type
9499
results.append((skill_memory, task_type, task_chunks.get(task_type, [])))
95100
except Exception as e:
96101
logger.warning(
@@ -901,6 +906,7 @@ def create_skill_memory_item(
901906
skill_memory: dict[str, Any],
902907
info: dict[str, Any],
903908
embedder: BaseEmbedder | None = None,
909+
sources: list[SourceMessage] | None = None,
904910
**kwargs: Any,
905911
) -> TextualMemoryItem:
906912
info_ = info.copy()
@@ -923,7 +929,7 @@ def create_skill_memory_item(
923929
status="activated",
924930
tags=skill_memory.get("tags") or skill_memory.get("trigger", []),
925931
key=skill_memory.get("name", ""),
926-
sources=[],
932+
sources=sources or [],
927933
usage=[],
928934
background="",
929935
confidence=0.99,
@@ -1097,6 +1103,7 @@ def _simple_extract():
10971103
try:
10981104
skill_memory = future.result()
10991105
if skill_memory:
1106+
skill_memory["_task_type"] = task_type
11001107
memories.append(skill_memory)
11011108
except Exception as e:
11021109
logger.warning(
@@ -1223,11 +1230,32 @@ def _full_extract():
12231230
except Exception as cleanup_error:
12241231
logger.warning(f"[PROCESS_SKILLS] Error cleaning up local files: {cleanup_error}")
12251232

1233+
# Build source lookup: (role, content) → SourceMessage from fast_memory_items
1234+
source_lookup: dict[tuple[str, str], SourceMessage] = {}
1235+
for fast_item in fast_memory_items:
1236+
for source in getattr(fast_item.metadata, "sources", []) or []:
1237+
source_lookup.setdefault((source.role, source.content), source)
1238+
12261239
# Create TextualMemoryItem objects
12271240
skill_memory_items = []
12281241
for skill_memory in skill_memories:
12291242
try:
1230-
memory_item = create_skill_memory_item(skill_memory, info, embedder, **kwargs)
1243+
# Match sources precisely via the task chunk messages that produced this skill
1244+
task_type = skill_memory.pop("_task_type", None)
1245+
chunk_messages = task_chunks.get(task_type, []) if task_type else []
1246+
skill_sources = []
1247+
seen = set()
1248+
for msg in chunk_messages:
1249+
key = (msg.get("role"), msg.get("content"))
1250+
if key not in seen:
1251+
seen.add(key)
1252+
source = source_lookup.get(key)
1253+
if source:
1254+
skill_sources.append(source)
1255+
1256+
memory_item = create_skill_memory_item(
1257+
skill_memory, info, embedder, sources=skill_sources, **kwargs
1258+
)
12311259
skill_memory_items.append(memory_item)
12321260
except Exception as e:
12331261
logger.warning(f"[PROCESS_SKILLS] Error creating skill memory item: {e}")

0 commit comments

Comments (0)