Skip to content

Commit 0eb403a

Browse files
committed
release: With deepseek-reasoner's help, developed the Ubuntu agent for feature testing.
1 parent 09687de commit 0eb403a

File tree

16 files changed

+454
-28
lines changed

16 files changed

+454
-28
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ debug.log
88
__pycache__/
99
*.py[cod]
1010
*$py.class
11+
.ghostos.yml
12+
ghostos_ws/
1113

1214
# C extensions
1315
*.so

RELEASES.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,15 @@ support deepseek-r1.
1616
* fix shell does not close conversation correctly
1717
* fix sequence pipeline handling multiple complete messages incorrectly.
1818

19+
## v0.2.1
20+
21+
With deepseek-reasoner's help, developed the Ubuntu agent for feature testing.
22+
deepseek-reasoner wrote all of the terminal code.
23+
Support ubuntu agent, run `ghostos web ghostos.demo.os_agents.ubuntu_agent` to test it.
24+
25+
* llms model conf supports a new compatible option `support_function_call`, because deepseek does not support function calls yet.
26+
* develop `Terminal` library by deepseek-reasoner.
27+
1928
# v0.1.0
2029

2130
first release version.

ghostos/app/.streamlit/config.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ level = "info"
253253
# Whether to send usage statistics to Streamlit.
254254

255255
# Default: true
256-
# gatherUsageStats = true
256+
gatherUsageStats = false
257257

258258
# Port where users should point their browsers in order to connect to the
259259
# app.

ghostos/app/configs/llms_conf.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,8 @@ models:
7777
model: deepseek-reasoner
7878
service: deepseek
7979
reasoning: {}
80+
compatible:
81+
support_function_call: false
8082
gpt-3.5-turbo:
8183
kwargs: { }
8284
max_tokens: 2000

ghostos/core/llms/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from ghostos.core.llms.configs import (
2-
ModelConf, ServiceConf, LLMsConfig,
2+
ModelConf, ServiceConf, LLMsConfig, Compatible,
33
OPENAI_DRIVER_NAME, LITELLM_DRIVER_NAME, DEEPSEEK_DRIVER_NAME,
44
)
55
from ghostos.core.llms.abcd import LLMs, LLMDriver, LLMApi

ghostos/core/llms/configs.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ class Compatible(BaseModel):
7676
use_developer_role: bool = Field(default=False, description="use developer role instead of system")
7777
allow_system_in_messages: bool = Field(default=True, description="allow system messages in history")
7878
allow_system_message: bool = Field(default=True, description="support system message or not")
79+
support_function_call: bool = Field(default=True, description="if the service or model support function call")
7980

8081

8182
class Azure(BaseModel):
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
from ghostos.core.moss import Moss as Parent
2+
3+
4+
class Moss(Parent):
5+
"""
6+
not prepared any libraries yet.
7+
waiting for your suggestions.
8+
"""
9+
pass
10+
11+
12+
# <moss-hide>
13+
from ghostos.ghosts.moss_agent import MossAgent
14+
15+
__ghost__ = MossAgent(
16+
moss_module=__name__,
17+
persona="""
18+
You are the meta agent of GhostOS,
19+
you are supposed to help users develop anything that GhostOS and its agents can use.
20+
""",
21+
instructions="""
22+
You are going to follow user's instructions to design library or coding,
23+
based on your Understanding of GhostOS and MOSS Protocol.
24+
25+
* 你的任务是帮助用户做设计和实现, 而不是自己写代码.
26+
* 在用户给你明确需求之前, 不要自作主张做什么.
27+
""",
28+
name="GhostOSMeta",
29+
llm_api="deepseek-reasoner",
30+
)
31+
32+
# </moss-hide>

ghostos/demo/os_agents/__init__.py

Whitespace-only changes.
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from ghostos.core.moss import Moss as Parent
2+
from ghostos.libraries.terminal import Terminal
3+
4+
5+
class Moss(Parent):
6+
terminal: Terminal
7+
""" your terminal to exec command at the operating system you are located."""
8+
9+
10+
# <moss-hide>
11+
from ghostos.ghosts.moss_agent import MossAgent
12+
13+
__ghost__ = MossAgent(
14+
moss_module=__name__,
15+
persona="""
16+
你是一个精通 Ubuntu 系统的 Agent.
17+
""",
18+
instructions="""
19+
你的主要任务是协助用户理解并且操作当前系统.
20+
""",
21+
name="Ubuntu Agent",
22+
llm_api="deepseek-chat",
23+
)
24+
25+
26+
def __shell_providers__():
27+
from ghostos.libraries.terminal import UbuntuTerminalProvider
28+
yield UbuntuTerminalProvider(safe_mode=True)
29+
30+
# </moss-hide>

ghostos/framework/llms/openai_driver.py

Lines changed: 29 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
1-
from typing import List, Iterable, Union, Optional
1+
from typing import List, Iterable, Union, Optional, Tuple
22
from openai import OpenAI, AzureOpenAI
33
from httpx import Client
44
from httpx_socks import SyncProxyTransport
5-
from openai import NOT_GIVEN
5+
from openai import NOT_GIVEN, NotGiven
66
from openai.types.chat import ChatCompletion, ChatCompletionReasoningEffort
77
from openai.types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
88
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
9+
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
910
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
11+
from openai.types.chat.completion_create_params import Function
1012
from ghostos.contracts.logger import LoggerItf, get_ghostos_logger
1113
from ghostos.helpers import timestamp_ms
1214
from ghostos.core.messages import (
@@ -15,11 +17,11 @@
1517
CompletionUsagePayload, Role,
1618
)
1719
from ghostos.core.llms import (
18-
LLMs, LLMDriver, LLMApi, ModelConf, ServiceConf, OPENAI_DRIVER_NAME, LITELLM_DRIVER_NAME, DEEPSEEK_DRIVER_NAME,
20+
LLMDriver, LLMApi, ModelConf, ServiceConf, OPENAI_DRIVER_NAME, LITELLM_DRIVER_NAME, DEEPSEEK_DRIVER_NAME,
1921
Prompt, PromptPayload, PromptStorage,
22+
Compatible,
2023
FunctionalToken,
2124
)
22-
from ghostos.container import Bootstrapper, Container
2325

2426
__all__ = [
2527
'OpenAIDriver', 'OpenAIAdapter',
@@ -137,9 +139,7 @@ def _parse_system_to_develop(messages: List[Message]) -> List[Message]:
137139
return changed
138140

139141
def parse_by_compatible_settings(self, messages: List[Message]) -> List[Message]:
140-
compatible = self.model.compatible
141-
if compatible is None:
142-
compatible = self.service.compatible
142+
compatible = self._get_compatible_options()
143143

144144
if compatible is None:
145145
return messages
@@ -191,12 +191,7 @@ def _chat_completion(self, prompt: Prompt, stream: bool) -> Union[ChatCompletion
191191
try:
192192
prompt.run_start = timestamp_ms()
193193
self._logger.debug(f"start chat completion messages %s", messages)
194-
functions = prompt.get_openai_functions()
195-
tools = prompt.get_openai_tools()
196-
if self.model.use_tools:
197-
functions = NOT_GIVEN
198-
else:
199-
tools = NOT_GIVEN
194+
functions, tools = self._get_prompt_functions_and_tools(prompt)
200195
return self._client.chat.completions.create(
201196
messages=messages,
202197
model=self.model.model,
@@ -218,6 +213,22 @@ def _chat_completion(self, prompt: Prompt, stream: bool) -> Union[ChatCompletion
218213
self._logger.debug(f"end chat completion for prompt {prompt.id}")
219214
prompt.run_end = timestamp_ms()
220215

216+
def _get_prompt_functions_and_tools(
217+
self,
218+
prompt: Prompt,
219+
) -> Tuple[Union[List[Function], NotGiven], Union[List[ChatCompletionToolParam], NotGiven]]:
220+
functions = prompt.get_openai_functions()
221+
tools = prompt.get_openai_tools()
222+
if self.model.use_tools:
223+
functions = NOT_GIVEN
224+
else:
225+
tools = NOT_GIVEN
226+
# compatible check
227+
if not self._get_compatible_options().support_function_call:
228+
functions = NOT_GIVEN
229+
tools = NOT_GIVEN
230+
return functions, tools
231+
221232
def _reasoning_completion(self, prompt: Prompt) -> ChatCompletion:
222233
if self.model.reasoning is None:
223234
raise NotImplementedError(f"current model {self.model} does not support reasoning completion ")
@@ -237,12 +248,7 @@ def _reasoning_completion(self, prompt: Prompt) -> ChatCompletion:
237248
try:
238249
prompt.run_start = timestamp_ms()
239250
self._logger.debug(f"start reasoning completion messages %s", messages)
240-
functions = prompt.get_openai_functions()
241-
tools = prompt.get_openai_tools()
242-
if self.model.use_tools:
243-
functions = NOT_GIVEN
244-
else:
245-
tools = NOT_GIVEN
251+
functions, tools = self._get_prompt_functions_and_tools(prompt)
246252
if self.model.reasoning.effort is None:
247253
reasoning_effort = NOT_GIVEN
248254
else:
@@ -292,12 +298,7 @@ def _reasoning_completion_stream(self, prompt: Prompt) -> Iterable[ChatCompletio
292298
try:
293299
prompt.run_start = timestamp_ms()
294300
self._logger.debug(f"start reasoning completion messages %s", messages)
295-
functions = prompt.get_openai_functions()
296-
tools = prompt.get_openai_tools()
297-
if self.model.use_tools:
298-
functions = NOT_GIVEN
299-
else:
300-
tools = NOT_GIVEN
301+
functions, tools = self._get_prompt_functions_and_tools(prompt)
301302
if self.model.reasoning.effort is None:
302303
reasoning_effort = NOT_GIVEN
303304
else:
@@ -415,6 +416,9 @@ def parse_prompt(self, prompt: Prompt) -> Prompt:
415416
prompt.model = self.model
416417
return prompt
417418

419+
def _get_compatible_options(self) -> Compatible:
420+
return self.model.compatible or self.service.compatible or Compatible()
421+
418422

419423
class OpenAIDriver(LLMDriver):
420424
"""

0 commit comments

Comments
 (0)