Commit 5fda954

Merge branch 'main' into consciousness_capsule
2 parents 523b82c + f2bc3c9

5 files changed: +109 -106 lines


ghostos/framework/llms/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
 from ghostos.core.llms import LLMs, Prompt, PromptStorage
 from ghostos.framework.llms.llms import LLMsImpl
-from ghostos.framework.llms.openai_driver import OpenAIDriver, OpenAIAdapter, LitellmAdapter
+from ghostos.framework.llms.openai_driver import OpenAIDriver, OpenAIAdapter
+from ghostos.framework.llms.lite_llm_driver import LitellmAdapter
 from ghostos.framework.llms.providers import ConfigBasedLLMsProvider, PromptStorageInWorkspaceProvider, LLMsYamlConfig
 from ghostos.framework.llms.prompt_storage_impl import PromptStorageImpl
ghostos/framework/llms/deepseek_driver.py

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+from typing import Iterable
+from ghostos.core.llms.configs import ServiceConf, ModelConf, DEEPSEEK_DRIVER_NAME
+from ghostos.core.llms.abcd import LLMApi
+from ghostos.core.llms.prompt import Prompt, PromptPayload
+from ghostos.core.messages import Message, MessageStage
+from ghostos.helpers.timeutils import timestamp_ms
+from ghostos.framework.llms.openai_driver import OpenAIDriver, OpenAIAdapter, ChatCompletion, ChatCompletionChunk
+
+
+class DeepseekAdapter(OpenAIAdapter):
+    def _from_openai_chat_completion_item(self, message: ChatCompletion) -> Iterable[Message]:
+        cc_item = message.choices[0].message
+        if reasoning := cc_item.reasoning_content:
+            reasoning_message = Message.new_tail(content=reasoning)
+            reasoning_message.stage = MessageStage.REASONING.value
+            yield reasoning_message
+
+        yield self._parser.from_chat_completion(cc_item)
+
+    def reasoning_completion_stream(self, prompt: Prompt) -> Iterable[ChatCompletionChunk]:
+        try:
+            chunks: Iterable[ChatCompletionChunk] = self._reasoning_completion_stream(prompt)
+            messages = self._from_openai_chat_completion_chunks(chunks)
+            prompt_payload = PromptPayload.from_prompt(prompt)
+            output = []
+            for chunk in messages:
+                if not prompt.first_token:
+                    prompt.first_token = timestamp_ms()
+                yield chunk
+                if chunk.is_complete():
+                    self.model.set_payload(chunk)
+                    prompt_payload.set_payload(chunk)
+                output.append(chunk)
+            prompt.added = output
+        except Exception as e:
+            prompt.error = str(e)
+            raise
+        finally:
+            self._storage.save(prompt)
+
+
+class DeepseekDriver(OpenAIDriver):
+
+    def driver_name(self) -> str:
+        return DEEPSEEK_DRIVER_NAME
+
+    def new(self, service: ServiceConf, model: ModelConf, api_name: str = "") -> LLMApi:
+        return DeepseekAdapter(service, model, self._parser, self._storage, self._logger, api_name=api_name)
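
Note on the adapter above: when DeepSeek's API returns a reasoning_content field alongside the answer, _from_openai_chat_completion_item yields two messages, the chain-of-thought tagged with the REASONING stage followed by the final answer. A minimal, dependency-free sketch of that split, using stand-in types rather than ghostos' real Message and ChatCompletion classes:

from dataclasses import dataclass
from typing import Iterable, Optional


@dataclass
class StubMessage:  # stand-in for ghostos.core.messages.Message
    content: str
    stage: str = ""


@dataclass
class StubChoiceMessage:  # stand-in for the OpenAI chat completion choice message
    content: str
    reasoning_content: Optional[str] = None


def split_reasoning(item: StubChoiceMessage) -> Iterable[StubMessage]:
    # mirrors _from_openai_chat_completion_item: the chain-of-thought comes
    # first as a reasoning-stage message, then the final answer follows
    if item.reasoning_content:
        yield StubMessage(content=item.reasoning_content, stage="reasoning")
    yield StubMessage(content=item.content)


msgs = list(split_reasoning(StubChoiceMessage("42", reasoning_content="thinking it through")))
assert msgs[0].stage == "reasoning" and msgs[1].content == "42"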
ghostos/framework/llms/lite_llm_driver.py

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+from typing import List
+from ghostos.core.llms.configs import ServiceConf, ModelConf, LITELLM_DRIVER_NAME
+from ghostos.core.llms.abcd import LLMApi
+from ghostos.core.messages import Role, Message
+from ghostos.core.llms.prompt import Prompt
+from ghostos.framework.llms.openai_driver import OpenAIDriver, OpenAIAdapter, ChatCompletionMessageParam, ChatCompletion
+
+
+class LitellmAdapter(OpenAIAdapter):
+    """
+    adapter class wrap openai api to ghostos.blueprint.kernel.llms.LLMApi
+    """
+
+    def _chat_completion(self, chat: Prompt, stream: bool) -> ChatCompletion:
+        import litellm
+        messages = chat.get_messages()
+        messages = self.parse_message_params(messages)
+        response = litellm.completion(
+            model=self.model.model,
+            messages=list(messages),
+            timeout=self.model.timeout,
+            temperature=self.model.temperature,
+            n=self.model.n,
+            # not support stream yet
+            stream=False,
+            api_key=self.service.token,
+        )
+        return response.choices[0].message
+
+    def parse_message_params(self, messages: List[Message]) -> List[ChatCompletionMessageParam]:
+        parsed = super().parse_message_params(messages)
+        outputs = []
+        count = 0
+        for message in parsed:
+            # filter all the system message to __system__ user message.
+            if count > 0 and "role" in message and message["role"] == Role.SYSTEM.value:
+                message["role"] = Role.USER.value
+                message["name"] = "__system__"
+            outputs.append(message)
+            count += 1
+        return outputs
+
+
+class LiteLLMDriver(OpenAIDriver):
+
+    def driver_name(self) -> str:
+        return LITELLM_DRIVER_NAME
+
+    def new(self, service: ServiceConf, model: ModelConf, api_name: str = "") -> LLMApi:
+        return LitellmAdapter(service, model, self._parser, self._storage, self._logger, api_name=api_name)
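
The parse_message_params override above keeps the first message untouched and downgrades every later system message to a user message named __system__, presumably because some litellm backends reject multiple or mid-conversation system messages. A dependency-free sketch of the same rule over plain OpenAI-style message dicts:

from typing import Dict, List


def downgrade_trailing_system(messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
    # leave the leading message alone; turn any later system message
    # into a user message carrying the "__system__" name marker
    outputs = []
    for i, message in enumerate(messages):
        if i > 0 and message.get("role") == "system":
            message = {**message, "role": "user", "name": "__system__"}
        outputs.append(message)
    return outputs


example = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi"},
    {"role": "system", "content": "Stay concise."},
]
print(downgrade_trailing_system(example)[-1])
# {'role': 'user', 'content': 'Stay concise.', 'name': '__system__'}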

ghostos/framework/llms/openai_driver.py

Lines changed: 6 additions & 104 deletions
@@ -3,7 +3,7 @@
 from httpx import Client
 from httpx_socks import SyncProxyTransport
 from openai import NOT_GIVEN, NotGiven
-from openai.types.chat import ChatCompletion, ChatCompletionReasoningEffort
+from openai.types.chat import ChatCompletion
 from openai.types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
 from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
@@ -12,22 +12,18 @@
 from ghostos.contracts.logger import LoggerItf, get_ghostos_logger
 from ghostos.helpers import timestamp_ms
 from ghostos.core.messages import (
-    MessageStage,
     Message, OpenAIMessageParser, DefaultOpenAIMessageParser,
     CompletionUsagePayload, Role,
 )
 from ghostos.core.llms import (
-    LLMDriver, LLMApi, ModelConf, ServiceConf, OPENAI_DRIVER_NAME, LITELLM_DRIVER_NAME, DEEPSEEK_DRIVER_NAME,
-    Prompt, PromptPayload, PromptStorage,
-    Compatible,
+    LLMApi, LLMDriver,
+    ModelConf, ServiceConf, Compatible,
+    OPENAI_DRIVER_NAME, LITELLM_DRIVER_NAME, DEEPSEEK_DRIVER_NAME,
     FunctionalToken,
+    Prompt, PromptPayload, PromptStorage
 )
 
-__all__ = [
-    'OpenAIDriver', 'OpenAIAdapter',
-    'LitellmAdapter', 'LiteLLMDriver',
-    'DeepseekDriver', 'DeepseekAdapter',
-]
+__all__ = ['OpenAIDriver', 'OpenAIAdapter']
 
 
 class FunctionalTokenPrompt(str):
@@ -438,97 +434,3 @@ def driver_name(self) -> str:
     def new(self, service: ServiceConf, model: ModelConf, api_name: str = "") -> LLMApi:
         get_ghostos_logger().debug(f"new llm api %s at service %s", model.model, service.name)
         return OpenAIAdapter(service, model, self._parser, self._storage, self._logger, api_name=api_name)
-
-
-class LitellmAdapter(OpenAIAdapter):
-    """
-    adapter class wrap openai api to ghostos.blueprint.kernel.llms.LLMApi
-    """
-
-    def _chat_completion(self, chat: Prompt, stream: bool) -> ChatCompletion:
-        import litellm
-        messages = chat.get_messages()
-        messages = self.parse_message_params(messages)
-        response = litellm.completion(
-            model=self.model.model,
-            messages=list(messages),
-            timeout=self.model.timeout,
-            temperature=self.model.temperature,
-            n=self.model.n,
-            # not support stream yet
-            stream=False,
-            api_key=self.service.token,
-        )
-        return response.choices[0].message
-
-    def parse_message_params(self, messages: List[Message]) -> List[ChatCompletionMessageParam]:
-        parsed = super().parse_message_params(messages)
-        outputs = []
-        count = 0
-        for message in parsed:
-            # filter all the system message to __system__ user message.
-            if count > 0 and "role" in message and message["role"] == Role.SYSTEM.value:
-                message["role"] = Role.USER.value
-                message["name"] = "__system__"
-            outputs.append(message)
-            count += 1
-        return outputs
-
-
-# todo: move to lite_llm_driver. shall not locate here at very first.
-class LiteLLMDriver(OpenAIDriver):
-
-    def driver_name(self) -> str:
-        return LITELLM_DRIVER_NAME
-
-    def new(self, service: ServiceConf, model: ModelConf, api_name: str = "") -> LLMApi:
-        return LitellmAdapter(service, model, self._parser, self._storage, self._logger, api_name=api_name)
-
-
-class DeepseekAdapter(OpenAIAdapter):
-    # def reasoning_completion(self, prompt: Prompt, stream: bool) -> Iterable[Message]:
-    #     if not stream:
-    #         yield from self._reasoning_completion_none_stream(prompt)
-    #     else:
-    #         yield from self._reasoning_completion_stream(prompt)
-    #
-
-    def _from_openai_chat_completion_item(self, message: ChatCompletion) -> Iterable[Message]:
-        cc_item = message.choices[0].message
-        if reasoning := cc_item.reasoning_content:
-            reasoning_message = Message.new_tail(content=reasoning)
-            reasoning_message.stage = MessageStage.REASONING.value
-            yield reasoning_message
-
-        yield self._parser.from_chat_completion(cc_item)
-
-    def reasoning_completion_stream(self, prompt: Prompt) -> Iterable[ChatCompletionChunk]:
-        try:
-            chunks: Iterable[ChatCompletionChunk] = self._reasoning_completion_stream(prompt)
-            messages = self._from_openai_chat_completion_chunks(chunks)
-            prompt_payload = PromptPayload.from_prompt(prompt)
-            output = []
-            for chunk in messages:
-                if not prompt.first_token:
-                    prompt.first_token = timestamp_ms()
-                yield chunk
-                if chunk.is_complete():
-                    self.model.set_payload(chunk)
-                    prompt_payload.set_payload(chunk)
-                output.append(chunk)
-            prompt.added = output
-        except Exception as e:
-            prompt.error = str(e)
-            raise
-        finally:
-            self._storage.save(prompt)
-
-
-# todo: move to deepseek driver
-class DeepseekDriver(OpenAIDriver):
-
-    def driver_name(self) -> str:
-        return DEEPSEEK_DRIVER_NAME
-
-    def new(self, service: ServiceConf, model: ModelConf, api_name: str = "") -> LLMApi:
-        return DeepseekAdapter(service, model, self._parser, self._storage, self._logger, api_name=api_name)
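
The reasoning_completion_stream method removed here (and re-added in deepseek_driver.py) follows a bookkeeping pattern worth calling out: stamp the prompt when the first token arrives, accumulate completed chunks into prompt.added, record any error, and persist the prompt in a finally block so the trace survives success and failure alike. A generic, dependency-free sketch of that shape, with a hypothetical save_trace callback standing in for PromptStorage.save:

import time
from typing import Callable, Dict, Iterable, Iterator


def traced_stream(chunks: Iterable[str], trace: Dict, save_trace: Callable[[Dict], None]) -> Iterator[str]:
    # mirrors the try/except/finally shape of reasoning_completion_stream
    output = []
    try:
        for chunk in chunks:
            if "first_token_ms" not in trace:
                trace["first_token_ms"] = int(time.time() * 1000)  # first-token latency marker
            output.append(chunk)
            yield chunk
        trace["added"] = output
    except Exception as e:
        trace["error"] = str(e)
        raise
    finally:
        save_trace(trace)  # persisted whether the stream succeeded or failed


trace: Dict = {}
assert list(traced_stream(iter("abc"), trace, save_trace=lambda t: None)) == ["a", "b", "c"]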

ghostos/framework/llms/providers.py

Lines changed: 3 additions & 1 deletion
@@ -4,7 +4,9 @@
 from ghostos.core.llms import LLMs, LLMsConfig, PromptStorage
 from ghostos.core.messages.openai import OpenAIMessageParser
 from ghostos.framework.llms.llms import LLMsImpl
-from ghostos.framework.llms.openai_driver import OpenAIDriver, LiteLLMDriver, DeepseekDriver
+from ghostos.framework.llms.openai_driver import OpenAIDriver
+from ghostos.framework.llms.lite_llm_driver import LiteLLMDriver
+from ghostos.framework.llms.deepseek_driver import DeepseekDriver
 from ghostos.framework.llms.prompt_storage_impl import PromptStorageImpl
 from ghostos.contracts.workspace import Workspace
 from ghostos.contracts.logger import LoggerItf
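
With the split, providers.py now pulls each driver from its own module. All three drivers share the LLMDriver shape visible in the diffs: driver_name() identifies the driver and new() builds an adapter. A hedged sketch of the dispatch this implies (the registry and the service config's driver field are assumptions, not LLMsImpl's actual code):

from typing import Dict, Protocol


class DriverLike(Protocol):  # structural stand-in for ghostos' LLMDriver
    def driver_name(self) -> str: ...


def build_registry(*drivers: DriverLike) -> Dict[str, DriverLike]:
    # map each driver's advertised name to the driver instance, so a
    # service config naming e.g. DEEPSEEK_DRIVER_NAME selects DeepseekDriver
    return {driver.driver_name(): driver for driver in drivers}


# hypothetical wiring:
# registry = build_registry(OpenAIDriver(...), LiteLLMDriver(...), DeepseekDriver(...))
# driver = registry[service_conf.driver]
# api = driver.new(service_conf, model_conf)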
