Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
94 changes: 94 additions & 0 deletions gui_agents/s3/core/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -443,3 +443,97 @@ def generate(self, messages, temperature=0.0, max_new_tokens=None, **kwargs):
.choices[0]
.message.content
)


class LMMEngineDeepSeek(LMMEngine):
    """OpenAI-compatible chat-completion engine backed by the DeepSeek API.

    The ``OpenAI`` client is constructed lazily on the first ``generate``
    call, so a missing API key surfaces at request time rather than at
    construction time.
    """

    def __init__(
        self,
        base_url=None,
        api_key=None,
        model=None,
        rate_limit=-1,
        temperature=None,
        **kwargs,
    ):
        # The model name is the only mandatory argument.
        assert model is not None, "model must be provided"
        self.model = model
        self.api_key = api_key
        self.base_url = base_url
        # Minimum seconds between requests; -1 disables throttling.
        # NOTE(review): stored but never consulted inside generate() —
        # confirm whether throttling is enforced by a caller.
        self.request_interval = 0 if rate_limit == -1 else 60.0 / rate_limit
        # Built lazily on first use (see generate()).
        self.llm_client = None
        # Engine-level temperature; when set it overrides the per-call value.
        self.temperature = temperature

    @backoff.on_exception(
        backoff.expo, (APIConnectionError, APIError, RateLimitError), max_time=60
    )
    def generate(
        self,
        messages,
        temperature=0.0,
        max_new_tokens=None,
        **kwargs,
    ):
        """Send *messages* to DeepSeek and return the reply message text.

        Retries with exponential backoff (max 60s total) on connection,
        API and rate-limit errors via the decorator above.

        Raises:
            ValueError: if no API key is available from either the
                constructor or the DEEPSEEK_API_KEY environment variable.
        """
        resolved_key = self.api_key or os.getenv("DEEPSEEK_API_KEY")
        if resolved_key is None:
            raise ValueError(
                "A DeepSeek API key needs to be provided in either the api_key parameter or as an environment variable named DEEPSEEK_API_KEY"
            )
        endpoint = self.base_url or "https://api.deepseek.com"
        if not self.llm_client:
            self.llm_client = OpenAI(base_url=endpoint, api_key=resolved_key)
        effective_temperature = (
            temperature if self.temperature is None else self.temperature
        )
        response = self.llm_client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=max_new_tokens or 4096,
            temperature=effective_temperature,
            **kwargs,
        )
        return response.choices[0].message.content
Comment thread
coderabbitai[bot] marked this conversation as resolved.


class LMMEngineQwen(LMMEngine):
    """Chat-completion engine for Qwen via DashScope's OpenAI-compatible mode.

    Client construction is deferred to the first ``generate`` call, so
    credential problems are reported when a request is actually made.
    """

    def __init__(
        self,
        base_url=None,
        api_key=None,
        model=None,
        rate_limit=-1,
        temperature=None,
        **kwargs,
    ):
        # Only the model name is required; other settings have fallbacks.
        assert model is not None, "model must be provided"
        self.model = model
        self.api_key = api_key
        self.base_url = base_url
        # Seconds between requests derived from requests-per-minute;
        # -1 means no throttling.
        # NOTE(review): this field is not read by generate() — verify
        # whether a caller applies the interval.
        self.request_interval = 0 if rate_limit == -1 else 60.0 / rate_limit
        self.llm_client = None  # created lazily in generate()
        self.temperature = temperature  # overrides the per-call value when set

    @backoff.on_exception(
        backoff.expo, (APIConnectionError, APIError, RateLimitError), max_time=60
    )
    def generate(
        self,
        messages,
        temperature=0.0,
        max_new_tokens=None,
        **kwargs,
    ):
        """Send *messages* to the DashScope endpoint and return the reply text.

        Retries with exponential backoff (max 60s total) on connection,
        API and rate-limit errors via the decorator above.

        Raises:
            ValueError: if no API key is supplied via the constructor or
                the DASHSCOPE_API_KEY environment variable.
        """
        resolved_key = self.api_key or os.getenv("DASHSCOPE_API_KEY")
        if resolved_key is None:
            raise ValueError(
                "A Qwen/DashScope API key needs to be provided in either the api_key parameter or as an environment variable named DASHSCOPE_API_KEY"
            )
        endpoint = (
            self.base_url or "https://dashscope.aliyuncs.com/compatible-mode/v1"
        )
        if not self.llm_client:
            self.llm_client = OpenAI(base_url=endpoint, api_key=resolved_key)
        effective_temperature = (
            temperature if self.temperature is None else self.temperature
        )
        response = self.llm_client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=max_new_tokens or 4096,
            temperature=effective_temperature,
            **kwargs,
        )
        return response.choices[0].message.content
Comment thread
coderabbitai[bot] marked this conversation as resolved.
8 changes: 8 additions & 0 deletions gui_agents/s3/core/mllm.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,12 @@
from gui_agents.s3.core.engine import (
LMMEngineAnthropic,
LMMEngineAzureOpenAI,
LMMEngineDeepSeek,
LMMEngineHuggingFace,
LMMEngineOpenAI,
LMMEngineOpenRouter,
LMMEngineParasail,
LMMEngineQwen,
LMMEnginevLLM,
LMMEngineGemini,
)
Expand All @@ -35,6 +37,10 @@ def __init__(self, engine_params=None, system_prompt=None, engine=None):
self.engine = LMMEngineOpenRouter(**engine_params)
elif engine_type == "parasail":
self.engine = LMMEngineParasail(**engine_params)
elif engine_type == "deepseek":
self.engine = LMMEngineDeepSeek(**engine_params)
elif engine_type == "qwen":
self.engine = LMMEngineQwen(**engine_params)
else:
raise ValueError(f"engine_type '{engine_type}' is not supported")
else:
Expand Down Expand Up @@ -125,10 +131,12 @@ def add_message(
(
LMMEngineOpenAI,
LMMEngineAzureOpenAI,
LMMEngineDeepSeek,
LMMEngineHuggingFace,
LMMEngineGemini,
LMMEngineOpenRouter,
LMMEngineParasail,
LMMEngineQwen,
),
):
# infer role from previous message
Expand Down