Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 5 additions & 14 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ See also: [agentic-slackbot](https://github.com/John-Lin/agentic-slackbot) — a
- Supports OpenAI, Azure OpenAI endpoints
- Per-conversation history with automatic truncation
- Group reply chain — after `@mention`, anyone can continue by replying
- Local shell skills — let the agent run shell scripts from `skills/` (opt-in via `SHELL_SKILLS_ENABLED`)

## Install Dependencies

Expand Down Expand Up @@ -41,6 +42,9 @@ export TELEGRAM_BOT_TOKEN=""
# OpenAI API
export OPENAI_API_KEY=""
export OPENAI_MODEL="gpt-5.4"

# Shell skills (disabled by default)
# export SHELL_SKILLS_ENABLED=1
```

## Agent Instructions
Expand Down Expand Up @@ -180,6 +184,7 @@ docker run -d \
-e TELEGRAM_BOT_TOKEN="" \
-e OPENAI_API_KEY="" \
-e OPENAI_MODEL="gpt-5.4" \
-v /path/to/instructions.md:/app/instructions.md \
-v /path/to/access.json:/app/access.json \
agentic-telegram-bot
```
Expand All @@ -198,17 +203,3 @@ docker run -d \
-v /path/to/access.json:/app/access.json \
agentic-telegram-bot
```

If you do not use MCP servers, you still need to mount `instructions.md`:

```bash
docker run -d \
--name telegent \
-e BOT_USERNAME="@your_bot_username" \
-e TELEGRAM_BOT_TOKEN="" \
-e OPENAI_API_KEY="" \
-e OPENAI_MODEL="gpt-5.4" \
-v /path/to/instructions.md:/app/instructions.md \
-v /path/to/access.json:/app/access.json \
agentic-telegram-bot
```
98 changes: 96 additions & 2 deletions bot/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@

from agents import Agent
from agents import Runner
from agents import ShellCommandRequest
from agents import ShellTool
from agents import ShellToolLocalEnvironment
from agents import ShellToolLocalSkill
from agents import TResponseInputItem
from agents.mcp import MCPServerStdio
from agents.mcp import MCPServerStreamableHttp
Expand All @@ -20,6 +24,8 @@

MAX_TURNS = 10
MCP_SESSION_TIMEOUT_SECONDS = 30.0
SHELL_TIMEOUT = 30.0
SKILLS_DIR = Path(__file__).resolve().parent.parent / "skills"

set_tracing_disabled(True)

Expand Down Expand Up @@ -58,20 +64,101 @@ def _get_model() -> OpenAIResponsesModel | OpenAIChatCompletionsModel:
return OpenAIResponsesModel(model=model_name, openai_client=client)


def _parse_skill_description(content: str) -> str:
"""Return the description field from a SKILL.md YAML frontmatter, or ""."""
if not content.startswith("---"):
return ""
end = content.find("\n---", 3)
if end == -1:
return ""
for line in content[3:end].splitlines():
if line.startswith("description:"):
value = line[len("description:") :].strip()
if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
value = value[1:-1]
return value
return ""


def _load_shell_skills() -> list[ShellToolLocalSkill]:
    """Discover local shell skills under SKILLS_DIR.

    Every immediate subdirectory of SKILLS_DIR holding a SKILL.md becomes one
    ShellToolLocalSkill: the directory name is the skill name and the
    description comes from the SKILL.md YAML frontmatter. Unreadable manifest
    files are logged and skipped rather than aborting discovery.
    """
    if not SKILLS_DIR.is_dir():
        return []
    discovered: list[ShellToolLocalSkill] = []
    # Sort for a deterministic skill order across runs.
    for entry in sorted(SKILLS_DIR.iterdir()):
        manifest = entry / "SKILL.md"
        if not (entry.is_dir() and manifest.is_file()):
            continue
        try:
            manifest_text = manifest.read_text(encoding="utf-8")
        except (OSError, UnicodeDecodeError):
            logging.warning("Skipping unreadable skill file: %s", manifest, exc_info=True)
            continue
        skill = ShellToolLocalSkill(
            name=entry.name,
            description=_parse_skill_description(manifest_text),
            path=str(entry),
        )
        discovered.append(skill)
    return discovered


async def _shell_executor(request: ShellCommandRequest) -> str:
"""Run each shell command from the request and return combined output.

Honours action.timeout_ms when set, otherwise falls back to SHELL_TIMEOUT.
stderr is merged into stdout for simplicity.
"""
action = request.data.action
timeout = (action.timeout_ms / 1000.0) if action.timeout_ms is not None else SHELL_TIMEOUT

outputs: list[str] = []
for command in action.commands:
try:
proc = await asyncio.create_subprocess_shell(
command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
except OSError as e:
outputs.append(f"Failed to run command: {command}: {e}")
break
try:
stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=timeout)
output = stdout.decode("utf-8", errors="replace")
if proc.returncode:
output += f"\n[exit code: {proc.returncode}]"
outputs.append(output)
except TimeoutError:
proc.kill()
await proc.communicate()
outputs.append(f"Command timed out after {timeout}s: {command}")
break
return "\n".join(outputs)


class OpenAIAgent:
"""A wrapper for OpenAI Agent with MCP server support."""
"""A wrapper for OpenAI Agent with MCP server and local shell skill support."""

def __init__(
    self,
    name: str,
    instructions: str,
    mcp_servers: list | None = None,
    tools: list | None = None,
) -> None:
    """Build the underlying Agent and initialise per-conversation state.

    Args:
        name: Agent display name; also stored on the wrapper as ``self.name``.
        instructions: System instructions passed through to the Agent.
        mcp_servers: Optional MCP server instances; ``None`` means no servers.
        tools: Optional tool instances (e.g. a ShellTool); ``None`` means none.
    """
    self.agent = Agent(
        name=name,
        instructions=instructions,
        model=_get_model(),
        # Normalise None to [] here instead of using mutable defaults.
        mcp_servers=(mcp_servers if mcp_servers is not None else []),
        tools=(tools if tools is not None else []),
    )
    self.name = name
    # Conversation input history keyed by an int id (presumably the Telegram
    # chat id — confirm against the caller that populates it).
    self._conversations: dict[int, list[TResponseInputItem]] = {}
Expand Down Expand Up @@ -127,8 +214,15 @@ def from_dict(cls, name: str, config: dict[str, Any]) -> OpenAIAgent:
},
)
)
tools: list[Any] = []
if os.getenv("SHELL_SKILLS_ENABLED"):
skills = _load_shell_skills()
if skills:
environment = ShellToolLocalEnvironment(type="local", skills=skills)
tools.append(ShellTool(executor=_shell_executor, environment=environment))

instructions = _load_instructions()
return cls(name, instructions=instructions, mcp_servers=mcp_servers)
return cls(name, instructions=instructions, mcp_servers=mcp_servers, tools=tools)

async def connect(self) -> None:
for mcp_server in self.agent.mcp_servers:
Expand Down
4 changes: 4 additions & 0 deletions skills/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Users drop their own shell skills into this directory; the bot auto-loads
# any subdirectory containing a SKILL.md. Skill content is not tracked.
*
!.gitignore
Loading
Loading