Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,6 @@ __pycache__
firecrawl_selfhost_servers_config.json
jira_servers_config.json
servers_config.json
instructions.md
.envrc
.pre-commit-config.yaml
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,5 +10,6 @@ wheels/
.venv
.envrc
servers_config.json
instructions.md
access.json
.access.pending.json
32 changes: 28 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,17 @@ export OPENAI_API_KEY=""
export OPENAI_MODEL="gpt-5.4"
```

## Agent Instructions

The bot loads its system prompt from `instructions.md` in the project root.
If the file is missing, the bot fails fast at startup.

You can copy `instructions.md.example` as a starting point:

```bash
cp instructions.md.example instructions.md
```

If you are using Azure OpenAI (v1 API), set these instead:

```
Expand All @@ -57,7 +68,6 @@ Create a `servers_config.json` file to add your MCP servers. If this file is not

```json
{
"instructions": "Your custom system prompt here.",
"mcpServers": {
"my-server": {
"command": "uvx",
Expand All @@ -67,13 +77,13 @@ Create a `servers_config.json` file to add your MCP servers. If this file is not
}
```

For HTTP-based MCP servers (Streamable HTTP), use `httpUrl`:
For HTTP-based MCP servers (Streamable HTTP), use `url`:

```json
{
"mcpServers": {
"my-server": {
"httpUrl": "https://mcp.example.com/mcp",
"url": "https://mcp.example.com/mcp",
"headers": {
"Accept": "application/json, text/event-stream"
}
Expand All @@ -86,7 +96,6 @@ For local MCP servers, use `uv --directory`:

```json
{
"instructions": "Your custom system prompt here.",
"mcpServers": {
"my-server": {
"command": "uv",
Expand Down Expand Up @@ -184,7 +193,22 @@ docker run -d \
-e TELEGRAM_BOT_TOKEN="" \
-e OPENAI_API_KEY="" \
-e OPENAI_MODEL="gpt-5.4" \
-v /path/to/instructions.md:/app/instructions.md \
-v /path/to/servers_config.json:/app/servers_config.json \
-v /path/to/access.json:/app/access.json \
agentic-telegram-bot
```

If you do not use MCP servers, you still need to mount `instructions.md`:

```bash
docker run -d \
--name telegent \
-e BOT_USERNAME="@your_bot_username" \
-e TELEGRAM_BOT_TOKEN="" \
-e OPENAI_API_KEY="" \
-e OPENAI_MODEL="gpt-5.4" \
-v /path/to/instructions.md:/app/instructions.md \
-v /path/to/access.json:/app/access.json \
agentic-telegram-bot
```
29 changes: 20 additions & 9 deletions bot/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import asyncio
import logging
import os
from pathlib import Path
from typing import Any

from agents import Agent
Expand All @@ -15,19 +16,29 @@
from agents.tracing import set_tracing_disabled
from openai import AsyncOpenAI

DEFAULT_INSTRUCTIONS = (
"You are a helpful assistant in a Telegram chat. "
"When referencing articles, websites, or resources, always include "
"the URL as a markdown hyperlink, e.g. [title](https://example.com). "
"Keep responses concise and well-structured for mobile reading."
)
INSTRUCTIONS_FILE = Path("instructions.md")

MAX_TURNS = 10
MCP_SESSION_TIMEOUT_SECONDS = 30.0

set_tracing_disabled(True)


def _load_instructions() -> str:
    """Read the agent system prompt from ``instructions.md`` in the working directory.

    Raises:
        FileNotFoundError: with a descriptive message when the file is absent,
            so a misconfigured deployment surfaces immediately at startup
            instead of at first use.
    """
    try:
        return INSTRUCTIONS_FILE.read_text(encoding="utf-8")
    except FileNotFoundError as missing:
        message = (
            f"Instructions file not found: {INSTRUCTIONS_FILE.resolve()}. "
            "Create or mount instructions.md with the agent system prompt."
        )
        raise FileNotFoundError(message) from missing


def _get_model() -> OpenAIResponsesModel | OpenAIChatCompletionsModel:
"""Create an OpenAI model from environment variables.

Expand All @@ -53,8 +64,8 @@ class OpenAIAgent:
def __init__(
self,
name: str,
instructions: str,
mcp_servers: list | None = None,
instructions: str = DEFAULT_INSTRUCTIONS,
) -> None:
self.agent = Agent(
name=name,
Expand Down Expand Up @@ -116,8 +127,8 @@ def from_dict(cls, name: str, config: dict[str, Any]) -> OpenAIAgent:
},
)
)
instructions = config.get("instructions", DEFAULT_INSTRUCTIONS)
return cls(name, mcp_servers, instructions=instructions)
instructions = _load_instructions()
return cls(name, instructions=instructions, mcp_servers=mcp_servers)

async def connect(self) -> None:
for mcp_server in self.agent.mcp_servers:
Expand Down
1 change: 1 addition & 0 deletions instructions.md.example
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
You are a helpful assistant in a Telegram chat. When referencing articles, websites, or resources, always include the URL as a markdown hyperlink, e.g. [title](https://example.com). Keep responses concise and well-structured for mobile reading.
1 change: 0 additions & 1 deletion servers_config.example.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
{
"instructions": "You are a helpful financial assistant. Help users look up stock data, news, and market information. Always include ticker symbols. Respond in the user's language. Keep responses concise for mobile reading. Do not offer follow-up suggestions or numbered options after answering.",
"mcpServers": {
"yfmcp": {
"command": "uvx",
Expand Down
50 changes: 24 additions & 26 deletions tests/test_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,17 @@
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
from agents.models.openai_responses import OpenAIResponsesModel

from bot.agents import DEFAULT_INSTRUCTIONS
from bot.agents import MAX_TURNS
from bot.agents import OpenAIAgent
from bot.agents import _get_model


@pytest.fixture
def _stub_instructions(monkeypatch):
    """Patch ``bot.agents._load_instructions`` to return a fixed prompt.

    Lets ``from_dict`` tests run without an instructions.md file on disk.
    """

    def fake_load() -> str:
        return "stub instructions"

    monkeypatch.setattr("bot.agents._load_instructions", fake_load)


@pytest.fixture(autouse=True)
def _mock_model(monkeypatch):
"""Prevent tests from constructing a real OpenAI client."""
Expand Down Expand Up @@ -49,7 +54,7 @@ def test_returns_chat_completions_model_when_api_type_is_chat_completions(self,
class TestPerChatConversations:
def test_separate_chats_have_independent_history(self):
"""Different chat_ids should maintain separate message histories."""
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
agent.append_user_message(chat_id=100, message="hello from chat 100")
agent.append_user_message(chat_id=200, message="hello from chat 200")

Expand All @@ -62,7 +67,7 @@ def test_separate_chats_have_independent_history(self):
assert msgs_200[0]["content"] == "hello from chat 200"

def test_same_chat_accumulates_messages(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
agent.append_user_message(chat_id=100, message="first")
agent.append_user_message(chat_id=100, message="second")

Expand All @@ -72,18 +77,18 @@ def test_same_chat_accumulates_messages(self):
assert msgs[1]["content"] == "second"

def test_unknown_chat_returns_empty(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
assert agent.get_messages(chat_id=999) == []

def test_set_messages_replaces_history(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
agent.append_user_message(chat_id=100, message="old")
new_msgs = [{"role": "user", "content": "replaced"}]
agent.set_messages(chat_id=100, messages=new_msgs)
assert agent.get_messages(chat_id=100) == new_msgs

def test_set_messages_does_not_affect_other_chats(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
agent.append_user_message(chat_id=100, message="chat 100")
agent.append_user_message(chat_id=200, message="chat 200")
agent.set_messages(chat_id=100, messages=[])
Expand All @@ -92,30 +97,23 @@ def test_set_messages_does_not_affect_other_chats(self):


class TestInstructions:
def test_default_instructions_when_none_provided(self):
agent = OpenAIAgent(name="test")
assert agent.agent.instructions == DEFAULT_INSTRUCTIONS

def test_custom_instructions(self):
agent = OpenAIAgent(name="test", instructions="Be a HN bot.")
assert agent.agent.instructions == "Be a HN bot."

def test_from_dict_reads_instructions(self):
config = {
"instructions": "Custom prompt here.",
"mcpServers": {},
}
agent = OpenAIAgent.from_dict("test", config)
assert agent.agent.instructions == "Custom prompt here."
def test_from_dict_loads_instructions_from_file(self, tmp_path, monkeypatch):
    """from_dict should read the agent prompt from instructions.md in the cwd."""
    monkeypatch.chdir(tmp_path)
    prompt_file = tmp_path / "instructions.md"
    prompt_file.write_text("From file prompt.", encoding="utf-8")

    agent = OpenAIAgent.from_dict("test", {"mcpServers": {}})

    assert agent.agent.instructions == "From file prompt."

def test_from_dict_uses_default_without_instructions(self):
config = {
"mcpServers": {},
}
agent = OpenAIAgent.from_dict("test", config)
assert agent.agent.instructions == DEFAULT_INSTRUCTIONS
def test_from_dict_fails_fast_when_instructions_file_missing(self, tmp_path, monkeypatch):
    """A missing instructions.md should raise at construction time, not later."""
    monkeypatch.chdir(tmp_path)

    config = {"mcpServers": {}}
    with pytest.raises(FileNotFoundError, match="Instructions file not found"):
        OpenAIAgent.from_dict("test", config)


@pytest.mark.usefixtures("_stub_instructions")
class TestFromDictMcpServers:
def test_url_creates_streamable_http_server(self):
config = {
Expand Down Expand Up @@ -172,7 +170,7 @@ def test_default_max_turns(self):
assert MAX_TURNS == 10

def test_truncate_keeps_recent_turns(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
# Simulate 30 turns: each turn is a user msg + assistant msg
for i in range(30):
agent.set_messages(
Expand All @@ -196,7 +194,7 @@ def test_truncate_keeps_recent_turns(self):
assert user_msgs[-1]["content"] == "user-29"

def test_truncate_preserves_tool_messages_within_turn(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
# Build history with tool calls in a turn
history = []
for i in range(MAX_TURNS + 2):
Expand All @@ -218,7 +216,7 @@ def test_truncate_preserves_tool_messages_within_turn(self):
assert len(tool_msgs) == 1

def test_no_truncation_when_under_limit(self):
agent = OpenAIAgent(name="test")
agent = OpenAIAgent(name="test", instructions="test-prompt")
for i in range(3):
agent.set_messages(
chat_id=100,
Expand Down
Loading