# This workflow comes from https://github.com/ofek/hatch-mypyc
# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml
name: Test / llama_stack

on:
  # Nightly run at midnight UTC.
  schedule:
    - cron: "0 0 * * *"
  # Only run on PRs that touch this integration (or this workflow itself).
  pull_request:
    paths:
      - "integrations/llama_stack/**"
      - "!integrations/llama_stack/*.md"
      - ".github/workflows/llama_stack.yml"

defaults:
  run:
    working-directory: integrations/llama_stack

# Cancel in-flight runs for the same PR branch when new commits are pushed.
concurrency:
  group: llama_stack-${{ github.head_ref }}
  cancel-in-progress: true

env:
  PYTHONUNBUFFERED: "1"
  FORCE_COLOR: "1"

jobs:
  run:
    name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest] # to test on other Operating Systems, we need to install Ollama differently
        python-version: ["3.10", "3.13"]

    steps:
      - uses: actions/checkout@v4

      - name: Install and run Ollama Server as inference provider (needed for Llama Stack Server)
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 2
          max_attempts: 3
          command: |
            curl -fsSL https://ollama.com/install.sh | sh
            ollama serve &

            # Check if the service is up and running with a timeout of 60 seconds
            timeout=60
            while [ $timeout -gt 0 ] && ! curl -sSf http://localhost:11434/ > /dev/null; do
              echo "Waiting for Ollama service to start..."
              sleep 5
              ((timeout-=5))
            done

            if [ $timeout -eq 0 ]; then
              echo "Timed out waiting for Ollama service to start."
              exit 1
            fi

            echo "Ollama service started successfully."

      - name: Pull models
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 2
          max_attempts: 5
          command: |
            ollama pull llama3.2:3b
            ollama list | grep -q "llama3.2:3b" || { echo "Model llama3.2:3b not pulled."; exit 1; }

            echo "Models pulled successfully."

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Test Llama Stack Server
        run: |
          pip install uv
          INFERENCE_MODEL=llama3.2:3b uv run --with llama-stack llama stack build --template ollama --image-type venv --run &

          echo "Llama Stack Server started successfully."

      - name: Install Hatch
        run: pip install --upgrade hatch

      - name: Lint
        if: matrix.python-version == '3.10' && runner.os == 'Linux'
        run: hatch run fmt-check && hatch run test:types

      - name: Generate docs
        if: matrix.python-version == '3.10' && runner.os == 'Linux'
        run: hatch run docs

      - name: Run tests
        run: hatch run test:cov-retry

      - name: Run unit tests with lowest direct dependencies
        run: |
          hatch run uv pip compile pyproject.toml --resolution lowest-direct --output-file requirements_lowest_direct.txt
          hatch run uv pip install -r requirements_lowest_direct.txt
          hatch run test:unit

      - name: Nightly - run unit tests with Haystack main branch
        if: github.event_name == 'schedule'
        run: |
          hatch env prune
          hatch run uv pip install git+https://github.com/deepset-ai/haystack.git@main
          hatch run test:unit

      - name: Send event to Datadog for nightly failures
        if: failure() && github.event_name == 'schedule'
        uses: ./.github/actions/send_failure
        with:
          title: |
            Core integrations nightly tests failure: ${{ github.workflow }}
          api-key: ${{ secrets.CORE_DATADOG_API_KEY }}