@@ -91,11 +91,6 @@ jobs:
   llm_call_test:
     name: Run Tests with Real LM
     runs-on: ubuntu-latest
-    services:
-      ollama:
-        image: ollama/ollama:latest
-        ports:
-          - 11434:11434
     steps:
       - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
@@ -116,15 +111,38 @@ jobs:
         run: |
           uv sync --dev -p .venv --extra dev
           uv pip list
-      - name: Pull LLM
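+      # cache the downloaded model directory so CI runs don't re-pull the model every time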
+      - name: Cache Ollama models
+        id: cache-ollama
+        uses: actions/cache@v4
+        with:
+          path: ollama-data
+          key: ollama-llama3.2-3b-${{ runner.os }}-v1
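+      # run Ollama in a container, mounting the cached directory as its model store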
+      - name: Start Ollama service
         run: |
+          mkdir -p ollama-data
+          docker run -d --name ollama \
+            -p 11434:11434 \
+            -v ${{ github.workspace }}/ollama-data:/root/.ollama \
+            ollama/ollama:latest
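+          # wait for the API to come up before any model pull or test run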
           timeout 60 bash -c 'until curl -f http://localhost:11434/api/version; do sleep 2; done'
-          curl -X POST http://localhost:11434/api/pull \
-            -H "Content-Type: application/json" \
-            -d '{"name": "llama3.2:3b"}'
-          echo "LM_FOR_TEST=ollama/llama3.2:3b" >> $GITHUB_ENV
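+      # a cache hit already restored the model files, so the pull can be skipped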
+      - name: Pull LLM
+        if: steps.cache-ollama.outputs.cache-hit != 'true'
+        run: docker exec ollama ollama pull llama3.2:3b
+      - name: Set LM environment variable
+        run: echo "LM_FOR_TEST=ollama/llama3.2:3b" >> $GITHUB_ENV
       - name: Run tests
         run: uv run -p .venv pytest -m llm_call --llm_call -vv --durations=5 tests/
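+      # the container writes model files as root; chown them so the post-job cache save can read them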
+      - name: Fix permissions for cache
+        if: always()
+        run: sudo chown -R $USER:$USER ollama-data || true
+      - name: Stop Ollama service
+        if: always()
+        run: docker stop ollama && docker rm ollama
 
   build_package:
     name: Build Package