Unload models from VRAM after use to free GPU memory
- Add unloadModel() method to LLMJudge class
- CLI calls unloadModel() after judging completes
- Workflows unload gemma3:4b after inference tests
- Uses Ollama API with keep_alive:0 to trigger unload
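The judge-side half of the change (the new unloadModel() on LLMJudge) is not shown in this diff. As a minimal sketch only, assuming a TypeScript LLMJudge that talks to a local Ollama server on the default port 11434 (the class shape, constructor defaults, and even the language are assumptions, not taken from the repo), the method could look like:

// Hypothetical sketch of the unloadModel() named in the commit message; the
// class shape and defaults are assumptions. Only the keep_alive:0 mechanism
// is confirmed by the workflow diff below.
class LLMJudge {
  constructor(
    private baseUrl: string = "http://localhost:11434",
    private model: string = "gemma3:4b",
  ) {}

  // A /api/generate request with no prompt and keep_alive: 0 generates
  // nothing and asks Ollama to evict the model from VRAM immediately.
  async unloadModel(): Promise<void> {
    try {
      await fetch(`${this.baseUrl}/api/generate`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model: this.model, keep_alive: 0 }),
      });
    } catch {
      // Best-effort cleanup, mirroring the workflows' `|| true`: an unload
      // failure should never mask the judging result.
    }
  }
}

The CLI would then call unloadModel() once judging completes, doing in code what the workflow steps below do with curl.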
.github/workflows/full-pipeline.yml (vendored) | 7 +++++++
@@ -165,6 +165,13 @@ jobs:
             exit 1
           fi
 
+      - name: Unload test model from VRAM
+        if: always()
+        run: |
+          echo "Unloading gemma3:4b from VRAM..."
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:4b","keep_alive":0}' || true
+          echo "Model unloaded"
+
       - name: Upload inference results
         uses: actions/upload-artifact@v4
         if: always()
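In the Ollama generate API, keep_alive controls how long a model stays loaded after a request: a request with no prompt and keep_alive set to 0 performs no generation and unloads the model immediately, which is how this step frees VRAM without producing output. The trailing || true keeps the cleanup from failing the job if the Ollama server is already down, and if: always() runs it even after a failed test step. The same step is added to the inference workflow: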
.github/workflows/inference.yml (vendored) | 7 +++++++
@@ -111,6 +111,13 @@ jobs:
             exit 1
           fi
 
+      - name: Unload test model from VRAM
+        if: always()
+        run: |
+          echo "Unloading gemma3:4b from VRAM..."
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:4b","keep_alive":0}' || true
+          echo "Model unloaded"
+
       - name: Upload inference results
         uses: actions/upload-artifact@v4
         if: always()
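A quick way to verify the unload took effect: the server's /api/ps endpoint (or the ollama ps command) lists the models currently loaded in memory, and it should no longer report gemma3:4b after this step runs.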