Mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-19 12:17:02 +00:00
Add multi-model inference tests for gemma3 12b and 27b
- TC-INFERENCE-004: gemma3:12b single GPU test
- TC-INFERENCE-005: gemma3:27b dual-GPU test (K80 layer split)
- Each test unloads previous model before loading next
- Workflows unload all 3 model sizes after inference suite
- 27b test verifies both GPUs have memory allocated
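The eviction pattern every test and workflow step relies on is a request to /api/generate with keep_alive set to 0. A minimal sketch of that pattern plus a follow-up residency check; the `ollama ps` call and the script wrapper are additions for illustration, not steps in this commit:

#!/usr/bin/env bash
# Sketch only: unload one gemma3 tag and confirm nothing stays resident.
# Endpoint, port, and container name are taken from the diff below;
# the committed steps stop after the curl call.
set -euo pipefail

MODEL="${1:-gemma3:12b}"

# keep_alive:0 asks the Ollama server to drop the model from VRAM immediately
curl -s http://localhost:11434/api/generate \
  -d "{\"model\":\"${MODEL}\",\"keep_alive\":0}" > /dev/null || true
sleep 2

# `ollama ps` lists currently loaded models; an empty table means VRAM is free
docker exec ollama37 ollama ps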
.github/workflows/full-pipeline.yml (vendored): 8 lines changed
@@ -165,12 +165,14 @@ jobs:
             exit 1
           fi
 
-      - name: Unload test model from VRAM
+      - name: Unload test models from VRAM
         if: always()
         run: |
-          echo "Unloading gemma3:4b from VRAM..."
+          echo "Unloading all test models from VRAM..."
           curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:4b","keep_alive":0}' || true
-          echo "Model unloaded"
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:12b","keep_alive":0}' || true
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:27b","keep_alive":0}' || true
+          echo "All models unloaded"
 
       - name: Upload inference results
         uses: actions/upload-artifact@v4
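The unload calls return as soon as the server accepts them, so eviction may still be in flight when the next step runs. If the workflow ever needs to block until VRAM is actually free, one option is to poll /api/ps until it reports no loaded models; a hedged sketch, not part of this commit, assuming /api/ps returns {"models":[...]} as in upstream Ollama, with the 30-second budget chosen arbitrarily:

# Sketch: wait up to ~30s for all models to be evicted from VRAM.
for i in $(seq 1 15); do
  LOADED=$(curl -s http://localhost:11434/api/ps | jq '.models | length')
  if [ "${LOADED:-0}" -eq 0 ]; then
    echo "All models unloaded"
    break
  fi
  echo "Still loaded: ${LOADED} model(s), retrying..."
  sleep 2
done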
.github/workflows/inference.yml (vendored): 8 lines changed
@@ -111,12 +111,14 @@ jobs:
             exit 1
           fi
 
-      - name: Unload test model from VRAM
+      - name: Unload test models from VRAM
         if: always()
         run: |
-          echo "Unloading gemma3:4b from VRAM..."
+          echo "Unloading all test models from VRAM..."
           curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:4b","keep_alive":0}' || true
-          echo "Model unloaded"
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:12b","keep_alive":0}' || true
+          curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:27b","keep_alive":0}' || true
+          echo "All models unloaded"
 
       - name: Upload inference results
         uses: actions/upload-artifact@v4
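Both workflows now repeat the same three curl calls, so the run block could equally be written as a loop over the tags the suite pulls. A sketch of the equivalent script body, not the committed version:

# Equivalent cleanup, looping over the three gemma3 tags used by the suite.
for MODEL in gemma3:4b gemma3:12b gemma3:27b; do
  echo "Unloading ${MODEL} from VRAM..."
  curl -s http://localhost:11434/api/generate \
    -d "{\"model\":\"${MODEL}\",\"keep_alive\":0}" > /dev/null || true
done
echo "All models unloaded"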
tests/testcases/inference/TC-INFERENCE-004.yml (new file): 120 lines
@@ -0,0 +1,120 @@
id: TC-INFERENCE-004
name: Medium Model (12b) Inference
suite: inference
priority: 4
timeout: 600000

dependencies:
  - TC-INFERENCE-003

steps:
  - name: Unload previous model from VRAM
    command: |
      echo "Unloading any loaded models..."
      curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:4b","keep_alive":0}' || true
      sleep 2
      echo "Previous model unloaded"

  - name: Check if gemma3:12b model exists
    command: docker exec ollama37 ollama list | grep -q "gemma3:12b" && echo "Model exists" || echo "Model not found"

  - name: Pull gemma3:12b model if needed
    command: docker exec ollama37 ollama list | grep -q "gemma3:12b" || docker exec ollama37 ollama pull gemma3:12b
    timeout: 900000

  - name: Verify model available
    command: docker exec ollama37 ollama list | grep gemma3:12b

  - name: Warmup model (preload into GPU)
    command: |
      curl -s http://localhost:11434/api/generate \
        -d '{"model":"gemma3:12b","prompt":"hi","stream":false}' \
        | jq -r '.response' | head -c 100
    timeout: 300000

  - name: Verify model loaded to GPU
    command: |
      cd docker
      LOGS=$(docker compose logs --since=5m 2>&1)

      echo "=== Model Loading Check for gemma3:12b ==="

      # Check for layer offloading to GPU
      if echo "$LOGS" | grep -q "offloaded.*layers to GPU"; then
        echo "SUCCESS: Model layers offloaded to GPU"
        echo "$LOGS" | grep "offloaded.*layers to GPU" | tail -1
      else
        echo "ERROR: Model layers not offloaded to GPU"
        exit 1
      fi

      # Check llama runner started
      if echo "$LOGS" | grep -q "llama runner started"; then
        echo "SUCCESS: Llama runner started"
      else
        echo "ERROR: Llama runner not started"
        exit 1
      fi

  - name: Run inference test
    command: docker exec ollama37 ollama run gemma3:12b "What is the capital of France? Answer in one word." 2>&1
    timeout: 180000

  - name: Check GPU memory usage
    command: |
      echo "=== GPU Memory Usage ==="
      docker exec ollama37 nvidia-smi --query-gpu=index,memory.used,memory.total --format=csv
      echo ""
      echo "=== GPU Processes ==="
      docker exec ollama37 nvidia-smi --query-compute-apps=pid,used_memory --format=csv 2>/dev/null || echo "No GPU processes listed"

  - name: Check for inference errors
    command: |
      cd docker
      LOGS=$(docker compose logs --since=5m 2>&1)

      echo "=== Inference Error Check ==="

      if echo "$LOGS" | grep -qE "CUBLAS_STATUS_"; then
        echo "CRITICAL: CUBLAS error during inference:"
        echo "$LOGS" | grep -E "CUBLAS_STATUS_"
        exit 1
      fi

      if echo "$LOGS" | grep -qE "CUDA error"; then
        echo "CRITICAL: CUDA error during inference:"
        echo "$LOGS" | grep -E "CUDA error"
        exit 1
      fi

      if echo "$LOGS" | grep -qi "out of memory"; then
        echo "ERROR: Out of memory"
        echo "$LOGS" | grep -i "out of memory"
        exit 1
      fi

      echo "SUCCESS: No inference errors"

  - name: Unload model after test
    command: |
      echo "Unloading gemma3:12b from VRAM..."
      curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:12b","keep_alive":0}' || true
      sleep 2
      echo "Model unloaded"

criteria: |
  The gemma3:12b model should run inference on Tesla K80.

  Expected:
  - Model downloads successfully (~8GB)
  - Model loads into GPU (single GPU should be sufficient)
  - Logs show "offloaded X/Y layers to GPU"
  - Logs show "llama runner started"
  - Inference returns a response mentioning "Paris"
  - NO CUBLAS_STATUS_ errors
  - NO CUDA errors
  - NO out of memory errors
  - GPU memory shows allocation (~10GB)

  This is a medium-sized model that should fit in a single K80 GPU.
  Accept any reasonable answer about France's capital.
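The criteria above leave the "mentions Paris" judgment to whoever reviews the test output. If an automated assertion is ever wanted, a sketch using the same /api/generate endpoint and prompt as the test case; the grep check and non-zero exit are additions here, not part of the file:

# Sketch: assert the 12b answer mentions Paris, mirroring the manual criterion.
RESPONSE=$(curl -s http://localhost:11434/api/generate \
  -d '{"model":"gemma3:12b","prompt":"What is the capital of France? Answer in one word.","stream":false}' \
  | jq -r '.response')
echo "Model said: ${RESPONSE}"
if echo "${RESPONSE}" | grep -qi "paris"; then
  echo "SUCCESS: answer mentions Paris"
else
  echo "ERROR: unexpected answer"
  exit 1
fi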
tests/testcases/inference/TC-INFERENCE-005.yml (new file): 160 lines
@@ -0,0 +1,160 @@
id: TC-INFERENCE-005
name: Large Model (27b) Dual-GPU Inference
suite: inference
priority: 5
timeout: 900000

dependencies:
  - TC-INFERENCE-004

steps:
  - name: Unload previous model from VRAM
    command: |
      echo "Unloading any loaded models..."
      curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:12b","keep_alive":0}' || true
      sleep 2
      echo "Previous model unloaded"

  - name: Verify dual GPU availability
    command: |
      echo "=== GPU Configuration ==="
      GPU_COUNT=$(docker exec ollama37 nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
      echo "GPUs detected: $GPU_COUNT"

      if [ "$GPU_COUNT" -lt 2 ]; then
        echo "WARNING: Less than 2 GPUs detected. 27b model may not fit."
      fi

      docker exec ollama37 nvidia-smi --query-gpu=index,name,memory.total,memory.free --format=csv

  - name: Check if gemma3:27b model exists
    command: docker exec ollama37 ollama list | grep -q "gemma3:27b" && echo "Model exists" || echo "Model not found"

  - name: Pull gemma3:27b model if needed
    command: docker exec ollama37 ollama list | grep -q "gemma3:27b" || docker exec ollama37 ollama pull gemma3:27b
    timeout: 1200000

  - name: Verify model available
    command: docker exec ollama37 ollama list | grep gemma3:27b

  - name: Warmup model (preload into both GPUs)
    command: |
      curl -s http://localhost:11434/api/generate \
        -d '{"model":"gemma3:27b","prompt":"hi","stream":false}' \
        | jq -r '.response' | head -c 100
    timeout: 600000

  - name: Verify model loaded across GPUs
    command: |
      cd docker
      LOGS=$(docker compose logs --since=10m 2>&1)

      echo "=== Model Loading Check for gemma3:27b ==="

      # Check for layer offloading to GPU
      if echo "$LOGS" | grep -q "offloaded.*layers to GPU"; then
        echo "SUCCESS: Model layers offloaded to GPU"
        echo "$LOGS" | grep "offloaded.*layers to GPU" | tail -1
      else
        echo "ERROR: Model layers not offloaded to GPU"
        exit 1
      fi

      # Check llama runner started
      if echo "$LOGS" | grep -q "llama runner started"; then
        echo "SUCCESS: Llama runner started"
      else
        echo "ERROR: Llama runner not started"
        exit 1
      fi

      # Check for multi-GPU allocation in memory logs
      echo ""
      echo "=== GPU Memory Allocation ==="
      echo "$LOGS" | grep -E "device=CUDA" | tail -10

  - name: Verify both GPUs have memory allocated
    command: |
      echo "=== GPU Memory Usage ==="
      docker exec ollama37 nvidia-smi --query-gpu=index,memory.used,memory.total --format=csv

      echo ""
      echo "=== Per-GPU Process Memory ==="
      docker exec ollama37 nvidia-smi pmon -c 1 2>/dev/null || docker exec ollama37 nvidia-smi

      # Check both GPUs are being used
      GPU0_MEM=$(docker exec ollama37 nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0 | tr -d ' ')
      GPU1_MEM=$(docker exec ollama37 nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 1 | tr -d ' ')

      echo ""
      echo "GPU 0 memory used: ${GPU0_MEM} MiB"
      echo "GPU 1 memory used: ${GPU1_MEM} MiB"

      # Both GPUs should have significant memory usage for 27b model
      if [ "$GPU0_MEM" -gt 1000 ] && [ "$GPU1_MEM" -gt 1000 ]; then
        echo "SUCCESS: Both GPUs have significant memory allocation (dual-GPU split confirmed)"
      else
        echo "WARNING: One GPU may have low memory usage - model might not be split optimally"
      fi

  - name: Run inference test
    command: docker exec ollama37 ollama run gemma3:27b "Explain quantum entanglement in one sentence." 2>&1
    timeout: 300000

  - name: Check for inference errors
    command: |
      cd docker
      LOGS=$(docker compose logs --since=10m 2>&1)

      echo "=== Inference Error Check ==="

      if echo "$LOGS" | grep -qE "CUBLAS_STATUS_"; then
        echo "CRITICAL: CUBLAS error during inference:"
        echo "$LOGS" | grep -E "CUBLAS_STATUS_"
        exit 1
      fi

      if echo "$LOGS" | grep -qE "CUDA error"; then
        echo "CRITICAL: CUDA error during inference:"
        echo "$LOGS" | grep -E "CUDA error"
        exit 1
      fi

      if echo "$LOGS" | grep -qi "out of memory"; then
        echo "ERROR: Out of memory"
        echo "$LOGS" | grep -i "out of memory"
        exit 1
      fi

      echo "SUCCESS: No inference errors"

  - name: Unload model after test
    command: |
      echo "Unloading gemma3:27b from VRAM..."
      curl -s http://localhost:11434/api/generate -d '{"model":"gemma3:27b","keep_alive":0}' || true
      sleep 3
      echo "Model unloaded"

  - name: Verify VRAM released
    command: |
      echo "=== Post-Unload GPU Memory ==="
      docker exec ollama37 nvidia-smi --query-gpu=index,memory.used,memory.total --format=csv

criteria: |
  The gemma3:27b model should run inference using both GPUs on Tesla K80.

  Expected:
  - Model downloads successfully (~17GB)
  - Model loads and splits across both K80 GPUs
  - Logs show "offloaded X/Y layers to GPU"
  - Logs show "llama runner started"
  - Both GPU 0 and GPU 1 show significant memory usage (>1GB each)
  - Inference returns a coherent response about quantum entanglement
  - NO CUBLAS_STATUS_ errors
  - NO CUDA errors
  - NO out of memory errors

  This is a large model that requires dual-GPU on K80 (11GB + 11GB = 22GB available).
  The model (~17GB) should split layers across both GPUs.
  Accept any reasonable explanation of quantum entanglement.
  Inference will be slower than smaller models due to cross-GPU communication.
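The final "Verify VRAM released" step only prints the memory table. If the suite ever needs a hard assertion that both GPUs dropped back to an idle level after the unload, a sketch in the style of the existing memory checks; the 500 MiB threshold is an assumption, not a value from this commit:

# Sketch: fail if either K80 GPU still holds more than ~500 MiB after unload.
for GPU in 0 1; do
  USED=$(docker exec ollama37 nvidia-smi \
    --query-gpu=memory.used --format=csv,noheader,nounits -i "$GPU" | tr -d ' ')
  echo "GPU ${GPU} memory used after unload: ${USED} MiB"
  if [ "${USED}" -gt 500 ]; then
    echo "ERROR: GPU ${GPU} still holds ${USED} MiB"
    exit 1
  fi
done
echo "SUCCESS: VRAM released on both GPUs"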