id: TC-RUNTIME-002
name: GPU Detection
suite: runtime
priority: 2
timeout: 120000

dependencies:
  - TC-RUNTIME-001
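# Note: the timeout is assumed to be in milliseconds (120000 ms = 2 minutes); the
# dependency means TC-RUNTIME-001 must complete before this case runs.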
steps:
  - name: Check nvidia-smi inside container
    command: docker exec ollama37 nvidia-smi
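  # Expected (per the criteria below): nvidia-smi lists one or more Tesla K80 GPUs
  # with a 470.x driver; exact utilization and memory figures will vary.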
  - name: Check CUDA libraries
    command: docker exec ollama37 ldconfig -p | grep -i cuda | head -5
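  # Expected (per the criteria below): libcuda and libcublas entries appear in the
  # ldconfig cache; the exact .so names and versions depend on the image's CUDA install.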
  - name: Verify UVM device files
    command: |
      if [ ! -e /dev/nvidia-uvm ]; then
        echo "WARNING: UVM device missing, creating with nvidia-modprobe..."
        sudo nvidia-modprobe -u -c=0
        echo "Restarting container to pick up UVM devices..."
        cd docker && docker compose restart
        sleep 15
        echo "UVM device fix applied"
      else
        echo "SUCCESS: UVM device file present"
        ls -l /dev/nvidia-uvm
      fi
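  # Note: nvidia-modprobe -u creates the /dev/nvidia-uvm* device nodes (and -c=0 the
  # device node for minor 0); without them the CUDA runtime cannot initialize even
  # though nvidia-smi works (see the NOTE in the criteria below).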
  - name: Verify GPU detection in Ollama logs
    command: |
      cd docker
      LOGS=$(docker compose logs 2>&1)

      echo "=== GPU Detection Check ==="

      # Check for inference compute with CUDA library
      if echo "$LOGS" | grep -q "inference compute.*library=CUDA"; then
        echo "SUCCESS: GPU detected with CUDA library"
        echo "$LOGS" | grep "inference compute" | head -2
      else
        echo "ERROR: GPU not detected with CUDA library"
        exit 1
      fi

      # Check for Tesla K80 specifically
      if echo "$LOGS" | grep -q 'description="Tesla K80"'; then
        echo "SUCCESS: Tesla K80 GPU identified"
      else
        echo "WARNING: Tesla K80 not explicitly identified"
      fi

      # Check compute capability 3.7
      if echo "$LOGS" | grep -q "compute=3.7"; then
        echo "SUCCESS: Compute capability 3.7 detected"
      else
        echo "WARNING: Compute capability 3.7 not detected"
      fi
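  # Illustrative shape of the log line matched above (approximate; the exact fields
  # and ordering depend on the Ollama build):
  #   ... inference compute ... library=CUDA ... compute=3.7 ... description="Tesla K80"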
  - name: Verify CUDA initialization in logs
    command: |
      cd docker
      LOGS=$(docker compose logs 2>&1)

      echo "=== CUDA Initialization Check ==="

      # Check ggml_cuda_init
      if echo "$LOGS" | grep -q "ggml_cuda_init: found"; then
        echo "SUCCESS: CUDA initialized"
        echo "$LOGS" | grep "ggml_cuda_init: found" | head -1
      else
        echo "ERROR: CUDA not initialized"
        exit 1
      fi

      # Check CUDA backend loaded
      if echo "$LOGS" | grep -q "load_backend: loaded CUDA backend"; then
        echo "SUCCESS: CUDA backend loaded"
        echo "$LOGS" | grep "load_backend: loaded CUDA backend" | head -1
      else
        echo "ERROR: CUDA backend not loaded"
        exit 1
      fi
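  # Illustrative shape of the log lines matched above (approximate):
  #   ggml_cuda_init: found N CUDA devices
  #   load_backend: loaded CUDA backend ...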
  - name: Check for GPU-related errors in logs
    command: |
      cd docker
      LOGS=$(docker compose logs 2>&1)

      echo "=== GPU Error Check ==="

      # Check for critical CUDA/CUBLAS errors
      if echo "$LOGS" | grep -qE "(CUBLAS_STATUS_|CUDA error|cudaMalloc failed|out of memory)"; then
        echo "CRITICAL GPU ERRORS FOUND:"
        echo "$LOGS" | grep -E "(CUBLAS_STATUS_|CUDA error|cudaMalloc failed|out of memory)"
        exit 1
      fi

      # Check for CPU fallback (bad!)
      if echo "$LOGS" | grep -q "id=cpu library=cpu"; then
        echo "ERROR: Ollama fell back to CPU-only mode"
        exit 1
      fi

      echo "SUCCESS: No GPU-related errors found"
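  # "id=cpu library=cpu" means Ollama registered only a CPU compute device, i.e. it
  # silently fell back to CPU-only inference, so it is treated as a hard failure here.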
  - name: Display GPU memory status from logs
    command: |
      cd docker
      LOGS=$(docker compose logs 2>&1)

      echo "=== GPU Memory Status ==="
      echo "$LOGS" | grep -E "gpu memory.*library=CUDA" | tail -4
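  # Informational only: the K80 exposes roughly 12 GB of VRAM per GPU, but the exact
  # available/total figures in these log lines vary, so nothing is asserted here.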
criteria: |
  Tesla K80 GPU(s) should be detected by both nvidia-smi AND the Ollama CUDA runtime.

  Expected:
  - nvidia-smi shows Tesla K80 GPU(s) with driver 470.x
  - CUDA libraries are available (libcuda, libcublas, etc.)
  - /dev/nvidia-uvm device file exists (required by the CUDA runtime)
  - Ollama logs show "inference compute" with "library=CUDA"
  - Ollama logs show "ggml_cuda_init: found N CUDA devices"
  - Ollama logs show "load_backend: loaded CUDA backend"
  - NO "id=cpu library=cpu" (CPU fallback)
  - NO CUBLAS_STATUS_ errors
  - NO CUDA error messages

  NOTE: If nvidia-smi works but Ollama shows only CPU, the UVM device files are
  missing; the test auto-fixes this with nvidia-modprobe -u -c=0.

  The K80 has 12 GB of VRAM per GPU. Accept variations in reported memory.
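# Manual cross-check (not part of the test case): one way to confirm the GPU model,
# driver version, and memory referenced above, assuming nvidia-smi is on the host:
#   nvidia-smi --query-gpu=name,driver_version,memory.total --format=csv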