mirror of https://github.com/dogkeeper886/ollama37.git
synced 2025-12-19 04:07:01 +00:00
Add comprehensive Ollama log checking and configurable LLM judge mode

Test case enhancements:
- TC-RUNTIME-001: Add startup log error checking (CUDA, CUBLAS, CPU fallback)
- TC-RUNTIME-002: Add GPU detection verification, CUDA init checks, error detection
- TC-RUNTIME-003: Add server listening verification, runtime error checks
- TC-INFERENCE-001: Add model loading logs, layer offload verification
- TC-INFERENCE-002: Add inference error checking (CUBLAS/CUDA errors)
- TC-INFERENCE-003: Add API request log verification, response time display

Workflow enhancements:
- Add judge_mode input (simple/llm/dual) to all workflows
- Add judge_model input to specify the LLM model used for judging
- Configurable via the GitHub Actions UI without code changes

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
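Only the input names judge_mode and judge_model come from the commit message; the workflow_dispatch block below is a minimal sketch of how such inputs are typically declared, not the repository's actual workflow file:

    # Sketch only: input names from the commit message, structure assumed.
    on:
      workflow_dispatch:
        inputs:
          judge_mode:
            description: "Judge mode: simple, llm, or dual"
            type: choice
            options:
              - simple
              - llm
              - dual
            default: simple
          judge_model:
            description: "LLM model used when judge_mode is llm or dual"
            type: string
            required: false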

@@ -20,6 +20,30 @@ steps:
   - name: Check container status
     command: cd docker && docker compose ps

+  - name: Capture startup logs
+    command: |
+      cd docker && docker compose logs 2>&1 | head -100
+
+  - name: Check for startup errors in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      # Check for critical errors
+      if echo "$LOGS" | grep -qE "(level=ERROR|CUBLAS_STATUS_|CUDA error|cudaMalloc failed)"; then
+        echo "CRITICAL ERRORS FOUND IN STARTUP LOGS:"
+        echo "$LOGS" | grep -E "(level=ERROR|CUBLAS_STATUS_|CUDA error|cudaMalloc failed)"
+        exit 1
+      fi
+
+      # Check for CPU-only fallback (GPU not detected)
+      if echo "$LOGS" | grep -q "id=cpu library=cpu"; then
+        echo "ERROR: Ollama fell back to CPU-only mode"
+        exit 1
+      fi
+
+      echo "SUCCESS: No critical errors in startup logs"
+
 criteria: |
   The ollama37 container should start successfully with GPU access.

@@ -27,5 +51,7 @@ criteria: |
   - Container starts without errors
   - docker compose ps shows container in "Up" state
   - No "Exited" or "Restarting" status
+  - No critical errors in logs (level=ERROR, CUBLAS_STATUS_, CUDA error)
+  - No CPU-only fallback (id=cpu library=cpu)

-  Accept startup warnings. Container should be running.
+  Accept startup warnings (flash attention not supported is OK). Container should be running.

@@ -28,9 +28,90 @@ steps:
       ls -l /dev/nvidia-uvm
     fi

-  - name: Check Ollama GPU detection in logs
+  - name: Verify GPU detection in Ollama logs
     command: |
-      cd docker && docker compose logs 2>&1 | grep -E "(inference compute|GPU detected)" | tail -5
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== GPU Detection Check ==="
+
+      # Check for inference compute with CUDA library
+      if echo "$LOGS" | grep -q "inference compute.*library=CUDA"; then
+        echo "SUCCESS: GPU detected with CUDA library"
+        echo "$LOGS" | grep "inference compute" | head -2
+      else
+        echo "ERROR: GPU not detected with CUDA library"
+        exit 1
+      fi
+
+      # Check for Tesla K80 specifically
+      if echo "$LOGS" | grep -q 'description="Tesla K80"'; then
+        echo "SUCCESS: Tesla K80 GPU identified"
+      else
+        echo "WARNING: Tesla K80 not explicitly identified"
+      fi
+
+      # Check compute capability 3.7
+      if echo "$LOGS" | grep -q "compute=3.7"; then
+        echo "SUCCESS: Compute capability 3.7 detected"
+      else
+        echo "WARNING: Compute capability 3.7 not detected"
+      fi
+
+  - name: Verify CUDA initialization in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== CUDA Initialization Check ==="
+
+      # Check ggml_cuda_init
+      if echo "$LOGS" | grep -q "ggml_cuda_init: found"; then
+        echo "SUCCESS: CUDA initialized"
+        echo "$LOGS" | grep "ggml_cuda_init: found" | head -1
+      else
+        echo "ERROR: CUDA not initialized"
+        exit 1
+      fi
+
+      # Check CUDA backend loaded
+      if echo "$LOGS" | grep -q "load_backend: loaded CUDA backend"; then
+        echo "SUCCESS: CUDA backend loaded"
+        echo "$LOGS" | grep "load_backend: loaded CUDA backend" | head -1
+      else
+        echo "ERROR: CUDA backend not loaded"
+        exit 1
+      fi
+
+  - name: Check for GPU-related errors in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== GPU Error Check ==="
+
+      # Check for critical CUDA/CUBLAS errors
+      if echo "$LOGS" | grep -qE "(CUBLAS_STATUS_|CUDA error|cudaMalloc failed|out of memory)"; then
+        echo "CRITICAL GPU ERRORS FOUND:"
+        echo "$LOGS" | grep -E "(CUBLAS_STATUS_|CUDA error|cudaMalloc failed|out of memory)"
+        exit 1
+      fi
+
+      # Check for CPU fallback (bad!)
+      if echo "$LOGS" | grep -q "id=cpu library=cpu"; then
+        echo "ERROR: Ollama fell back to CPU-only mode"
+        exit 1
+      fi
+
+      echo "SUCCESS: No GPU-related errors found"
+
+  - name: Display GPU memory status from logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== GPU Memory Status ==="
+      echo "$LOGS" | grep -E "gpu memory.*library=CUDA" | tail -4
+
 criteria: |
   Tesla K80 GPU should be detected by both nvidia-smi AND Ollama CUDA runtime.

@@ -39,7 +120,12 @@ criteria: |
   - nvidia-smi shows Tesla K80 GPU(s) with Driver 470.x
   - CUDA libraries are available (libcuda, libcublas, etc.)
   - /dev/nvidia-uvm device file exists (required for CUDA runtime)
-  - Ollama logs show GPU detection, NOT "id=cpu library=cpu"
+  - Ollama logs show "inference compute" with "library=CUDA"
+  - Ollama logs show "ggml_cuda_init: found N CUDA devices"
+  - Ollama logs show "load_backend: loaded CUDA backend"
+  - NO "id=cpu library=cpu" (CPU fallback)
+  - NO CUBLAS_STATUS_ errors
+  - NO CUDA error messages

   NOTE: If nvidia-smi works but Ollama shows only CPU, the UVM device
   files are missing. The test will auto-fix with nvidia-modprobe -u -c=0.
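The auto-fix mentioned in that NOTE can also be reproduced by hand. A minimal sketch, assuming nvidia-modprobe is installed on the host and the shell runs as root:

    # Create the UVM device files if they are missing: -u loads the
    # nvidia-uvm module and creates /dev/nvidia-uvm, -c=0 creates the
    # device node for GPU 0. Restart the container afterwards.
    if [ ! -e /dev/nvidia-uvm ]; then
      nvidia-modprobe -u -c=0
      ls -l /dev/nvidia-uvm
    fi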

@@ -28,6 +28,62 @@ steps:
   - name: Check Ollama version
     command: docker exec ollama37 ollama --version

+  - name: Verify server listening in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== Server Status Check ==="
+
+      # Check server is listening
+      if echo "$LOGS" | grep -q "Listening on"; then
+        echo "SUCCESS: Server is listening"
+        echo "$LOGS" | grep "Listening on" | head -1
+      else
+        echo "ERROR: Server not listening"
+        exit 1
+      fi
+
+  - name: Check for runtime errors in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== Runtime Error Check ==="
+
+      # Count ERROR level logs (grep -c prints 0 on no match but exits
+      # non-zero, so || true keeps the count a single clean number)
+      ERROR_COUNT=$(echo "$LOGS" | grep -c "level=ERROR" || true)
+      if [ "$ERROR_COUNT" -gt 0 ]; then
+        echo "WARNING: Found $ERROR_COUNT ERROR level log entries:"
+        echo "$LOGS" | grep "level=ERROR" | tail -5
+      else
+        echo "SUCCESS: No ERROR level logs found"
+      fi
+
+      # Check for panic/fatal
+      if echo "$LOGS" | grep -qiE "(panic|fatal)"; then
+        echo "CRITICAL: Panic or fatal error detected:"
+        echo "$LOGS" | grep -iE "(panic|fatal)"
+        exit 1
+      fi
+
+      echo "SUCCESS: No critical runtime errors"
+
+  - name: Verify API request handling in logs
+    command: |
+      cd docker
+      LOGS=$(docker compose logs 2>&1)
+
+      echo "=== API Request Logs ==="
+
+      # Check that API requests are being logged (GIN framework)
+      if echo "$LOGS" | grep -q '\[GIN\].*200.*GET.*"/api/tags"'; then
+        echo "SUCCESS: API requests are being handled"
+        echo "$LOGS" | grep '\[GIN\].*"/api/tags"' | tail -3
+      else
+        echo "WARNING: No API request logs found (might be first request)"
+      fi
+
 criteria: |
   Ollama server should be healthy and API responsive.

@@ -35,5 +91,8 @@ criteria: |
   - Container health status becomes "healthy"
   - /api/tags endpoint returns JSON response (even if empty models)
   - ollama --version shows version information
+  - Logs show "Listening on" message
+  - No panic or fatal errors in logs
+  - API requests logged with 200 status codes

   Accept any valid JSON response from API. Version format may vary.
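To produce the [GIN] /api/tags entry that the last step looks for, a single request is enough. A sketch, assuming the compose file publishes Ollama's default port 11434 on localhost:

    # Hit the tags endpoint once, then confirm the request shows up in the logs.
    curl -s http://localhost:11434/api/tags
    cd docker && docker compose logs 2>&1 | grep '\[GIN\].*"/api/tags"' | tail -3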