services:
  # LLM Judge - stable reference version for evaluating test results
  # Runs on port 11435 to avoid conflict with test subject (11434)
  ollama-judge:
    image: dogkeeper886/ollama37:latest
    container_name: ollama37-judge
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "11435:11434"
    volumes:
      - ollama-judge-data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0:11434
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/usr/local/bin/ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 5s

volumes:
  ollama-judge-data:
    name: ollama-judge-data
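
# Usage sketch (assumes Docker Compose v2 and the NVIDIA Container Toolkit are
# installed on the host; the judge model name is a placeholder, not prescribed
# by this file):
#   docker compose up -d ollama-judge                      # start the judge instance
#   docker exec ollama37-judge ollama pull <judge-model>   # load the evaluation model
#   curl http://localhost:11435/api/tags                   # confirm the API responds on the judge port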