# Docker Compose service running a local Ollama build with NVIDIA GPU support.
# NOTE(review): source was collapsed onto one line (invalid YAML); structure
# reconstructed from the key/value sequence — confirm against the original file.
services:
  ollama37:
    # Build the image from ./runtime/Dockerfile and tag it locally.
    build:
      context: ./runtime
      dockerfile: Dockerfile
    image: ollama37:local
    container_name: ollama37
    ports:
      # Quoted to avoid YAML's sexagesimal-integer trap on colon-separated digits.
      - "11434:11434"
    restart: unless-stopped
    # Requires the NVIDIA container runtime to be installed on the host.
    runtime: nvidia
    volumes:
      # Persist models/config across container restarts.
      - ./volume:/root/.ollama
    environment:
      # Expose all host GPUs to the container.
      - CUDA_VISIBLE_DEVICES=all
      # Bind the Ollama API on all interfaces so the published port is reachable.
      - OLLAMA_HOST=0.0.0.0