Files
ollama37/ollama37.Dockerfile
Shang Chieh Tseng cbcbc9ae07 Add support for new models and fix GitHub issues
- Add Gemma3n model support with text generation capabilities
- Add new CUDA mean operations for improved performance
- Add macOS documentation and performance tests
- Update LLAMA patches for ROCm/CUDA compatibility
- Fix various model conversion and processing issues
- Update CI workflows and build configurations
- Add library model tests and Shakespeare test data

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-07-20 00:12:36 +08:00

33 lines
1.0 KiB
Docker

# ===== Stage 1: Build the source code =====
# NOTE(review): builder base is untagged (implicit :latest) — pin a tag or digest
# for reproducible builds once the intended version is known.
FROM dogkeeper886/ollama37-builder AS builder

# Use the custom toolchain shipped in the builder image for BOTH the cmake
# build and the Go build (cgo honors CC/CXX). Stage-scoped ENV is safe here:
# this stage is discarded, so nothing leaks into the runtime image.
ENV CC=/usr/local/bin/gcc \
    CXX=/usr/local/bin/g++

WORKDIR /usr/local/src/ollama37
# Copy the full source tree (keep a .dockerignore so .git/build output/logs
# don't bust the cache or bloat the context).
COPY . .
# Configure, compile the native libraries in parallel, then build the Go binary.
RUN cmake -B build \
 && cmake --build build --parallel \
 && go build -o ollama .
# ===== Stage 2: Runtime image =====
FROM rockylinux/rockylinux:8

# Refresh base packages and clean dnf metadata in the SAME layer so the
# cache doesn't persist in the image (hadolint DL3040/DL3041).
RUN dnf -y update && dnf clean all

# Copy only the built binary and the runtime libraries from the builder stage.
COPY --from=builder /usr/local/src/ollama37/ollama /usr/local/bin/ollama
COPY --from=builder /usr/local/lib64 /usr/local/lib64
COPY --from=builder /usr/local/cuda-11.4/lib64 /usr/local/cuda-11.4/lib64

# Runtime configuration, grouped in one ENV instruction:
# - LD_LIBRARY_PATH: locate the copied shared libs and CUDA 11.4 runtime
# - NVIDIA_*: request GPU compute/utility capabilities from the NVIDIA runtime
# - OLLAMA_HOST: bind the server on all interfaces, port 11434
ENV LD_LIBRARY_PATH="/usr/local/lib64:/usr/local/cuda-11.4/lib64" \
    NVIDIA_DRIVER_CAPABILITIES=compute,utility \
    NVIDIA_VISIBLE_DEVICES=all \
    OLLAMA_HOST=0.0.0.0:11434

# NOTE(review): image runs as root (no USER directive). Consider adding a
# dedicated non-root user once the model-store path/permissions are confirmed.

# Documentation only — publish with `docker run -p` at run time.
EXPOSE 11434

# Exec-form entrypoint so ollama is PID 1 and receives SIGTERM directly;
# CMD supplies the default subcommand and can be overridden at `docker run`.
ENTRYPOINT ["/usr/local/bin/ollama"]
CMD ["serve"]