Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-18 19:56:59 +00:00)
Add local development build support to Docker build system
Extends the Docker Makefile with targets for building from local source code without pushing to GitHub, enabling faster iteration during development.

New build targets:
- build-runtime-local: Build from local source with cache
- build-runtime-local-no-cache: Full rebuild from local source
- build-runtime-no-cache: Force fresh GitHub clone without cache

Adds docker/runtime/Dockerfile.local for local source builds, mirroring the GitHub-based Dockerfile structure but using COPY instead of git clone.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
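For quick reference, the new targets are invoked through docker/Makefile. A minimal sketch, assuming make is run from the repository root with -C pointing at the docker/ directory (adjust if you cd into docker/ instead):

    # Rebuild the runtime image from the local checkout, reusing the Docker layer cache
    make -C docker build-runtime-local

    # Full rebuild from the local checkout, ignoring the layer cache
    make -C docker build-runtime-local-no-cache

    # Rebuild from GitHub with a forced fresh clone and no layer cache
    make -C docker build-runtime-no-cache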
docker/Makefile (106 changed lines)
@@ -35,7 +35,10 @@ RUNTIME_DOCKERFILE := $(SOURCE_DIR)/docker/runtime/Dockerfile
 BUILDER_CONTEXT := $(SOURCE_DIR)/docker/builder
 RUNTIME_CONTEXT := $(SOURCE_DIR)
 
-.PHONY: all build build-builder build-runtime ensure-builder clean help
+# Dockerfiles
+RUNTIME_DOCKERFILE_LOCAL := $(SOURCE_DIR)/docker/runtime/Dockerfile.local
+
+.PHONY: all build build-builder build-runtime build-runtime-no-cache build-runtime-local build-runtime-local-no-cache ensure-builder clean help
 
 # Default target
 all: build
@@ -74,6 +77,7 @@ build-runtime: ensure-builder
 	@echo "→ Building runtime image..."
 	@echo " Image: $(RUNTIME_IMAGE):$(RUNTIME_TAG)"
 	@echo " Dockerfile: $(RUNTIME_DOCKERFILE)"
+	@echo " Source: GitHub (uses cache)"
 	@echo ""
 	@echo " This will:"
 	@echo " - Clone ollama37 source from GitHub"
@@ -92,6 +96,78 @@ build-runtime: ensure-builder
 	@echo "To start the Ollama server:"
 	@echo " docker-compose up -d"
 
+# Build runtime with --no-cache (force fresh GitHub clone)
+build-runtime-no-cache: ensure-builder
+	@echo "→ Building runtime image (no cache)..."
+	@echo " Image: $(RUNTIME_IMAGE):$(RUNTIME_TAG)"
+	@echo " Dockerfile: $(RUNTIME_DOCKERFILE)"
+	@echo " Source: GitHub (fresh clone, no cache)"
+	@echo ""
+	@echo " This will:"
+	@echo " - Force fresh clone from GitHub"
+	@echo " - Rebuild all layers without cache"
+	@echo " - Configure with CMake (CUDA 11 preset)"
+	@echo " - Compile C/C++/CUDA libraries"
+	@echo " - Build Go binary"
+	@echo ""
+	@docker build --no-cache \
+		-f $(RUNTIME_DOCKERFILE) \
+		-t $(RUNTIME_IMAGE):$(RUNTIME_TAG) \
+		$(RUNTIME_CONTEXT)
+	@echo ""
+	@echo "✓ Runtime image built successfully!"
+	@echo ""
+	@echo "To start the Ollama server:"
+	@echo " docker-compose up -d"
+
+# Build runtime using local source code
+build-runtime-local: ensure-builder
+	@echo "→ Building runtime image (local source)..."
+	@echo " Image: $(RUNTIME_IMAGE):$(RUNTIME_TAG)"
+	@echo " Dockerfile: $(RUNTIME_DOCKERFILE_LOCAL)"
+	@echo " Source: Local directory (uses cache)"
+	@echo ""
+	@echo " This will:"
+	@echo " - Copy local source code to container"
+	@echo " - Configure with CMake (CUDA 11 preset)"
+	@echo " - Compile C/C++/CUDA libraries"
+	@echo " - Build Go binary"
+	@echo " - Package runtime environment"
+	@echo ""
+	@docker build \
+		-f $(RUNTIME_DOCKERFILE_LOCAL) \
+		-t $(RUNTIME_IMAGE):$(RUNTIME_TAG) \
+		$(RUNTIME_CONTEXT)
+	@echo ""
+	@echo "✓ Runtime image built successfully!"
+	@echo ""
+	@echo "To start the Ollama server:"
+	@echo " docker-compose up -d"
+
+# Build runtime using local source with --no-cache
+build-runtime-local-no-cache: ensure-builder
+	@echo "→ Building runtime image (local source, no cache)..."
+	@echo " Image: $(RUNTIME_IMAGE):$(RUNTIME_TAG)"
+	@echo " Dockerfile: $(RUNTIME_DOCKERFILE_LOCAL)"
+	@echo " Source: Local directory (no cache)"
+	@echo ""
+	@echo " This will:"
+	@echo " - Copy local source code to container"
+	@echo " - Rebuild all layers without cache"
+	@echo " - Configure with CMake (CUDA 11 preset)"
+	@echo " - Compile C/C++/CUDA libraries"
+	@echo " - Build Go binary"
+	@echo ""
+	@docker build --no-cache \
+		-f $(RUNTIME_DOCKERFILE_LOCAL) \
+		-t $(RUNTIME_IMAGE):$(RUNTIME_TAG) \
+		$(RUNTIME_CONTEXT)
+	@echo ""
+	@echo "✓ Runtime image built successfully!"
+	@echo ""
+	@echo "To start the Ollama server:"
+	@echo " docker-compose up -d"
+
 # Ensure builder image exists (build if not present)
 ensure-builder:
 	@if ! docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "^$(BUILDER_IMAGE):$(BUILDER_TAG)$$"; then \
@@ -117,11 +193,20 @@ help:
 	@echo "Ollama37 Docker Build System"
 	@echo ""
 	@echo "Build Targets:"
-	@echo " make build - Build builder and runtime images (default)"
-	@echo " make build-builder - Build only the builder base image"
-	@echo " make build-runtime - Build only the runtime image"
-	@echo " make clean - Remove all Docker images"
-	@echo " make help - Show this help message"
+	@echo " make build - Build builder and runtime images (default)"
+	@echo " make build-builder - Build only the builder base image"
+	@echo " make build-runtime - Build runtime from GitHub (uses cache)"
+	@echo " make build-runtime-no-cache - Build runtime from GitHub (fresh clone, no cache)"
+	@echo " make build-runtime-local - Build runtime from local source (uses cache)"
+	@echo " make build-runtime-local-no-cache - Build runtime from local source (no cache)"
+	@echo " make clean - Remove all Docker images"
+	@echo " make help - Show this help message"
+	@echo ""
+	@echo "Which Build Target to Use?"
+	@echo " • build-runtime - Normal builds after pushing to GitHub"
+	@echo " • build-runtime-no-cache - After GitHub push when Docker cache is stale"
+	@echo " • build-runtime-local - Quick testing of local changes without push"
+	@echo " • build-runtime-local-no-cache - Full rebuild with local changes"
 	@echo ""
 	@echo "Configuration:"
 	@echo " BUILDER_IMAGE: $(BUILDER_IMAGE):$(BUILDER_TAG)"
@@ -129,13 +214,14 @@ help:
 	@echo ""
 	@echo "Dockerfiles:"
 	@echo " Builder: $(BUILDER_DOCKERFILE)"
-	@echo " Runtime: $(RUNTIME_DOCKERFILE)"
+	@echo " Runtime (GitHub): $(RUNTIME_DOCKERFILE)"
+	@echo " Runtime (Local): $(RUNTIME_DOCKERFILE_LOCAL)"
 	@echo ""
 	@echo "Build Architecture:"
 	@echo " 1. Builder image: Base environment (CUDA 11.4, GCC 10, CMake 4, Go 1.25.3)"
-	@echo " 2. Runtime image: Two-stage build (compile + package)"
-	@echo "    - Stage 1: Clone source, compile C/C++/CUDA/Go"
-	@echo "    - Stage 2: Package runtime with compiled binaries"
+	@echo " 2. Runtime image: Single-stage build (compile + package)"
+	@echo "    - Clone/copy source, compile C/C++/CUDA/Go"
+	@echo "    - Package runtime with compiled binaries"
 	@echo ""
 	@echo "Container Management (use docker-compose):"
 	@echo " docker-compose up -d - Start Ollama server"
docker/runtime/Dockerfile.local (new file, 74 lines)
@@ -0,0 +1,74 @@
+# Ollama37 Runtime Image - Local Development Build
+# Single-stage build: compiles and packages the binary in one image
+# This Dockerfile uses LOCAL source code instead of cloning from GitHub
+# Use this for testing changes without pushing to GitHub
+#
+# Usage: docker build -f docker/runtime/Dockerfile.local -t ollama37:latest .
+#
+# The runtime needs access to the build directory for GGML CUDA libraries
+# This ensures the compiled binary can find all required runtime libraries at:
+# /usr/local/src/ollama37/build/lib/ollama
+
+# Base image: ollama37-builder contains GCC 10, CUDA 11.4, and build tools
+FROM ollama37-builder
+
+# Copy local source code to container
+# Build context should be the repository root
+COPY . /usr/local/src/ollama37
+
+# Set working directory for build
+WORKDIR /usr/local/src/ollama37
+
+# Configure build with CMake
+# Use "CUDA 11" preset for Tesla K80 compute capability 3.7 support
+# Set LD_LIBRARY_PATH during build so CMake can locate GCC 10 runtime libraries
+# and properly link against them (required for C++ standard library and atomics)
+RUN bash -c 'LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:/usr/lib64:$LD_LIBRARY_PATH \
+    CC=/usr/local/bin/gcc CXX=/usr/local/bin/g++ \
+    cmake --preset "CUDA 11"'
+
+# Build C/C++/CUDA libraries with CMake
+# Compile all GGML CUDA kernels and Ollama native libraries
+# Use all available CPU cores (-j) for parallel compilation to speed up build
+RUN bash -c 'LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:/usr/lib64:$LD_LIBRARY_PATH \
+    CC=/usr/local/bin/gcc CXX=/usr/local/bin/g++ \
+    cmake --build build -j$(nproc)'
+
+# Build Go binary
+# Build to source directory so binary can find libraries via relative path
+RUN go build -o ./ollama .
+
+# Create symlink to standard binary location
+# The code in ml/path.go uses filepath.EvalSymlinks() which resolves this symlink
+# to /usr/local/src/ollama37/ollama, allowing it to find libraries at build/lib/ollama
+RUN ln -s /usr/local/src/ollama37/ollama /usr/local/bin/ollama
+
+# Setup library paths for runtime
+# The binary expects libraries in these exact paths:
+# /usr/local/src/ollama37/build/lib/ollama - Ollama CUDA/GGML libraries
+# /usr/local/lib64 - GCC 10 runtime libraries (libstdc++, libgcc_s)
+# /usr/local/cuda-11.4/lib64 - CUDA 11.4 runtime libraries
+# /usr/lib64 - System libraries
+ENV LD_LIBRARY_PATH=/usr/local/src/ollama37/build/lib/ollama:/usr/local/lib64:/usr/local/cuda-11.4/lib64:/usr/lib64
+
+# Configure Ollama server to listen on all interfaces
+ENV OLLAMA_HOST=0.0.0.0:11434
+
+# Expose Ollama API port
+EXPOSE 11434
+
+# Create persistent volume for model storage
+# Models downloaded by Ollama will be stored here
+RUN mkdir -p /root/.ollama
+VOLUME ["/root/.ollama"]
+
+# Configure health check to verify Ollama is running
+# Uses 'ollama list' command to check if the service is responsive
+# This validates both API availability and model registry access
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+    CMD /usr/local/bin/ollama list || exit 1
+
+# Set entrypoint and default command
+# Container runs 'ollama serve' by default to start the API server
+ENTRYPOINT ["/usr/local/bin/ollama"]
+CMD ["serve"]
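The Makefile targets above wrap this Dockerfile, but it can also be built directly as described in its Usage comment. A minimal sketch, assuming the ollama37-builder base image already exists locally (for example via make -C docker build-builder) and that the commands run from the repository root; the docker run line is only an illustrative alternative to the documented docker-compose up -d, and the named volume is an assumption:

    # Build the runtime image straight from the local checkout
    docker build -f docker/runtime/Dockerfile.local -t ollama37:latest .

    # Illustrative only: run the server directly, publishing the API port
    # and persisting downloaded models in a named volume
    docker run -d --gpus all -p 11434:11434 -v ollama37-models:/root/.ollama ollama37:latest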