Mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-09 23:37:06 +00:00
This commit represents a complete rework after pulling the latest changes from the official ollama/ollama repository and re-applying the Tesla K80 compatibility patches.

## Key Changes

### CUDA Compute Capability 3.7 Support (Tesla K80)
- Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt
- Updated CMakePresets.json to include compute 3.7 in the "CUDA 11" preset
- Using 37-virtual (PTX with JIT compilation) for maximum compatibility

### Legacy Toolchain Compatibility
- **NVIDIA Driver**: 470.256.02 (last driver version supporting Kepler/K80)
- **CUDA Version**: 11.4.4 (last CUDA 11.x release supporting compute 3.7)
- **GCC Version**: 10.5.0 (required by CUDA 11.4's host_config.h)

### CPU Architecture Trade-offs
The GCC 10.5 ceiling forces us to give up some newer CPU optimizations:
- Alderlake CPU variant is enabled WITHOUT AVX_VNNI (which requires GCC 11+)
- Still supported: SSE4.2, AVX, F16C, AVX2, BMI2, FMA
- Performance impact: roughly 3-7% on newer CPUs, an acceptable price for K80 compatibility

### Build System Updates
- Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7
- Added the -Wno-deprecated-gpu-targets flag to suppress warnings
- Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI

### Upstream Sync
Merged the latest llama.cpp changes, including:
- Enhanced KV cache management with ISWA and hybrid memory support
- Improved multi-modal support (mtmd framework)
- New model architectures (Gemma3, Llama4, Qwen3, etc.)
- GPU backend improvements for CUDA, Metal, and ROCm
- Updated quantization support and GGUF format handling

### Documentation
- Updated CLAUDE.md with comprehensive build instructions
- Documented the toolchain constraints and CPU architecture trade-offs
- Removed outdated CI/CD workflows (tesla-k80-*.yml)
- Cleaned up temporary development artifacts

## Rationale

This fork maintains Tesla K80 GPU support (compute 3.7), which official Ollama dropped because of the legacy driver and CUDA versions it requires. The toolchain constraints form a rigid dependency chain:

- K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI

We accept the loss of cutting-edge CPU optimizations in exchange for running modern LLMs on legacy but still capable Tesla K80 hardware (12GB VRAM per GPU); the sketch after this message illustrates the resulting version gate.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
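The dependency chain above bottoms out in a single version check: a GPU is usable with this fork as long as its CUDA compute capability is at least 3.7. A minimal Go sketch of that gate, purely illustrative (the type and method names are hypothetical, not ollama's actual GPU discovery code):

```go
package main

import "fmt"

// ComputeCapability models a CUDA device's compute capability.
// Hypothetical illustration; not a type from the ollama codebase.
type ComputeCapability struct {
	Major, Minor int
}

// SupportedByFork reports whether this fork's binaries can target the
// device. The fork's floor is 3.7 (Kepler / Tesla K80); official builds
// set a higher minimum, which is why the K80 falls out upstream.
func (c ComputeCapability) SupportedByFork() bool {
	const minMajor, minMinor = 3, 7
	if c.Major != minMajor {
		return c.Major > minMajor
	}
	return c.Minor >= minMinor
}

func main() {
	k80 := ComputeCapability{Major: 3, Minor: 7}
	fmt.Println("Tesla K80 supported:", k80.SupportedByFork()) // true
}
```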
113 lines
3.3 KiB
Go
package server

import (
	"bytes"
	"encoding/json"
	"net/http"
	"path/filepath"
	"testing"

	"github.com/gin-gonic/gin"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/types/model"
)

func TestDelete(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server

	// Create two models that share the same GGUF blob; "test2" also sets a
	// custom template, so the two differ in their remaining layers.
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:  "test",
		Files: map[string]string{"test.gguf": digest},
	})

	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:     "test2",
		Files:    map[string]string{"test.gguf": digest},
		Template: "{{ .System }} {{ .Prompt }}",
	})

	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	// Both manifests and all content-addressed blobs should now be on disk.
	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-136bf7c76bac2ec09d6617885507d37829e04b41acc47687d45e512b544e893a"),
		filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
	})

	// Deleting "test" removes its manifest and any blobs only it referenced;
	// layers shared with "test2" must survive.
	w = createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test"})

	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-136bf7c76bac2ec09d6617885507d37829e04b41acc47687d45e512b544e893a"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
	})

	// After deleting the second model the store should be empty.
	w = createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test2"})

	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{})
}
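// NOTE: createBinFile, createRequest, and checkFileExists are test helpers
// defined elsewhere in this package, not in this file. As a rough,
// illustrative sketch, checkFileExists could plausibly glob the pattern and
// compare the matches (filepath.Glob returns them sorted) against the
// expected paths:
//
//	func checkFileExists(t *testing.T, pattern string, expect []string) {
//		t.Helper()
//		actual, err := filepath.Glob(pattern)
//		if err != nil {
//			t.Fatal(err)
//		}
//		if !slices.Equal(actual, expect) {
//			t.Fatalf("expected %v, actual %v", expect, actual)
//		}
//	}
//
// The real helper may differ; this sketch is only to make the assertions
// above easier to follow.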

func TestDeleteDuplicateLayers(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)
	var s Server

	n := model.ParseName("test")

	var b bytes.Buffer
	if err := json.NewEncoder(&b).Encode(&ConfigV2{}); err != nil {
		t.Fatal(err)
	}

	config, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json")
	if err != nil {
		t.Fatal(err)
	}

	// create a manifest with duplicate layers
	if err := WriteManifest(n, config, []Layer{config}); err != nil {
		t.Fatal(err)
	}

	// Deleting the model must succeed even though the same layer appears
	// twice in its manifest.
	w := createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test"})
	if w.Code != http.StatusOK {
		t.Errorf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
}
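// NOTE: a plausible sketch of the createRequest helper used throughout these
// tests, assuming it JSON-encodes the request body and drives the handler
// through a gin test context (the real helper may differ):
//
//	func createRequest(t *testing.T, fn gin.HandlerFunc, body any) *httptest.ResponseRecorder {
//		t.Helper()
//		var b bytes.Buffer
//		if err := json.NewEncoder(&b).Encode(body); err != nil {
//			t.Fatal(err)
//		}
//		w := httptest.NewRecorder()
//		c, _ := gin.CreateTestContext(w)
//		c.Request = httptest.NewRequest(http.MethodPost, "/", &b)
//		fn(c)
//		return w
//	}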