Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-09 23:37:06 +00:00)
This commit represents a complete rework after pulling the latest changes from the official ollama/ollama repository and re-applying the Tesla K80 compatibility patches.

## Key Changes

### CUDA Compute Capability 3.7 Support (Tesla K80)

- Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt
- Updated CMakePresets.json to include compute 3.7 in the "CUDA 11" preset
- Using 37-virtual (PTX with JIT compilation) for maximum compatibility (see the sanity-check sketch after this message)

### Legacy Toolchain Compatibility

- **NVIDIA Driver**: 470.256.02 (last driver branch supporting Kepler/K80)
- **CUDA Version**: 11.4.4 (last CUDA 11.x release supporting compute 3.7)
- **GCC Version**: 10.5.0 (required by CUDA 11.4's host_config.h)

### CPU Architecture Trade-offs

Due to the GCC 10.5 limitation, newer CPU optimizations were sacrificed:

- Alderlake CPU variant enabled WITHOUT AVX_VNNI (requires GCC 11+)
- Still supports: SSE4.2, AVX, F16C, AVX2, BMI2, FMA
- Performance impact: ~3-7% on newer CPUs (acceptable for K80 compatibility)

### Build System Updates

- Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7
- Added the -Wno-deprecated-gpu-targets flag to suppress warnings
- Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI

### Upstream Sync

Merged the latest llama.cpp changes, including:

- Enhanced KV cache management with ISWA and hybrid memory support
- Improved multi-modal support (mtmd framework)
- New model architectures (Gemma3, Llama4, Qwen3, etc.)
- GPU backend improvements for CUDA, Metal, and ROCm
- Updated quantization support and GGUF format handling

### Documentation

- Updated CLAUDE.md with comprehensive build instructions
- Documented toolchain constraints and CPU architecture trade-offs
- Removed outdated CI/CD workflows (tesla-k80-*.yml)
- Cleaned up temporary development artifacts

## Rationale

This fork maintains Tesla K80 GPU support (compute 3.7), which was dropped from official Ollama because of its legacy driver and CUDA requirements. The toolchain constraint forms a rigid dependency chain:

- K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI

We accept the loss of cutting-edge CPU optimizations to enable running modern LLMs on legacy but still capable Tesla K80 hardware (12GB VRAM per GPU).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
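As a sanity check of the toolchain described above, a minimal standalone CUDA snippet like the following lists the visible GPUs with their compute capability. This is a sketch, not part of this repository: the file name `check_k80.cu` and the build line are illustrative, and it assumes the CUDA 11.4 toolkit and driver 470 are installed. On a working setup, each Tesla K80 GPU (a K80 board exposes two) should report compute 3.7:

```cuda
// check_k80.cu — hypothetical standalone diagnostic, not part of this repo.
// Build with the CUDA 11.4 toolchain, emitting PTX only so the driver JIT
// compiles it, mirroring the 37-virtual setting described above:
//   nvcc -gencode arch=compute_37,code=compute_37 -o check_k80 check_k80.cu
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int count = 0;
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        printf("no CUDA devices visible\n");
        return 1;
    }
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        // A Tesla K80 GPU should report compute 3.7 here.
        printf("GPU %d: %s, compute %d.%d, %.1f GiB VRAM\n",
               i, prop.name, prop.major, prop.minor,
               prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
    }
    return 0;
}
```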
package openai

import (
	"encoding/base64"
	"testing"

	"github.com/ollama/ollama/api"
)

const (
	prefix = `data:image/jpeg;base64,`
	image  = `iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=`
)

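// TestFromChatRequest_Basic verifies that a plain-text chat request carries
// its model name and single user message through the conversion unchanged.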
func TestFromChatRequest_Basic(t *testing.T) {
	req := ChatCompletionRequest{
		Model: "test-model",
		Messages: []Message{
			{Role: "user", Content: "Hello"},
		},
	}

	result, err := FromChatRequest(req)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Model != "test-model" {
		t.Errorf("expected model 'test-model', got %q", result.Model)
	}

	if len(result.Messages) != 1 {
		t.Fatalf("expected 1 message, got %d", len(result.Messages))
	}

	if result.Messages[0].Role != "user" || result.Messages[0].Content != "Hello" {
		t.Errorf("unexpected message: %+v", result.Messages[0])
	}
}

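// TestFromChatRequest_WithImage verifies that a multi-part message is split
// into separate messages, with the base64 image payload decoded into bytes.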
func TestFromChatRequest_WithImage(t *testing.T) {
	imgData, _ := base64.StdEncoding.DecodeString(image)

	req := ChatCompletionRequest{
		Model: "test-model",
		Messages: []Message{
			{
				Role: "user",
				Content: []any{
					map[string]any{"type": "text", "text": "Hello"},
					map[string]any{
						"type":      "image_url",
						"image_url": map[string]any{"url": prefix + image},
					},
				},
			},
		},
	}

	result, err := FromChatRequest(req)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if len(result.Messages) != 2 {
		t.Fatalf("expected 2 messages, got %d", len(result.Messages))
	}

	if result.Messages[0].Content != "Hello" {
		t.Errorf("expected first message content 'Hello', got %q", result.Messages[0].Content)
	}

	if len(result.Messages[1].Images) != 1 {
		t.Fatalf("expected 1 image, got %d", len(result.Messages[1].Images))
	}

	if string(result.Messages[1].Images[0]) != string(imgData) {
		t.Error("image data mismatch")
	}
}

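// TestFromCompleteRequest_Basic verifies that model, prompt, and the optional
// temperature pointer land in the converted request and its options map.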
func TestFromCompleteRequest_Basic(t *testing.T) {
	temp := float32(0.8)
	req := CompletionRequest{
		Model:       "test-model",
		Prompt:      "Hello",
		Temperature: &temp,
	}

	result, err := FromCompleteRequest(req)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Model != "test-model" {
		t.Errorf("expected model 'test-model', got %q", result.Model)
	}

	if result.Prompt != "Hello" {
		t.Errorf("expected prompt 'Hello', got %q", result.Prompt)
	}

	if tempVal, ok := result.Options["temperature"].(float32); !ok || tempVal != 0.8 {
		t.Errorf("expected temperature 0.8, got %v", result.Options["temperature"])
	}
}

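// TestToUsage verifies that prompt and completion token counts are copied
// from the response metrics and summed into TotalTokens.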
func TestToUsage(t *testing.T) {
	resp := api.ChatResponse{
		Metrics: api.Metrics{
			PromptEvalCount: 10,
			EvalCount:       20,
		},
	}

	usage := ToUsage(resp)

	if usage.PromptTokens != 10 {
		t.Errorf("expected PromptTokens 10, got %d", usage.PromptTokens)
	}

	if usage.CompletionTokens != 20 {
		t.Errorf("expected CompletionTokens 20, got %d", usage.CompletionTokens)
	}

	if usage.TotalTokens != 30 {
		t.Errorf("expected TotalTokens 30, got %d", usage.TotalTokens)
	}
}

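// TestNewError verifies that HTTP status codes map to the OpenAI-style
// error type strings and that the message is passed through verbatim.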
func TestNewError(t *testing.T) {
	tests := []struct {
		code int
		want string
	}{
		{400, "invalid_request_error"},
		{404, "not_found_error"},
		{500, "api_error"},
	}

	for _, tt := range tests {
		result := NewError(tt.code, "test message")
		if result.Error.Type != tt.want {
			t.Errorf("NewError(%d) type = %q, want %q", tt.code, result.Error.Type, tt.want)
		}
		if result.Error.Message != "test message" {
			t.Errorf("NewError(%d) message = %q, want %q", tt.code, result.Error.Message, "test message")
		}
	}
}