Mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-18 03:37:09 +00:00
Sync with upstream ollama/ollama and restore Tesla K80 (compute 3.7) support
This commit represents a complete rework after pulling the latest changes from the official ollama/ollama repository and re-applying the Tesla K80 compatibility patches.

## Key Changes

### CUDA Compute Capability 3.7 Support (Tesla K80)
- Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt
- Updated CMakePresets.json to include compute 3.7 in the "CUDA 11" preset
- Using 37-virtual (PTX with JIT compilation) for maximum compatibility

### Legacy Toolchain Compatibility
- **NVIDIA Driver**: 470.256.02 (last version supporting Kepler/K80)
- **CUDA Version**: 11.4.4 (last CUDA 11.x release supporting compute 3.7)
- **GCC Version**: 10.5.0 (required by CUDA 11.4's host_config.h)

### CPU Architecture Trade-offs
Because of the GCC 10.5 limitation, some newer CPU optimizations are sacrificed:
- Alderlake CPU variant enabled WITHOUT AVX_VNNI (requires GCC 11+)
- Still supports: SSE4.2, AVX, F16C, AVX2, BMI2, FMA
- Performance impact: ~3-7% on newer CPUs (acceptable in exchange for K80 compatibility)

### Build System Updates
- Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7
- Added the -Wno-deprecated-gpu-targets flag to suppress warnings
- Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI

### Upstream Sync
Merged the latest llama.cpp changes, including:
- Enhanced KV cache management with ISWA and hybrid memory support
- Improved multi-modal support (mtmd framework)
- New model architectures (Gemma3, Llama4, Qwen3, etc.)
- GPU backend improvements for CUDA, Metal, and ROCm
- Updated quantization support and GGUF format handling

### Documentation
- Updated CLAUDE.md with comprehensive build instructions
- Documented the toolchain constraints and CPU architecture trade-offs
- Removed outdated CI/CD workflows (tesla-k80-*.yml)
- Cleaned up temporary development artifacts

## Rationale

This fork maintains Tesla K80 GPU support (compute 3.7), which was dropped from official Ollama because of its legacy driver and CUDA requirements. The toolchain constraint forms a rigid dependency chain:

- K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI

We accept the loss of cutting-edge CPU optimizations to enable running modern LLMs on legacy but still capable Tesla K80 hardware (12GB of VRAM per GPU).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
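To make the compute 3.7 configuration concrete, the sketch below shows how such a setting could look in CMake. It is a minimal illustration under stated assumptions, not the repository's actual file: the exact architecture list and where the flag is applied inside ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt may differ.

```cmake
# Minimal sketch: enabling Tesla K80 (compute 3.7) under CUDA 11.4.
# "37-virtual" embeds PTX only, so the driver JIT-compiles kernels for sm_37
# at model load time. The other architectures listed here are illustrative
# assumptions, not the repository's exact list.
set(CMAKE_CUDA_ARCHITECTURES "37-virtual;50;61;70;75;80")

# sm_37 is deprecated in CUDA 11.x, so nvcc warns on every translation unit
# unless the warning is suppressed explicitly.
string(APPEND CMAKE_CUDA_FLAGS " -Wno-deprecated-gpu-targets")
```

As described above, a build configured this way still needs the matching legacy host toolchain (driver 470.x, CUDA 11.4.x, GCC 10.x).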
@@ -154,24 +154,55 @@ func TestTemplate(t *testing.T) {
}

func TestParse(t *testing.T) {
	cases := []struct {
	validCases := []struct {
		name     string
		template string
		vars     []string
	}{
		{"{{ .Prompt }}", []string{"prompt", "response"}},
		{"{{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system"}},
		{"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}},
		{"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}},
		{"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}},
		{"{{ range .Messages }}{{ if eq .Role \"tool\" }}Tool Result: {{ .ToolName }} {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role", "toolname"}},
		{`{{- range .Messages }}
		{
			name:     "PromptOnly",
			template: "{{ .Prompt }}",
			vars:     []string{"prompt", "response"},
		},
		{
			name:     "SystemAndPrompt",
			template: "{{ .System }} {{ .Prompt }}",
			vars:     []string{"prompt", "response", "system"},
		},
		{
			name:     "PromptResponseSystem",
			template: "{{ .System }} {{ .Prompt }} {{ .Response }}",
			vars:     []string{"prompt", "response", "system"},
		},
		{
			name:     "ToolsBlock",
			template: "{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}",
			vars:     []string{"prompt", "response", "system", "tools"},
		},
		{
			name:     "MessagesRange",
			template: "{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}",
			vars:     []string{"content", "messages", "role"},
		},
		{
			name:     "ToolResultConditional",
			template: "{{ range .Messages }}{{ if eq .Role \"tool\" }}Tool Result: {{ .ToolName }} {{ .Content }}{{ end }}{{ end }}",
			vars:     []string{"content", "messages", "role", "toolname"},
		},
		{
			name:     "MultilineSystemUserAssistant",
			template: `{{- range .Messages }}
{{- if eq .Role "system" }}SYSTEM:
{{- else if eq .Role "user" }}USER:
{{- else if eq .Role "assistant" }}ASSISTANT:
{{- else if eq .Role "tool" }}TOOL:
{{- end }} {{ .Content }}
{{- end }}`, []string{"content", "messages", "role"}},
		{`{{- if .Messages }}
{{- end }}`,
			vars: []string{"content", "messages", "role"},
		},
		{
			name:     "ChatMLLike",
			template: `{{- if .Messages }}
{{- range .Messages }}<|im_start|>{{ .Role }}
{{ .Content }}<|im_end|>
{{ end }}<|im_start|>assistant
@@ -182,18 +213,60 @@ func TestParse(t *testing.T) {
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
{{- end -}}`, []string{"content", "messages", "prompt", "response", "role", "system"}},
{{- end -}}`,
			vars: []string{"content", "messages", "prompt", "response", "role", "system"},
		},
	}

	for _, tt := range cases {
		t.Run("", func(t *testing.T) {
	for _, tt := range validCases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			tmpl, err := Parse(tt.template)
			if err != nil {
				t.Fatal(err)
				t.Fatalf("Parse returned unexpected error: %v", err)
			}

			if diff := cmp.Diff(tmpl.Vars(), tt.vars); diff != "" {
				t.Errorf("mismatch (-got +want):\n%s", diff)
			gotVars, err := tmpl.Vars()
			if err != nil {
				t.Fatalf("Vars returned unexpected error: %v", err)
			}

			if diff := cmp.Diff(gotVars, tt.vars); diff != "" {
				t.Errorf("Vars mismatch (-got +want):\n%s", diff)
			}
		})
	}
}

func TestParseError(t *testing.T) {
	invalidCases := []struct {
		name     string
		template string
		errorStr string
	}{
		{
			"TemplateNotClosed",
			"{{ .Prompt ",
			"unclosed action",
		},
		{
			"Template",
			`{{define "x"}}{{template "x"}}{{end}}{{template "x"}}`,
			"undefined template specified",
		},
	}

	for _, tt := range invalidCases {
		t.Run(tt.name, func(t *testing.T) {
			_, err := Parse(tt.template)
			if err == nil {
				t.Fatalf("expected Parse to return an error for an invalid template, got nil")
			}

			if !strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.errorStr)) {
				t.Errorf("unexpected error message.\n got: %q\n want substring (case-insensitive): %q", err.Error(), tt.errorStr)
			}
		})
	}