ollama37/model/renderers/qwen3vl.go
Shang Chieh Tseng ef14fb5b26 Sync with upstream ollama/ollama and restore Tesla K80 (compute 3.7) support
This commit represents a complete rework after pulling the latest changes from the
official ollama/ollama repository and re-applying the Tesla K80 compatibility patches.

## Key Changes

### CUDA Compute Capability 3.7 Support (Tesla K80)
- Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt
- Updated CMakePresets.json to include compute 3.7 in "CUDA 11" preset
- Using 37-virtual (PTX with JIT compilation) for maximum compatibility

### Legacy Toolchain Compatibility
- **NVIDIA Driver**: 470.256.02 (last version supporting Kepler/K80)
- **CUDA Version**: 11.4.4 (last CUDA 11.x supporting compute 3.7)
- **GCC Version**: 10.5.0 (required by CUDA 11.4 host_config.h)

### CPU Architecture Trade-offs
Due to the GCC 10.5 limitation, newer CPU optimizations had to be sacrificed:
- Alderlake CPU variant enabled WITHOUT AVX_VNNI (requires GCC 11+)
- Still supports: SSE4.2, AVX, F16C, AVX2, BMI2, FMA
- Performance impact: ~3-7% on newer CPUs (acceptable for K80 compatibility)

### Build System Updates
- Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7
- Added -Wno-deprecated-gpu-targets flag to suppress warnings
- Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI

### Upstream Sync
Merged latest llama.cpp changes including:
- Enhanced KV cache management with ISWA and hybrid memory support
- Improved multi-modal support (mtmd framework)
- New model architectures (Gemma3, Llama4, Qwen3, etc.)
- GPU backend improvements for CUDA, Metal, and ROCm
- Updated quantization support and GGUF format handling

### Documentation
- Updated CLAUDE.md with comprehensive build instructions
- Documented toolchain constraints and CPU architecture trade-offs
- Removed outdated CI/CD workflows (tesla-k80-*.yml)
- Cleaned up temporary development artifacts

## Rationale

This fork maintains Tesla K80 GPU support (compute 3.7), which was dropped from
official Ollama because of its legacy driver/CUDA requirements. The toolchain
constraint forms a fixed dependency chain:
- K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI

We accept the loss of cutting-edge CPU optimizations to enable running modern
LLMs on legacy but still capable Tesla K80 hardware (12GB VRAM per GPU).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-05 14:03:05 +08:00


package renderers

import (
	"encoding/json"
	"strings"

	"github.com/ollama/ollama/api"
)

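// marshalWithSpaces JSON-encodes v and inserts a space after every ':' and ','
// that occurs outside a string literal, so e.g. {"a":1,"b":[2,3]} becomes
// {"a": 1, "b": [2, 3]}. String contents (including escaped quotes) are copied
// through untouched.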
func marshalWithSpaces(v any) ([]byte, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	out := make([]byte, 0, len(b)+len(b)/8)
	inStr, esc := false, false
	for _, c := range b {
		if inStr {
			out = append(out, c)
			if esc {
				esc = false
				continue
			}
			if c == '\\' {
				esc = true
				continue
			}
			if c == '"' {
				inStr = false
			}
			continue
		}
		switch c {
		case '"':
			inStr = true
			out = append(out, c)
		case ':':
			out = append(out, ':', ' ')
		case ',':
			out = append(out, ',', ' ')
		default:
			out = append(out, c)
		}
	}
	return out, nil
}

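// Qwen3VLRenderer builds Qwen3-VL chat prompts in ChatML form
// (<|im_start|>/<|im_end|> turns). isThinking controls whether <think> blocks
// are emitted for assistant turns; useImgTags selects the generic "[img]"
// placeholder instead of the Qwen vision tokens for image inputs.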
type Qwen3VLRenderer struct {
	isThinking bool
	useImgTags bool
}

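// renderContent returns the text of a single message, prefixed with one image
// placeholder per attached image.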
func (r *Qwen3VLRenderer) renderContent(content api.Message) string {
	// This assumes all images are at the front of the message - same assumption as ollama/ollama/runner.go
	var subSb strings.Builder
	for range content.Images {
		// TODO: (jmorganca): how to render this is different for different
		// model backends, and so we should eventually parameterize this or
		// only output a placeholder such as [img]
		if r.useImgTags {
			subSb.WriteString("[img]")
		} else {
			subSb.WriteString("<|vision_start|><|image_pad|><|vision_end|>")
		}
	}
	// TODO: support videos
	subSb.WriteString(content.Content)
	return subSb.String()
}

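// Render produces the full prompt for a conversation. Tool definitions are
// injected into the system turn, tool responses are folded back into user
// turns, and the prompt ends with an opened assistant turn unless the last
// message is already a partial assistant reply (prefill).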
func (r *Qwen3VLRenderer) Render(messages []api.Message, tools []api.Tool, _ *api.ThinkValue) (string, error) {
	var sb strings.Builder
	if len(tools) > 0 {
		sb.WriteString(imStartTag + "system\n")
		if len(messages) > 0 && messages[0].Role == "system" {
			sb.WriteString(messages[0].Content + "\n\n")
		}
		sb.WriteString("# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>")
		for _, tool := range tools {
			sb.WriteString("\n")
			if b, err := marshalWithSpaces(tool); err == nil {
				sb.Write(b)
			}
		}
		sb.WriteString("\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n")
	} else if len(messages) > 0 && messages[0].Role == "system" {
		sb.WriteString("<|im_start|>system\n" + messages[0].Content + "<|im_end|>\n")
	}
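	// Walk backwards to find the last "real" user query: trailing user
	// messages that only wrap a <tool_response> belong to a multi-step tool
	// exchange and do not reset lastQueryIndex. Thinking content is emitted
	// only for assistant turns that come after this index.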
	multiStepTool := true
	lastQueryIndex := len(messages) - 1 // so this is the last user message
	for i := len(messages) - 1; i >= 0; i-- {
		message := messages[i]
		if multiStepTool && message.Role == "user" {
			// Check if content starts with <tool_response> and ends with </tool_response>
			content := r.renderContent(message)
			if !(strings.HasPrefix(content, "<tool_response>") && strings.HasSuffix(content, "</tool_response>")) {
				multiStepTool = false
				lastQueryIndex = i
			}
		}
	}
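	// Render each message in order. If the final message is a partial
	// assistant reply it is left open for prefill; otherwise a fresh
	// assistant turn is opened at the very end.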
	for i, message := range messages {
		content := r.renderContent(message)
		lastMessage := i == len(messages)-1
		prefill := lastMessage && message.Role == "assistant"
		if message.Role == "user" || message.Role == "system" && i != 0 {
			sb.WriteString("<|im_start|>" + message.Role + "\n" + content + "<|im_end|>\n")
		} else if message.Role == "assistant" {
			contentReasoning := ""
			if r.isThinking {
				if message.Thinking != "" {
					contentReasoning = message.Thinking
				}
			}
			if r.isThinking && i > lastQueryIndex {
				if i == len(messages)-1 || contentReasoning != "" {
					sb.WriteString("<|im_start|>" + message.Role + "\n<think>\n" + strings.Trim(contentReasoning, "\n")) // do we want to add a new line here?
					if content != "" {
						sb.WriteString("\n</think>\n\n" + strings.TrimLeft(content, "\n"))
					}
				} else {
					sb.WriteString("<|im_start|>" + message.Role + "\n" + content)
				}
			} else {
				sb.WriteString("<|im_start|>" + message.Role + "\n" + content)
			}
			if len(message.ToolCalls) > 0 {
				for j, toolCall := range message.ToolCalls {
					if j > 0 || content != "" {
						sb.WriteString("\n")
					}
					sb.WriteString("<tool_call>\n{\"name\": \"" + toolCall.Function.Name + "\", \"arguments\": ")
					if b, err := marshalWithSpaces(toolCall.Function.Arguments); err == nil {
						sb.Write(b)
					}
					sb.WriteString("}\n</tool_call>")
				}
			}
			if !prefill {
				sb.WriteString("<|im_end|>\n")
			}
		} else if message.Role == "tool" {
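			// Consecutive "tool" messages are merged into a single user turn:
			// the <|im_start|>user header is written only for the first tool
			// message in a run, and <|im_end|> only after the last one.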
			if i == 0 || messages[i-1].Role != "tool" {
				sb.WriteString("<|im_start|>user")
			}
			sb.WriteString("\n<tool_response>\n" + message.Content + "\n</tool_response>")
			if i == len(messages)-1 || messages[i+1].Role != "tool" {
				sb.WriteString("<|im_end|>\n")
			}
		}
		// prefill at the end
		if lastMessage && !prefill {
			sb.WriteString("<|im_start|>assistant\n")
			if r.isThinking {
				sb.WriteString("<think>\n")
			}
		}
	}
	return sb.String(), nil
}
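
For orientation, here is a minimal sketch of how this renderer turns messages into a prompt. It is not part of qwen3vl.go: it is written as if it lived inside package renderers (the struct fields are unexported and this file defines no constructor), and it assumes the rest of the package, including the imStartTag constant referenced above, is present.

```go
package renderers

import (
	"fmt"

	"github.com/ollama/ollama/api"
)

// Sketch only: constructs the renderer directly, which works from inside the
// package because isThinking/useImgTags are unexported.
func ExampleQwen3VLRenderer_Render() {
	r := &Qwen3VLRenderer{isThinking: true}
	prompt, err := r.Render([]api.Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Why is the sky blue?"},
	}, nil, nil)
	if err != nil {
		panic(err)
	}
	// Prints a ChatML-style prompt: a system turn, a user turn, then an opened
	// assistant turn followed by "<think>\n" because isThinking is set.
	fmt.Println(prompt)
}
```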