Mirror of https://github.com/dogkeeper886/ollama37.git — synced 2025-12-10 15:57:04 +00:00
This commit represents a complete rework after pulling the latest changes from the official ollama/ollama repository and re-applying the Tesla K80 compatibility patches.

## Key Changes

### CUDA Compute Capability 3.7 Support (Tesla K80)
- Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt
- Updated CMakePresets.json to include compute 3.7 in the "CUDA 11" preset
- Using 37-virtual (PTX with JIT compilation) for maximum compatibility

### Legacy Toolchain Compatibility
- **NVIDIA Driver**: 470.256.02 (last version supporting Kepler/K80)
- **CUDA Version**: 11.4.4 (last CUDA 11.x supporting compute 3.7)
- **GCC Version**: 10.5.0 (required by CUDA 11.4 host_config.h)

### CPU Architecture Trade-offs
Due to the GCC 10.5 limitation, newer CPU optimizations are sacrificed:
- Alderlake CPU variant enabled WITHOUT AVX_VNNI (requires GCC 11+)
- Still supports: SSE4.2, AVX, F16C, AVX2, BMI2, FMA
- Performance impact: ~3-7% on newer CPUs (acceptable for K80 compatibility)

### Build System Updates
- Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7
- Added the -Wno-deprecated-gpu-targets flag to suppress warnings
- Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI

### Upstream Sync
Merged the latest llama.cpp changes, including:
- Enhanced KV cache management with ISWA and hybrid memory support
- Improved multi-modal support (mtmd framework)
- New model architectures (Gemma3, Llama4, Qwen3, etc.)
- GPU backend improvements for CUDA, Metal, and ROCm
- Updated quantization support and GGUF format handling

### Documentation
- Updated CLAUDE.md with comprehensive build instructions
- Documented toolchain constraints and CPU architecture trade-offs
- Removed outdated CI/CD workflows (tesla-k80-*.yml)
- Cleaned up temporary development artifacts

## Rationale
This fork maintains Tesla K80 GPU support (compute 3.7), which was dropped from official Ollama because of its legacy driver/CUDA requirements. The toolchain constraints form a rigid chain:
- K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI

We accept the loss of cutting-edge CPU optimizations to enable running modern LLMs on legacy but still capable Tesla K80 hardware (12 GB VRAM per GPU).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
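A quick way to sanity-check such a build is to query the visible CUDA devices and their compute capability; a K80 should report 3.7 and roughly 12 GB per GPU. The snippet below is an illustrative sketch only, not part of the repository: the file name check_cc.cu and build line are made up, and it relies solely on the standard CUDA runtime calls cudaGetDeviceCount and cudaGetDeviceProperties.

```cpp
// Illustrative sketch (not from the ollama37 tree): list CUDA devices and their
// compute capability so a Tesla K80 (compute 3.7) setup can be sanity-checked.
// Assumed build line: nvcc check_cc.cu -o check_cc   (CUDA 11.4, GCC 10.5 host)
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        std::fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess) {
            continue; // skip devices we cannot query
        }
        // A K80 reports major=3, minor=7 (sm_37); this fork targets 37-virtual PTX.
        std::printf("device %d: %s, compute %d.%d, %.1f GiB VRAM\n",
                    i, prop.name, prop.major, prop.minor,
                    prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
    }
    return 0;
}
```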
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Wed, 16 Oct 2025 20:37:19 -0700
Subject: [PATCH] interleave multi rope

Since ollama doesn't use mrope for anything else, change it to mean the
interleaved version used for qwen3vl.

---
 ggml/src/ggml-cpu/ops.cpp                           |  7 ++-----
 ggml/src/ggml-cuda/rope.cu                          | 12 +++---------
 ggml/src/ggml-metal/ggml-metal.metal                | 10 +++-------
 ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp | 12 +++---------
 4 files changed, 11 insertions(+), 30 deletions(-)

diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
index 31478dd8e..4d1ed207e 100644
--- a/ggml/src/ggml-cpu/ops.cpp
+++ b/ggml/src/ggml-cpu/ops.cpp
@@ -5509,15 +5509,12 @@ static void ggml_mrope_cache_init(
         }

         float theta = theta_t;
-        if (sector >= sections[0] && sector < sec_w) {
+        if (sector % 3 == 1 && sector < 1 + 3 * sections[1]) {
             theta = theta_h;
         }
-        else if (sector >= sec_w && sector < sec_w + sections[2]) {
+        else if (sector % 3 == 2 && sector < 2 + 3 * sections[2]) {
             theta = theta_w;
         }
-        else if (sector >= sec_w + sections[2]) {
-            theta = theta_e;
-        }

         rope_yarn(
             theta/ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
diff --git a/ggml/src/ggml-cuda/rope.cu b/ggml/src/ggml-cuda/rope.cu
index d058504cd..287fe9d2c 100644
--- a/ggml/src/ggml-cuda/rope.cu
+++ b/ggml/src/ggml-cuda/rope.cu
@@ -151,19 +151,13 @@ static __global__ void rope_multi(
     const int sec_w = sections.v[1] + sections.v[0];
     const int sector = (i0 / 2) % sect_dims;

-    float theta_base = 0.0;
-    if (sector < sections.v[0]) {
-        theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
-    }
-    else if (sector >= sections.v[0] && sector < sec_w) {
+    float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
+    if (sector % 3 == 1 && sector < 1 + 3 * sections.v[1]) {
         theta_base = pos[channel_x + ne2 * 1]*powf(theta_scale, i0/2.0f);
     }
-    else if (sector >= sec_w && sector < sec_w + sections.v[2]) {
+    else if (sector % 3 == 2 && sector < 2 + 3 * sections.v[2]) {
         theta_base = pos[channel_x + ne2 * 2]*powf(theta_scale, i0/2.0f);
     }
-    else if (sector >= sec_w + sections.v[2]) {
-        theta_base = pos[channel_x + ne2 * 3]*powf(theta_scale, i0/2.0f);
-    }

     const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;

diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal
index 375a0c7fd..9866c96b4 100644
--- a/ggml/src/ggml-metal/ggml-metal.metal
+++ b/ggml/src/ggml-metal/ggml-metal.metal
@@ -3858,15 +3858,11 @@ kernel void kernel_rope_multi(
     const int sec_w012 = args.sect_0 + args.sect_1 + args.sect_2; // end of section 2
     const int sector = ic % sect_dims;

-    float theta_base;
-    if (sector < args.sect_0) {
-        theta_base = (float) pos[i2];
-    } else if (sector < sec_w01) {
+    float theta_base = (float) pos[i2];
+    if (sector % 3 == 1 && sector < 1 + 3 * args.sect_1) {
         theta_base = (float) pos[i2 + args.ne02];
-    } else if (sector < sec_w012) {
+    } else if (sector % 3 == 2 && sector < 2 + 3 * args.sect_2) {
         theta_base = (float) pos[i2 + args.ne02 * 2];
-    } else {
-        theta_base = (float) pos[i2 + args.ne02 * 3];
     }
     // end of mrope

diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp b/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp
index 111286b49..6fc2b42f8 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp
@@ -31,19 +31,13 @@ void main() {
     const int sec_w = p.sections[1] + p.sections[0];
     const uint sector = (i0 / 2) % sect_dims;

-    float theta_base = 0.0;
-    if (sector < p.sections[0]) {
-        theta_base = data_pos[channel_x]*pow(p.theta_scale, i0/2.0f);
-    }
-    else if (sector >= p.sections[0] && sector < sec_w) {
+    float theta_base = data_pos[channel_x]*pow(p.theta_scale, i0/2.0f);
+    if (sector % 3 == 1 && sector < 1 + 3 * p.sections[1]) {
         theta_base = data_pos[channel_x + ne2 * 1]*pow(p.theta_scale, i0/2.0f);
     }
-    else if (sector >= sec_w && sector < sec_w + p.sections[2]) {
+    else if (sector % 3 == 2 && sector < 2 + 3 * p.sections[2]) {
         theta_base = data_pos[channel_x + ne2 * 2]*pow(p.theta_scale, i0/2.0f);
     }
-    else if (sector >= sec_w + p.sections[2]) {
-        theta_base = data_pos[channel_x + ne2 * 3]*pow(p.theta_scale, i0/2.0f);
-    }

     const float freq_factor = p.has_ff != 0 ? data_ff[i0/2] : 1.0f;
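All four hunks in this patch make the same substitution: the contiguous-section test (sector < sections[0], then sector < sec_w, and so on) becomes an interleaved test keyed on sector % 3, and the fourth, vision-encoder section is dropped. The standalone sketch below is not part of the patch; it contrasts the two mappings, and the section sizes in main() are made-up illustration values, not taken from any model.

```cpp
// Standalone sketch of the old vs. new mrope sector-to-axis mapping.
// Hypothetical section sizes are used purely for illustration.
#include <cstdio>

// Old layout: each axis owns a contiguous range of sectors
// [0, s0) -> t, [s0, s0+s1) -> h, [s0+s1, s0+s1+s2) -> w, rest -> e.
static char contiguous_axis(int sector, const int s[4]) {
    const int sec_w = s[0] + s[1];
    if (sector < s[0])         return 't'; // temporal
    if (sector < sec_w)        return 'h'; // height
    if (sector < sec_w + s[2]) return 'w'; // width
    return 'e';                            // extra (vision encoder), removed by this patch
}

// New layout: axes are interleaved t,h,w,t,h,w,...; sectors past the declared
// section sizes, and the old 'e' axis, fall back to the temporal axis.
static char interleaved_axis(int sector, const int s[4]) {
    if (sector % 3 == 1 && sector < 1 + 3 * s[1]) return 'h';
    if (sector % 3 == 2 && sector < 2 + 3 * s[2]) return 'w';
    return 't';
}

int main() {
    const int s[4] = {4, 3, 3, 0}; // hypothetical sizes, not from a real model config
    const int sect_dims = s[0] + s[1] + s[2] + s[3];
    for (int sector = 0; sector < sect_dims; ++sector) {
        std::printf("sector %2d: contiguous=%c interleaved=%c\n",
                    sector, contiguous_axis(sector, s), interleaved_axis(sector, s));
    }
    return 0;
}
```

With these example sizes the interleaved mapping assigns the height axis to sectors 1, 4, 7 and the width axis to sectors 2, 5, 8, whereas the contiguous mapping keeps each axis in one block (0-3, 4-6, 7-9).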