mirror of
https://github.com/dogkeeper886/ollama37.git
synced 2025-12-12 16:57:04 +00:00
Sync with upstream ollama/ollama and restore Tesla K80 (compute 3.7) support
This commit represents a complete rework after pulling the latest changes from official ollama/ollama repository and re-applying Tesla K80 compatibility patches. ## Key Changes ### CUDA Compute Capability 3.7 Support (Tesla K80) - Added sm_37 (compute 3.7) to CMAKE_CUDA_ARCHITECTURES in CMakeLists.txt - Updated CMakePresets.json to include compute 3.7 in "CUDA 11" preset - Using 37-virtual (PTX with JIT compilation) for maximum compatibility ### Legacy Toolchain Compatibility - **NVIDIA Driver**: 470.256.02 (last version supporting Kepler/K80) - **CUDA Version**: 11.4.4 (last CUDA 11.x supporting compute 3.7) - **GCC Version**: 10.5.0 (required by CUDA 11.4 host_config.h) ### CPU Architecture Trade-offs Due to GCC 10.5 limitation, sacrificed newer CPU optimizations: - Alderlake CPU variant enabled WITHOUT AVX_VNNI (requires GCC 11+) - Still supports: SSE4.2, AVX, F16C, AVX2, BMI2, FMA - Performance impact: ~3-7% on newer CPUs (acceptable for K80 compatibility) ### Build System Updates - Modified ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt for compute 3.7 - Added -Wno-deprecated-gpu-targets flag to suppress warnings - Updated ml/backend/ggml/ggml/src/CMakeLists.txt for Alderlake without AVX_VNNI ### Upstream Sync Merged latest llama.cpp changes including: - Enhanced KV cache management with ISWA and hybrid memory support - Improved multi-modal support (mtmd framework) - New model architectures (Gemma3, Llama4, Qwen3, etc.) - GPU backend improvements for CUDA, Metal, and ROCm - Updated quantization support and GGUF format handling ### Documentation - Updated CLAUDE.md with comprehensive build instructions - Documented toolchain constraints and CPU architecture trade-offs - Removed outdated CI/CD workflows (tesla-k80-*.yml) - Cleaned up temporary development artifacts ## Rationale This fork maintains Tesla K80 GPU support (compute 3.7) which was dropped in official Ollama due to legacy driver/CUDA requirements. The toolchain constraint creates a deadlock: - K80 → Driver 470 → CUDA 11.4 → GCC 10 → No AVX_VNNI We accept the loss of cutting-edge CPU optimizations to enable running modern LLMs on legacy but still capable Tesla K80 hardware (12GB VRAM per GPU). 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
77
llama/patches/0003-clip-unicode.patch
Normal file
77
llama/patches/0003-clip-unicode.patch
Normal file
@@ -0,0 +1,77 @@
|
||||
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
||||
From: jmorganca <jmorganca@gmail.com>
|
||||
Date: Tue, 8 Apr 2025 15:34:37 -0700
|
||||
Subject: [PATCH] clip-unicode
|
||||
|
||||
fixes loading vision models in llama.cpp on windows
|
||||
filesystems for paths that include wide characters
|
||||
---
|
||||
tools/mtmd/clip.cpp | 39 +++++++++++++++++++++++++++++++++++++++
|
||||
1 file changed, 39 insertions(+)
|
||||
|
||||
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
|
||||
index 98e68af2..6699b75a 100644
|
||||
--- a/tools/mtmd/clip.cpp
|
||||
+++ b/tools/mtmd/clip.cpp
|
||||
@@ -28,6 +28,19 @@
|
||||
#include <numeric>
|
||||
#include <functional>
|
||||
|
||||
+#if defined(_WIN32)
|
||||
+#define WIN32_LEAN_AND_MEAN
|
||||
+#ifndef NOMINMAX
|
||||
+ #define NOMINMAX
|
||||
+#endif
|
||||
+#include <windows.h>
|
||||
+#if __GLIBCXX__
|
||||
+#include <cstdio>
|
||||
+#include <ext/stdio_filebuf.h>
|
||||
+#include <fcntl.h>
|
||||
+#endif
|
||||
+#endif
|
||||
+
|
||||
struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
|
||||
|
||||
enum ffn_op_type {
|
||||
@@ -2762,7 +2775,29 @@ struct clip_model_loader {
|
||||
{
|
||||
std::vector<uint8_t> read_buf;
|
||||
|
||||
+#ifdef _WIN32
|
||||
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname.c_str(), -1, NULL, 0);
|
||||
+ if (!wlen) {
|
||||
+ throw std::runtime_error(string_format("%s: failed to convert filename to wide string\n", __func__));
|
||||
+ }
|
||||
+ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t));
|
||||
+ wlen = MultiByteToWideChar(CP_UTF8, 0, fname.c_str(), -1, wbuf, wlen);
|
||||
+ if (!wlen) {
|
||||
+ free(wbuf);
|
||||
+ throw std::runtime_error(string_format("%s: failed to convert filename to wide string\n", __func__));
|
||||
+ }
|
||||
+#if __GLIBCXX__
|
||||
+ int fd = _wopen(wbuf, _O_RDONLY | _O_BINARY);
|
||||
+ __gnu_cxx::stdio_filebuf<char> buffer(fd, std::ios_base::in);
|
||||
+ std::istream fin(&buffer);
|
||||
+#else // MSVC
|
||||
+ // unused in our current build
|
||||
+ auto fin = std::ifstream(wbuf, std::ios::binary);
|
||||
+#endif
|
||||
+ free(wbuf);
|
||||
+#else
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
+#endif
|
||||
if (!fin) {
|
||||
throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
|
||||
}
|
||||
@@ -2789,7 +2824,11 @@ struct clip_model_loader {
|
||||
ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
|
||||
}
|
||||
}
|
||||
+#if defined(_WIN32) && defined(__GLIBCXX__)
|
||||
+ close(fd);
|
||||
+#else
|
||||
fin.close();
|
||||
+#endif
|
||||
|
||||
LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
|
||||
}
|
||||
Reference in New Issue
Block a user