Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-10 07:46:59 +00:00)
* add build to .dockerignore
* test: only build one arch
* add build to .gitignore
* fix ccache path
* filter amdgpu targets
* only filter if autodetecting
* Don't clobber gpu list for default runner
  This ensures the GPU specific environment variables are set properly
* explicitly set CXX compiler for HIP
* Update build_windows.ps1
  This isn't complete, but is close. Dependencies are missing, and it only builds the "default" preset.
* build: add ollama subdir
* add .git to .dockerignore
* docs: update development.md
* update build_darwin.sh
* remove unused scripts
* llm: add cwd and build/lib/ollama to library paths
* default DYLD_LIBRARY_PATH to LD_LIBRARY_PATH in runner on macOS
* add additional cmake output vars for msvc
* interim edits to make server detection logic work with dll directories like lib/ollama/cuda_v12
* remove unnecessary filepath.Dir, cleanup
* add hardware-specific directory to path
* use absolute server path
* build: linux arm
* cmake install targets
* remove unused files
* ml: visit each library path once
* build: skip cpu variants on arm
* build: install cpu targets
* build: fix workflow
* shorter names
* fix rocblas install
* docs: clean up development.md
* consistent build dir removal in development.md
* silence -Wimplicit-function-declaration build warnings in ggml-cpu
* update readme
* update development readme
* llm: update library lookup logic now that there is one runner (#8587)
* tweak development.md
* update docs
* add windows cuda/rocm tests

---------

Co-authored-by: jmorganca <jmorganca@gmail.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Sat, 4 Jan 2025 22:52:48 -0800
Subject: [PATCH] re-enable gpu for clip

---
 examples/llava/clip.cpp | 86 ++++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index b3c1829f..718052e1 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -8,25 +8,25 @@
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
 
-//#ifdef GGML_USE_CUDA
-//#include "ggml-cuda.h"
-//#endif
-//
-//#ifdef GGML_USE_SYCL
-//#include "ggml-sycl.h"
-//#endif
-//
-//#ifdef GGML_USE_METAL
-//#include "ggml-metal.h"
-//#endif
-//
-//#ifdef GGML_USE_CANN
-//#include "ggml-cann.h"
-//#endif
-//
-//#ifdef GGML_USE_VULKAN
-//#include "ggml-vulkan.h"
-//#endif
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#ifdef GGML_USE_CANN
+#include "ggml-cann.h"
+#endif
+
+#ifdef GGML_USE_VULKAN
+#include "ggml-vulkan.h"
+#endif
 
 #define STB_IMAGE_IMPLEMENTATION
 #include "stb_image.h"
@@ -1235,30 +1235,30 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         }
     }
 
-//#ifdef GGML_USE_CUDA
-//    new_clip->backend = ggml_backend_cuda_init(0);
-//    LOG_INF("%s: CLIP using CUDA backend\n", __func__);
-//#endif
-//
-//#ifdef GGML_USE_METAL
-//    new_clip->backend = ggml_backend_metal_init();
-//    LOG_INF("%s: CLIP using Metal backend\n", __func__);
-//#endif
-//
-//#ifdef GGML_USE_CANN
-//    new_clip->backend = ggml_backend_cann_init(0);
-//    LOG_INF("%s: CLIP using CANN backend\n", __func__);
-//#endif
-//
-//#ifdef GGML_USE_VULKAN
-//    new_clip->backend = ggml_backend_vk_init(0);
-//    LOG_INF("%s: CLIP using Vulkan backend\n", __func__);
-//#endif
-//
-//#ifdef GGML_USE_SYCL
-//    new_clip->backend = ggml_backend_sycl_init(0);
-//    LOG_INF("%s: CLIP using SYCL backend\n", __func__);
-//#endif
+#ifdef GGML_USE_CUDA
+    new_clip->backend = ggml_backend_cuda_init(0);
+    LOG_INF("%s: CLIP using CUDA backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_METAL
+    new_clip->backend = ggml_backend_metal_init();
+    LOG_INF("%s: CLIP using Metal backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_CANN
+    new_clip->backend = ggml_backend_cann_init(0);
+    LOG_INF("%s: CLIP using CANN backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_VULKAN
+    new_clip->backend = ggml_backend_vk_init(0);
+    LOG_INF("%s: CLIP using Vulkan backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_SYCL
+    new_clip->backend = ggml_backend_sycl_init(0);
+    LOG_INF("%s: CLIP using SYCL backend\n", __func__);
+#endif
 
     if (!new_clip->backend) {
         new_clip->backend = ggml_backend_cpu_init();