update llama.cpp submodule to ceca1ae (#3064)
@@ -1,19 +1,21 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index f255ad76..914ecfdd 100644
+index 8fe5e0b1..3e82acb9 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -1101,12 +1101,13 @@ struct server_context {
+@@ -997,13 +997,15 @@ struct llama_server_context
      slot.n_sent_text += result.text_to_send.size();
      // add the token to slot queue and cache
  }

 -    slot.add_token_string(result);
-     if (slot.params.stream) {
++
+     if (slot.params.stream)
+     {
          send_partial_response(slot, result);
      }

 +    slot.add_token_string(result);
 +
-     if (incomplete) {
+     if (incomplete)
+     {
          slot.has_next_token = true;
      }
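Both versions of this hunk make the same functional change, re-targeted at the renamed server context: slot.add_token_string(result) moves from before the streaming block to after it, so the partial response for a token is sent out before that token is added to the slot's queue and cache. Below is a self-contained toy of the patched ordering; the types are stand-ins, and only the call order is taken from the patch:

// Toy model of the reordering above (not llama.cpp's real types): the
// partial chunk is streamed before the token is cached, matching the
// patched order of send_partial_response() and add_token_string().
#include <iostream>
#include <string>
#include <vector>

struct Slot {
    std::vector<std::string> cache;  // stands in for the slot token cache
    bool stream = true;
    void add_token_string(const std::string & t) { cache.push_back(t); }
};

static void send_partial_response(Slot &, const std::string & t) {
    std::cout << "streamed: " << t << '\n';
}

static void process_token(Slot & slot, const std::string & tok, bool incomplete) {
    if (slot.stream) {
        send_partial_response(slot, tok);  // stream first...
    }
    slot.add_token_string(tok);            // ...then cache (the moved call)
    if (incomplete) {
        // an incomplete multi-byte token would keep generation going
    }
}

int main() {
    Slot s;
    process_token(s, "Hello", /*incomplete=*/false);
    return 0;
}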
@@ -1,10 +1,10 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index b14cca61..02bfd4b1 100644
+index 8fe5e0b1..53bf39c1 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -29,6 +29,10 @@
+@@ -31,6 +31,10 @@
- #include <atomic>
  #include <signal.h>
+ #include <memory>

 +#ifdef GGML_USE_CUBLAS
 +extern "C" GGML_CALL void ggml_free_cublas(void);
@@ -12,8 +12,8 @@ index b14cca61..02bfd4b1 100644
 +
  using json = nlohmann::json;

- bool server_verbose = false;
-@@ -664,6 +668,10 @@ struct server_context {
+ struct server_params {
+@@ -363,6 +367,10 @@ struct llama_server_context
      llama_free_model(model);
      model = nullptr;
  }
@@ -23,8 +23,8 @@ index b14cca61..02bfd4b1 100644
 +#endif
  }

- bool load_model(const gpt_params & params_) {
-@@ -3499,6 +3507,7 @@ int main(int argc, char ** argv) {
+ bool load_model(const gpt_params &params_)
+@@ -3543,6 +3551,7 @@ int main(int argc, char **argv)
      sigemptyset (&sigint_action.sa_mask);
      sigint_action.sa_flags = 0;
      sigaction(SIGINT, &sigint_action, NULL);
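The server-side half of the leaks patch is unchanged in substance: declare an external ggml_free_cublas() under GGML_USE_CUBLAS, call it where the context frees its model, and add one line to main()'s signal setup (the added line itself is not visible in this view). A minimal sketch of that teardown pattern follows, with stand-in types so it compiles on its own; the real declaration presumably comes from the ggml-cuda.h change further down:

// Stand-alone sketch of the shutdown hook this patch wires into server.cpp.
// ggml_free_cublas and GGML_USE_CUBLAS come from the patch; everything else
// is a stand-in. The real function body lives in ggml-cuda.cu.
#include <cstdio>

#define GGML_USE_CUBLAS  // assume a cuBLAS build for this sketch

#ifdef GGML_USE_CUBLAS
extern "C" void ggml_free_cublas(void);
#endif

struct server_context_sketch {
    void * model = nullptr;

    ~server_context_sketch() {
        // free the model first, then release the cuBLAS state it used
        model = nullptr;
#ifdef GGML_USE_CUBLAS
        ggml_free_cublas();
#endif
    }
};

// stub so the sketch links; the patch adds the real body to ggml-cuda.cu
extern "C" void ggml_free_cublas(void) { std::puts("cuBLAS state released"); }

int main() { server_context_sketch ctx; }  // destructor runs at scope exit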
@@ -33,10 +33,10 @@ index b14cca61..02bfd4b1 100644
      auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
          return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
 diff --git a/ggml-cuda.cu b/ggml-cuda.cu
-index c207ff87..945708a4 100644
+index 72bcec8c..6c934e8c 100644
 --- a/ggml-cuda.cu
 +++ b/ggml-cuda.cu
-@@ -46,6 +46,7 @@
+@@ -43,6 +43,7 @@
  #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
  #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6
  #define cublasCreate hipblasCreate
@@ -44,7 +44,7 @@ index c207ff87..945708a4 100644
  #define cublasGemmEx hipblasGemmEx
  #define cublasGemmBatchedEx hipblasGemmBatchedEx
  #define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
-@@ -8014,10 +8015,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
+@@ -8751,10 +8752,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
      return g_cublas_loaded;
  }

@@ -58,7 +58,7 @@ index c207ff87..945708a4 100644

  #ifdef __HIP_PLATFORM_AMD__
  // Workaround for a rocBLAS bug when using multiple graphics cards:
-@@ -8027,7 +8028,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8764,7 +8765,7 @@ GGML_CALL void ggml_init_cublas() {
  #endif

      if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
@@ -67,7 +67,7 @@ index c207ff87..945708a4 100644
          g_cublas_loaded = false;
          fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
          return;
-@@ -8098,7 +8099,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8835,7 +8836,7 @@ GGML_CALL void ggml_init_cublas() {
      // configure logging to stdout
      // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));

@@ -76,7 +76,7 @@ index c207ff87..945708a4 100644
          g_cublas_loaded = true;
      }
  }
-@@ -11753,3 +11754,23 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
+@@ -12490,3 +12491,23 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
      }
      return device_count;
  }
@@ -100,6 +100,7 @@ index c207ff87..945708a4 100644
 +
 +    g_cublas_initialized = false;
 +}
+\ No newline at end of file
 diff --git a/ggml-cuda.h b/ggml-cuda.h
 index b1ebd61d..6dd58ddf 100644
 --- a/ggml-cuda.h
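Only the tail of the twenty lines this patch appends to ggml-cuda.cu is visible in the hunk above: the function ends by clearing g_cublas_initialized. The following is a hypothetical reconstruction of what such a teardown could look like, assuming the usual per-device handle array in ggml-cuda.cu; g_cublas_handles and CUBLAS_CHECK here are assumptions, not lines taken from the patch:

// Hypothetical sketch of ggml_free_cublas(); only "g_cublas_initialized =
// false;" and the closing brace are visible in the diff above. Destroying
// each per-device cublasHandle_t lets the driver release its workspaces,
// and clearing the flag allows ggml_init_cublas() to run again later.
extern "C" GGML_CALL void ggml_free_cublas(void) {
    for (int id = 0; id < g_device_count; ++id) {
        if (g_cublas_handles[id] != nullptr) {
            CUBLAS_CHECK(cublasDestroy(g_cublas_handles[id]));
            g_cublas_handles[id] = nullptr;
        }
    }

    g_cublas_initialized = false;
}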
@@ -1,13 +0,0 @@
-diff --git a/llama.cpp b/llama.cpp
-index b19616e8..519b9602 100644
---- a/llama.cpp
-+++ b/llama.cpp
-@@ -9938,7 +9938,7 @@ struct llm_tokenizer_wpm {
-     }
-
-     uint32_t to_lower(uint32_t code) {
--        static const std::locale locale("en_US.UTF-8");
-+        static const std::locale locale("");
- #if defined(_WIN32)
-         if (code > 0xFFFF) {
-             return code;
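The deleted file is the locale workaround for the WPM tokenizer: it replaced the hard-coded "en_US.UTF-8" with the environment default, and its removal suggests the fix is no longer needed at the new submodule commit. The problem it avoided is easy to reproduce in isolation, since std::locale's named constructor throws std::runtime_error when the requested locale is not installed on the system, while std::locale("") resolves to whatever the environment provides:

// Why hard-coding a locale name is fragile: the named-locale constructor
// throws if "en_US.UTF-8" is not installed; std::locale("") instead falls
// back to the environment default (LC_ALL/LANG) rather than one fixed name.
#include <iostream>
#include <locale>
#include <stdexcept>

int main() {
    try {
        std::locale fixed("en_US.UTF-8");  // throws on systems without it
        std::cout << "got: " << fixed.name() << '\n';
    } catch (const std::runtime_error & e) {
        std::cout << "en_US.UTF-8 unavailable: " << e.what() << '\n';
    }

    std::locale env("");  // environment default, not tied to one name
    std::cout << "environment locale: " << env.name() << '\n';
    return 0;
}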