update llama.cpp submodule to 6cdabe6 (#2999)

Jeffrey Morgan
2024-03-08 00:26:20 -08:00
committed by GitHub
parent b886bec3f9
commit 0e4669b04f
4 changed files with 32 additions and 33 deletions


@@ -1,21 +1,19 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index 2b2f4a0f..afac49af 100644
+index f255ad76..914ecfdd 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -997,13 +997,15 @@ struct llama_server_context
+@@ -1101,12 +1101,13 @@ struct server_context {
          slot.n_sent_text += result.text_to_send.size();
          // add the token to slot queue and cache
      }
 -    slot.add_token_string(result);
 +
-     if (slot.params.stream)
-     {
+     if (slot.params.stream) {
          send_partial_response(slot, result);
      }
 +    slot.add_token_string(result);
 +
-     if (incomplete)
-     {
+     if (incomplete) {
          slot.has_next_token = true;
      }

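The hunk above rebases ollama's token-streaming patch onto the refactored upstream server: llama_server_context was renamed to server_context, the patched code moved from line 997 to 1101, and the surrounding code switched to same-line braces, so the patch context had to be regenerated. The change the patch carries is unchanged: slot.add_token_string(result) runs after send_partial_response rather than before it, so a token lands in the slot's cache only once its text has been streamed out. A minimal sketch of that reordering; slot_t, completion_result, and process_token are illustrative stand-ins for the real server.cpp types, not upstream API:

#include <iostream>
#include <string>
#include <vector>

struct completion_result {
    std::string text_to_send;
};

struct slot_t {
    bool stream = true;
    std::vector<std::string> cache;  // generated-token cache

    void add_token_string(const completion_result & result) {
        cache.push_back(result.text_to_send);
    }
};

// Stand-in for the real HTTP streaming path in server.cpp.
void send_partial_response(slot_t &, const completion_result & result) {
    std::cout << result.text_to_send;
}

void process_token(slot_t & slot, const completion_result & result) {
    // Patched order: stream the partial response first...
    if (slot.stream) {
        send_partial_response(slot, result);
    }
    // ...then cache the token (upstream caches it before sending).
    slot.add_token_string(result);
}

int main() {
    slot_t slot;
    process_token(slot, {"hello"});
    std::cout << "\ncached tokens: " << slot.cache.size() << "\n";
}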

@@ -1,9 +1,9 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index 2b2f4a0f..25857bdd 100644
+index f255ad76..5b83acb1 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -31,6 +31,10 @@
- #include <atomic>
+@@ -28,6 +28,10 @@
  #include <thread>
  #include <signal.h>
 +#ifdef GGML_USE_CUBLAS
@@ -12,18 +12,19 @@ index 2b2f4a0f..25857bdd 100644
 +
  using json = nlohmann::json;
 
-struct server_params {
-@@ -363,6 +367,9 @@ struct llama_server_context
+bool server_verbose = false;
+@@ -648,6 +652,10 @@ struct server_context {
      llama_free_model(model);
      model = nullptr;
  }
++
 +#ifdef GGML_USE_CUBLAS
 +    ggml_free_cublas();
 +#endif
  }
 
-bool load_model(const gpt_params &params_)
-@@ -3494,6 +3501,7 @@ int main(int argc, char **argv)
+bool load_model(const gpt_params & params_) {
+@@ -3339,6 +3347,7 @@ int main(int argc, char ** argv) {
      sigemptyset (&sigint_action.sa_mask);
      sigint_action.sa_flags = 0;
      sigaction(SIGINT, &sigint_action, NULL);
@@ -32,7 +33,7 @@ index 2b2f4a0f..25857bdd 100644
          auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
              return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
 diff --git a/ggml-cuda.cu b/ggml-cuda.cu
-index 0c6501e9..75c12723 100644
+index 72bcec8c..50a45e3d 100644
 --- a/ggml-cuda.cu
 +++ b/ggml-cuda.cu
 @@ -43,6 +43,7 @@
@@ -43,7 +44,7 @@ index 0c6501e9..75c12723 100644
  #define cublasGemmEx hipblasGemmEx
  #define cublasGemmBatchedEx hipblasGemmBatchedEx
  #define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
-@@ -8694,10 +8695,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
+@@ -8751,10 +8752,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
      return g_cublas_loaded;
  }
@@ -57,7 +58,7 @@ index 0c6501e9..75c12723 100644
  #ifdef __HIP_PLATFORM_AMD__
          // Workaround for a rocBLAS bug when using multiple graphics cards:
-@@ -8707,7 +8708,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8764,7 +8765,7 @@ GGML_CALL void ggml_init_cublas() {
  #endif
          if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
@@ -66,7 +67,7 @@ index 0c6501e9..75c12723 100644
              g_cublas_loaded = false;
              fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
              return;
-@@ -8778,7 +8779,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8835,7 +8836,7 @@ GGML_CALL void ggml_init_cublas() {
          // configure logging to stdout
          // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
@@ -75,7 +76,7 @@ index 0c6501e9..75c12723 100644
          g_cublas_loaded = true;
      }
  }
-@@ -12345,3 +12346,22 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
+@@ -12490,3 +12491,22 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
      }
      return device_count;
  }
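
The second patch is ollama's CUDA-leak fix, likewise rebased onto the refactored upstream code. It declares a ggml_free_cublas() entry point (added by the patch; upstream ggml provides no such function), calls it under GGML_USE_CUBLAS from the cleanup block next to llama_free_model in server_context, adds one line after the SIGINT handler registration in main (the added line itself is collapsed in this view), and appends a 19-line implementation to the end of ggml-cuda.cu, also collapsed here. The visible hunks suggest the usual guarded init/free pattern: the flag that makes ggml_init_cublas() idempotent becomes resettable, so the cuBLAS state can be torn down and rebuilt. A standalone sketch of that pattern; backend_init, backend_free, and g_initialized are illustrative names, not the verbatim patch code:

#include <cstdio>

// File-scope guard, so the free path can reset what the init path set.
static bool g_initialized = false;

void backend_init() {
    if (g_initialized) {
        return;  // idempotent, like ggml_init_cublas()
    }
    std::puts("allocating per-device handles");
    g_initialized = true;
}

void backend_free() {
    if (!g_initialized) {
        return;  // nothing to release
    }
    std::puts("destroying per-device handles");
    g_initialized = false;  // a later backend_init() may run again
}

int main() {
    backend_init();
    backend_free();  // e.g. from a destructor, signal handler, or atexit callback
}

Without an explicit free, handles created during initialization live until process exit, which is presumably the leak this patch addresses when the server is torn down and re-created inside the same process.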