Bump llama.cpp to b2081

Author: Daniel Hiltgen
Date: 2024-02-06 12:06:43 -08:00
commit de76b95dd4 (parent 59ec837ef6)
3 changed files with 17 additions and 22 deletions


@@ -1,8 +1,8 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index a48582ad..9fffffd8 100644
+index d86d7e04..7d71c766 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -1564,12 +1564,6 @@ struct llama_server_context
+@@ -1598,12 +1598,6 @@ struct llama_server_context
              LOG_TEE("slot %d : in cache: %i tokens | to process: %i tokens\n", slot.id, slot.n_past, slot.num_prompt_tokens_processed);
          }
@@ -15,7 +15,7 @@ index a48582ad..9fffffd8 100644
          if (slot.n_past == slot.num_prompt_tokens && slot.n_past > 0)
          {
              // we have to evaluate at least 1 token to generate logits.
-@@ -1581,6 +1575,12 @@ struct llama_server_context
+@@ -1615,6 +1609,12 @@ struct llama_server_context
              }
          }
@@ -26,5 +26,5 @@ index a48582ad..9fffffd8 100644
 +        slot.cache_tokens = prompt_tokens;
 +
          LOG_VERBOSE("prompt ingested", {
-                                            {"n_past", slot.n_past},
-                                            {"cached", tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
+                                            {"n_past", slot.n_past},
+                                            {"cached", tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},