Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-14 09:47:02 +00:00)
llama: remove model loading for grammar (#10096)
llama/llama.cpp/src/llama-grammar.h (vendored): 14 changed lines
@@ -6,8 +6,19 @@
 #include <regex>
 #include <string>
 #include <vector>
+#include <set>
 
 struct llama_vocab;
+struct ollama_vocab {
+    std::map<uint32_t, std::string> token_to_piece_map;
+    std::set<uint32_t> special_eog_ids;
+
+    const std::string & token_to_piece(const uint32_t token) const;
+    void add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces);
+    void set_eog_tokens(const uint32_t* tokens, size_t n_tokens);
+    bool is_eog(const uint32_t token) const;
+
+};
 
 // grammar element type
 enum llama_gretype {
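The new ollama_vocab is a lightweight stand-in for the tokenizer data the grammar needs, so the caller can feed token pieces and end-of-generation IDs in directly instead of loading the model through llama.cpp. The sketch below is illustrative only (the real definitions live in llama-grammar.cpp, which is not part of this hunk); it shows one straightforward way the declared methods could be backed by the two containers above, assuming the header is included.

// Illustrative sketch, not the code from this commit: plausible definitions for
// the ollama_vocab methods declared above.
const std::string & ollama_vocab::token_to_piece(const uint32_t token) const {
    // look up the text piece registered for this token id
    return token_to_piece_map.at(token);
}

void ollama_vocab::add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces) {
    // register n_tokens (id, piece) pairs supplied by the caller
    for (size_t i = 0; i < n_tokens; i++) {
        token_to_piece_map[tokens[i]] = pieces[i];
    }
}

void ollama_vocab::set_eog_tokens(const uint32_t* tokens, size_t n_tokens) {
    // record which token ids count as end-of-generation
    for (size_t i = 0; i < n_tokens; i++) {
        special_eog_ids.insert(tokens[i]);
    }
}

bool ollama_vocab::is_eog(const uint32_t token) const {
    return special_eog_ids.count(token) > 0;
}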
@@ -114,6 +125,7 @@ struct llama_grammar_trigger_pattern {
 struct llama_grammar {
     // note: allow null vocab for testing (not great)
     const llama_vocab * vocab;
+    const ollama_vocab * o_vocab;
 
     const llama_grammar_rules rules; // TODO: shared ptr
     llama_grammar_stacks stacks;
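With both pointers in the struct, grammar code can run against either vocabulary source: the full llama_vocab when a model is loaded, or the minimal ollama_vocab when it is not. A hypothetical helper like the one below (not taken from this commit; grammar_token_is_eog is a made-up name) illustrates the kind of dispatch the extra member enables, for example when checking whether sampling produced an end-of-generation token.

// Hypothetical helper, not from this commit: prefer the lightweight ollama_vocab
// when it is set, otherwise fall back to the loaded llama_vocab.
static bool grammar_token_is_eog(const llama_grammar & g, llama_token token) {
    if (g.o_vocab != nullptr) {
        return g.o_vocab->is_eog((uint32_t) token);
    }
    // llama_vocab_is_eog() is the public llama.cpp end-of-generation query
    return g.vocab != nullptr && llama_vocab_is_eog(g.vocab, token);
}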
@@ -141,12 +153,14 @@ struct llama_grammar {
 // note: needed for tests (not great)
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const llama_grammar_element ** rules,
         size_t n_rules,
         size_t start_rule_index);
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
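Both llama_grammar_init_impl overloads now take the ollama_vocab alongside the (already nullable) llama_vocab, which is what lets a grammar be constructed without loading a model. A hypothetical call through the rules-based overload might look like the sketch below; the grammar rule is made up, and passing nullptr for vocab together with a populated ollama_vocab is my reading of the commit's intent rather than something shown in this header.

// Hypothetical usage sketch, not from this commit: build a grammar with no
// llama_vocab, supplying a caller-populated ollama_vocab instead.
#include <vector>

struct llama_grammar * make_grammar_without_model(const ollama_vocab * ovocab) {
    // a single trivial rule, root ::= "a", expressed as grammar elements
    std::vector<llama_grammar_element> root_rule = {
        { LLAMA_GRETYPE_CHAR, 'a' },  // match the character 'a'
        { LLAMA_GRETYPE_END,  0   },  // end of this rule
    };
    const llama_grammar_element * rules[] = { root_rule.data() };

    return llama_grammar_init_impl(
        /* vocab            */ nullptr,   // no model loaded
        /* ollama_vocab     */ ovocab,
        /* rules            */ rules,
        /* n_rules          */ 1,
        /* start_rule_index */ 0);
}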