Mirror of https://github.com/dogkeeper886/ollama37.git (synced 2025-12-14 09:47:02 +00:00)
* add build to .dockerignore
* test: only build one arch
* add build to .gitignore
* fix ccache path
* filter amdgpu targets
* only filter if autodetecting
* Don't clobber gpu list for default runner (this ensures the GPU-specific environment variables are set properly)
* explicitly set CXX compiler for HIP
* Update build_windows.ps1 (this isn't complete, but is close: dependencies are missing, and it only builds the "default" preset)
* build: add ollama subdir
* add .git to .dockerignore
* docs: update development.md
* update build_darwin.sh
* remove unused scripts
* llm: add cwd and build/lib/ollama to library paths
* default DYLD_LIBRARY_PATH to LD_LIBRARY_PATH in runner on macOS
* add additional cmake output vars for msvc
* interim edits to make server detection logic work with dll directories like lib/ollama/cuda_v12
* remove unnecessary filepath.Dir, cleanup
* add hardware-specific directory to path
* use absolute server path
* build: linux arm
* cmake install targets
* remove unused files
* ml: visit each library path once
* build: skip cpu variants on arm
* build: install cpu targets
* build: fix workflow
* shorter names
* fix rocblas install
* docs: clean up development.md
* consistent build dir removal in development.md
* silence -Wimplicit-function-declaration build warnings in ggml-cpu
* update readme
* update development readme
* llm: update library lookup logic now that there is one runner (#8587)
* tweak development.md
* update docs
* add windows cuda/rocm tests

Co-authored-by: jmorganca <jmorganca@gmail.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
52 lines · 1.4 KiB · C++ · Vendored
#pragma once

#include <string>
#include <vector>
#include <cstdint>

enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    LLM_CHAT_TEMPLATE_CHATGML_3,
    LLM_CHAT_TEMPLATE_CHATGML_4,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};

struct llama_chat_message;

llm_chat_template llm_chat_template_from_str(const std::string & name);

llm_chat_template llm_chat_detect_template(const std::string & tmpl);

int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
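For orientation, here is a minimal usage sketch of the declarations above. It is not part of the vendored file: it assumes the llama_chat_message struct from llama.h (plain role/content C strings) and treats a negative return from llm_chat_apply_template as an unsupported template, which is an assumption about the error convention.

#include <cstdio>
#include <string>
#include <vector>

#include "llama.h"       // assumed source of struct llama_chat_message { const char * role; const char * content; }
#include "llama-chat.h"  // the header shown above

int main() {
    // A short two-turn conversation using the common system/user roles.
    llama_chat_message msgs[] = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!" },
    };
    std::vector<const llama_chat_message *> chat = { &msgs[0], &msgs[1] };

    std::string prompt;
    // add_ass = true asks the formatter to open the assistant turn so the model
    // continues from it; a negative return is treated here as failure (assumption).
    int32_t res = llm_chat_apply_template(LLM_CHAT_TEMPLATE_CHATML, chat, prompt, /*add_ass=*/true);
    if (res < 0) {
        std::fprintf(stderr, "template not supported\n");
        return 1;
    }

    // prompt now holds the conversation rendered in the selected template.
    std::printf("%s\n", prompt.c_str());
    return 0;
}

In a real caller the enum would typically come from llm_chat_template_from_str or llm_chat_detect_template rather than being hard-coded.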