Mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-16 02:37:06 +00:00
* add build to .dockerignore
* test: only build one arch
* add build to .gitignore
* fix ccache path
* filter amdgpu targets
* only filter if autodetecting
* Don't clobber gpu list for default runner. This ensures the GPU-specific environment variables are set properly.
* explicitly set CXX compiler for HIP
* Update build_windows.ps1. This isn't complete, but is close. Dependencies are missing, and it only builds the "default" preset.
* build: add ollama subdir
* add .git to .dockerignore
* docs: update development.md
* update build_darwin.sh
* remove unused scripts
* llm: add cwd and build/lib/ollama to library paths
* default DYLD_LIBRARY_PATH to LD_LIBRARY_PATH in runner on macOS
* add additional cmake output vars for msvc
* interim edits to make server detection logic work with dll directories like lib/ollama/cuda_v12
* remove unnecessary filepath.Dir, cleanup
* add hardware-specific directory to path
* use absolute server path
* build: linux arm
* cmake install targets
* remove unused files
* ml: visit each library path once
* build: skip cpu variants on arm
* build: install cpu targets
* build: fix workflow
* shorter names
* fix rocblas install
* docs: clean up development.md
* consistent build dir removal in development.md
* silence -Wimplicit-function-declaration build warnings in ggml-cpu
* update readme
* update development readme
* llm: update library lookup logic now that there is one runner (#8587)
* tweak development.md
* update docs
* add windows cuda/rocm tests

---------

Co-authored-by: jmorganca <jmorganca@gmail.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
49 lines
1.5 KiB
C++
Vendored
#pragma once

// TODO: rename llama-sampling.h/.cpp to llama-sampler.h/.cpp ?

#include "llama-grammar.h"

#include <vector> // for std::vector used below (may also be reached transitively via llama-grammar.h)

struct llama_vocab;
struct llama_grammar;
// sampler chain

struct llama_sampler_chain {
    llama_sampler_chain_params params;

    std::vector<struct llama_sampler *> samplers;

    // timing
    mutable int64_t t_sample_us; // total time spent sampling, in microseconds
    mutable int32_t n_sample;    // number of tokens sampled so far
};
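// Illustrative sketch, not part of the original header: a chain is normally
// built and driven through the public llama.h API rather than by mutating the
// struct above directly. The calls below (llama_sampler_chain_default_params,
// llama_sampler_chain_init, llama_sampler_chain_add, llama_sampler_init_top_k,
// llama_sampler_init_dist, llama_sampler_sample, llama_sampler_free) are
// assumed to match the vendored llama.h.
//
//     llama_sampler_chain_params sparams = llama_sampler_chain_default_params();
//     struct llama_sampler * chain = llama_sampler_chain_init(sparams);
//     llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
//     llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
//     llama_token tok = llama_sampler_sample(chain, ctx, -1); // sample from the last logits
//     llama_sampler_free(chain); // also frees the samplers added to the chain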
// create a sampler that constrains output to the given grammar;
// grammar_str is GBNF grammar text and grammar_root names its start rule
struct llama_sampler * llama_sampler_init_grammar_impl(
        const struct llama_vocab & vocab,
                      const char * grammar_str,
                      const char * grammar_root);
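// Illustrative sketch for the grammar sampler declared above, assuming
// grammar_str holds GBNF text as used elsewhere in llama.cpp:
//
//     const char * gbnf = "root ::= \"yes\" | \"no\"";
//     struct llama_sampler * g = llama_sampler_init_grammar_impl(vocab, gbnf, "root");
//
// The resulting sampler masks logits so that only tokens which can extend a
// valid parse of the grammar remain eligible during sampling.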
// create the infill sampler used for fill-in-the-middle (FIM) completion
struct llama_sampler * llama_sampler_init_infill_impl(
        const struct llama_vocab & vocab);
// create a DRY ("don't repeat yourself") repetition-penalty sampler;
// seq_breakers are strings that reset sequence matching and are tokenized
// through vocab
struct llama_sampler * llama_sampler_init_dry_impl(
        const struct llama_vocab &  vocab,
                         int32_t    context_size,
                           float    dry_multiplier,
                           float    dry_base,
                         int32_t    dry_allowed_length,
                         int32_t    dry_penalty_last_n,
                      const char ** seq_breakers,
                          size_t    num_breakers);
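// Illustrative sketch with commonly suggested DRY settings (not defaults
// verified against this vendored copy); -1 penalizes over the whole context:
//
//     const char * breakers[] = { "\n", ":", "\"", "*" };
//     struct llama_sampler * dry = llama_sampler_init_dry_impl(
//         vocab,
//         4096,  // context_size
//         0.8f,  // dry_multiplier; 0.0f disables the sampler
//         1.75f, // dry_base
//         2,     // dry_allowed_length: repeats up to this length go unpenalized
//         -1,    // dry_penalty_last_n
//         breakers, 4);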
// testing-only variant of the DRY sampler that takes sequence breakers
// already tokenized, bypassing the vocabulary
struct llama_sampler * llama_sampler_init_dry_testing(
                         int32_t   context_size,
                           float   dry_multiplier,
                           float   dry_base,
                         int32_t   dry_allowed_length,
                         int32_t   dry_penalty_last_n,
                  const std::vector<std::vector<llama_token>> & seq_breakers);