subprocess llama.cpp server (#401)
* remove c code
* pack llama.cpp
* use request context for llama_cpp
* let llama_cpp decide the number of threads to use
* stop llama runner when app stops
* remove sample count and duration metrics
* use go generate to get libraries
* tmp dir for running llm
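Most of these bullets serve one theme: once every llama.cpp call takes a `context.Context`, an in-flight call can be cancelled when its request ends or the app shuts down. A minimal, self-contained sketch of that pattern, with a hypothetical `LLM` interface and `slowLLM` stand-in (neither name is from this repo):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// LLM is a hypothetical interface mirroring the pattern in this commit:
// every call takes a context so in-flight work can be cancelled.
type LLM interface {
	Embedding(ctx context.Context, input string) ([]float64, error)
}

// slowLLM simulates a long-running llama.cpp call.
type slowLLM struct{}

func (slowLLM) Embedding(ctx context.Context, input string) ([]float64, error) {
	select {
	case <-time.After(2 * time.Second): // the "work"
		return []float64{0.1, 0.2, 0.3}, nil
	case <-ctx.Done(): // caller cancelled, or the app is shutting down
		return nil, ctx.Err()
	}
}

func main() {
	// Cancel after 500ms, standing in for a client disconnect or shutdown.
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	var llm LLM = slowLLM{}
	if _, err := llm.Embedding(ctx, "hello"); err != nil {
		fmt.Println(err) // prints: context deadline exceeded
	}
}
```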
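The "use go generate to get libraries" bullet refers to Go's `go generate` tooling: `go generate ./...` runs each `//go:generate` directive in order, which is how a vendored llama.cpp can be configured and built ahead of `go build`. The directives below are a hedged illustration of the idea, not the commit's actual commands:

```go
package llm

// Illustrative only: build the vendored llama.cpp before `go build`.
//go:generate cmake -S llama.cpp -B llama.cpp/build
//go:generate cmake --build llama.cpp/build
```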
```diff
@@ -521,7 +521,7 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
         model = &Model{ModelPath: e.model}
     }
 
-    if err := load(model, e.opts, defaultSessionDuration); err != nil {
+    if err := load(context.Background(), model, e.opts, defaultSessionDuration); err != nil {
         return nil, fmt.Errorf("load model to generate embeddings: %v", err)
     }
 
@@ -584,7 +584,7 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
             embeddings = append(embeddings, vector.Embedding{Data: d, Vector: existing[d]})
             continue
         }
-        embed, err := loaded.llm.Embedding(d)
+        embed, err := loaded.llm.Embedding(context.Background(), d)
         if err != nil {
             log.Printf("failed to generate embedding for '%s' line %d: %v", filePath, i+1, err)
             continue
```
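Both changed call sites pass `context.Background()`, presumably because `embeddingLayers` runs while a model is being created rather than inside a live API request, so there is no request context to forward. Inside a request handler, the request's own context would be forwarded instead, so a client disconnect cancels the underlying llama.cpp call. A hedged sketch of that, reusing the hypothetical `LLM` interface from above (the route and `fakeLLM` are illustrative, not this repo's API):

```go
package main

import (
	"context"
	"encoding/json"
	"net/http"
)

// LLM is a hypothetical interface for the loaded llama.cpp runner.
type LLM interface {
	Embedding(ctx context.Context, input string) ([]float64, error)
}

// fakeLLM is a placeholder implementation for the sketch.
type fakeLLM struct{}

func (fakeLLM) Embedding(ctx context.Context, input string) ([]float64, error) {
	if err := ctx.Err(); err != nil { // bail out if the request is already gone
		return nil, err
	}
	return []float64{0.1, 0.2, 0.3}, nil
}

// embeddingHandler forwards r.Context(), so a client disconnect cancels
// the underlying call instead of leaving it running.
func embeddingHandler(llm LLM) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		embed, err := llm.Embedding(r.Context(), r.URL.Query().Get("prompt"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		json.NewEncoder(w).Encode(embed)
	}
}

func main() {
	http.Handle("/api/embedding", embeddingHandler(fakeLLM{}))
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
```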