subprocess llama.cpp server (#401)

* remove c code
* pack llama.cpp
* use request context for llama_cpp
* let llama_cpp decide the number of threads to use
* stop llama runner when app stops
* remove sample count and duration metrics
* use go generate to get libraries
* tmp dir for running llm
Author: Bruce MacDonald
Date: 2023-08-30 16:35:03 -04:00
Committed by: GitHub
Parent: f4432e1dba
Commit: 42998d797d
37 changed files with 958 additions and 43928 deletions
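The title and bullet points above describe replacing the in-process C bindings with a llama.cpp server run as a child process, unpacked into a temporary directory and stopped when the application stops. A minimal Go sketch of that shape, purely illustrative (the binary name, flag, and helper function are assumptions, not code from this commit):

```go
// Illustrative sketch only: the binary name, flag, and helper are assumptions,
// not code from this commit. It shows the general shape of the change: the
// llama.cpp server runs as a child process out of a temp dir, and cancelling
// the context (request done, or app shutting down) stops the runner.
package llm

import (
	"context"
	"os"
	"os/exec"
	"path/filepath"
)

// startRunner launches a hypothetical llama.cpp server binary that has been
// unpacked into a temporary directory. Cancelling ctx kills the subprocess.
func startRunner(ctx context.Context, modelPath string) (*exec.Cmd, error) {
	tmp, err := os.MkdirTemp("", "llama-runner-")
	if err != nil {
		return nil, err
	}

	// The commit uses `go generate` to fetch the needed libraries; this
	// sketch simply assumes a server binary is already on disk in tmp.
	bin := filepath.Join(tmp, "server")

	cmd := exec.CommandContext(ctx, bin, "--model", modelPath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return cmd, nil
}
```

Tying the subprocess to a context means one cancellation path serves both a finished or abandoned request and application shutdown.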

@@ -521,7 +521,7 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
 		model = &Model{ModelPath: e.model}
 	}
-	if err := load(model, e.opts, defaultSessionDuration); err != nil {
+	if err := load(context.Background(), model, e.opts, defaultSessionDuration); err != nil {
 		return nil, fmt.Errorf("load model to generate embeddings: %v", err)
 	}
@@ -584,7 +584,7 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
 			embeddings = append(embeddings, vector.Embedding{Data: d, Vector: existing[d]})
 			continue
 		}
-		embed, err := loaded.llm.Embedding(d)
+		embed, err := loaded.llm.Embedding(context.Background(), d)
 		if err != nil {
 			log.Printf("failed to generate embedding for '%s' line %d: %v", filePath, i+1, err)
 			continue
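Both hunks pass context.Background() because these call sites run outside of an HTTP request (embedding layers are built while a model is being created). Where a request is available, the request-scoped context would normally be threaded through instead, so a client disconnect cancels the in-flight llama.cpp call. A hypothetical handler sketch along those lines (the gin handler, interface, and return types are assumptions, not this repository's actual code):

```go
// Hypothetical sketch, not code from this commit. `loaded.llm` mirrors the
// variable in the hunk above; the interface and handler here are assumptions.
package server

import (
	"context"
	"net/http"

	"github.com/gin-gonic/gin"
)

var loaded struct {
	llm interface {
		Embedding(ctx context.Context, input string) ([]float64, error)
	}
}

// embeddingHandler threads the HTTP request's context into the runner, so a
// client disconnect or timeout cancels the in-flight llama.cpp call.
func embeddingHandler(c *gin.Context) {
	embed, err := loaded.llm.Embedding(c.Request.Context(), "example input")
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"embedding": embed})
}
```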