mirror of
https://github.com/dogkeeper886/ollama37.git
synced 2025-12-18 11:47:07 +00:00
add model prompt templates as Jinja2 templates
This commit is contained in:
@@ -69,6 +69,7 @@ def generate_oneshot(*args, **kwargs):
|
||||
if len(choices) > 0:
|
||||
print(choices[0].get("text", ""), end="", flush=True)
|
||||
|
||||
# end with a new line
|
||||
print()
|
||||
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@ import json
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
from llama_cpp import Llama as LLM
|
||||
from template import template
|
||||
|
||||
import ollama.model
|
||||
import ollama.prompt
|
||||
|
||||
|
||||
@contextmanager
|
||||
@@ -21,7 +21,7 @@ def suppress_stderr():
|
||||
def generate(model, prompt, models_home=".", llms={}, *args, **kwargs):
|
||||
llm = load(model, models_home=models_home, llms=llms)
|
||||
|
||||
prompt = template(model, prompt)
|
||||
prompt = ollama.prompt.template(model, prompt)
|
||||
|
||||
if "max_tokens" not in kwargs:
|
||||
kwargs.update({"max_tokens": 16384})
|
||||
@@ -43,12 +43,9 @@ def load(model, models_home=".", llms={}):
|
||||
name: path for name, path in ollama.model.models(models_home)
|
||||
}.get(model, None)
|
||||
|
||||
if model_path is None:
|
||||
if not model_path:
|
||||
# try loading this as a path to a model, rather than a model name
|
||||
if os.path.isfile(model):
|
||||
model_path = model
|
||||
else:
|
||||
raise ValueError("Model not found")
|
||||
model_path = model
|
||||
|
||||
# suppress LLM's output
|
||||
with suppress_stderr():
|
||||
|
||||
19
ollama/prompt.py
Normal file
19
ollama/prompt.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from os import path
|
||||
from difflib import SequenceMatcher
|
||||
from jinja2 import Environment, PackageLoader
|
||||
|
||||
|
||||
def template(model, prompt):
    """Render *prompt* through the packaged template that best matches *model*.

    Fuzzy-matches the model name against each template file's basename
    (e.g. "alpaca" from "alpaca.prompt") with difflib.SequenceMatcher and
    renders the closest match, substituting the prompt for ``{{ prompt }}``.

    Args:
        model: model name to match against the template filenames.
        prompt: user prompt text inserted into the chosen template.

    Returns:
        The rendered prompt string.

    Raises:
        ValueError: if no templates are packaged, so there is nothing to
            match against.
    """
    environment = Environment(loader=PackageLoader(__name__, 'templates'))

    best_ratio = 0.0
    best_name = None
    # NOTE: the loop variable was previously also named `template`, shadowing
    # this function; renamed for clarity.
    for name in environment.list_templates():
        base, _ = path.splitext(name)
        ratio = SequenceMatcher(None, model.lower(), base).ratio()
        if ratio > best_ratio:
            best_ratio = ratio
            best_name = name

    if best_name is None:
        # Previously fell through to get_template('') which raises an opaque
        # jinja2.TemplateNotFound; fail with a clear message instead.
        raise ValueError("no prompt templates found")

    return environment.get_template(best_name).render(prompt=prompt)
|
||||
8
ollama/templates/alpaca.prompt
Normal file
8
ollama/templates/alpaca.prompt
Normal file
@@ -0,0 +1,8 @@
|
||||
Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
||||
|
||||
### Instruction:
|
||||
{{ prompt }}
|
||||
|
||||
### Response:
|
||||
|
||||
|
||||
5
ollama/templates/gpt4.prompt
Normal file
5
ollama/templates/gpt4.prompt
Normal file
@@ -0,0 +1,5 @@
|
||||
### Instruction:
|
||||
{{ prompt }}
|
||||
|
||||
### Response:
|
||||
|
||||
5
ollama/templates/hermes.prompt
Normal file
5
ollama/templates/hermes.prompt
Normal file
@@ -0,0 +1,5 @@
|
||||
### Instruction:
|
||||
{{ prompt }}
|
||||
|
||||
### Response:
|
||||
|
||||
1
ollama/templates/oasst.prompt
Normal file
1
ollama/templates/oasst.prompt
Normal file
@@ -0,0 +1 @@
|
||||
{{ prompt }}
|
||||
7
ollama/templates/orca.prompt
Normal file
7
ollama/templates/orca.prompt
Normal file
@@ -0,0 +1,7 @@
|
||||
### System:
|
||||
You are an AI assistant that follows instruction extremely well. Help as much as you can.
|
||||
|
||||
### User:
|
||||
{{ prompt }}
|
||||
|
||||
### Response:
|
||||
2
ollama/templates/qlora.prompt
Normal file
2
ollama/templates/qlora.prompt
Normal file
@@ -0,0 +1,2 @@
|
||||
### Human: {{ prompt }}
|
||||
### Assistant:
|
||||
4
ollama/templates/tulu.prompt
Normal file
4
ollama/templates/tulu.prompt
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
{{ prompt }}
|
||||
|
||||
|
||||
4
ollama/templates/vicuna.prompt
Normal file
4
ollama/templates/vicuna.prompt
Normal file
@@ -0,0 +1,4 @@
|
||||
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
|
||||
|
||||
USER: {{ prompt }}
|
||||
ASSISTANT:
|
||||
2
ollama/templates/wizardlm.prompt
Normal file
2
ollama/templates/wizardlm.prompt
Normal file
@@ -0,0 +1,2 @@
|
||||
{{ prompt }}
|
||||
### Response:
|
||||
Reference in New Issue
Block a user