Mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-12 00:37:04 +00:00
Commit: refactor
ollama/__init__.py (new file, 9 lines)
@@ -0,0 +1,9 @@
from ollama.model import models
from ollama.engine import generate, load, unload

__all__ = [
    'models',
    'generate',
    'load',
    'unload',
]
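The package re-exports the model and engine helpers so callers can stay at the top level. A minimal usage sketch, assuming a model file llama-7b.bin sits under ~/.ollama/models (the model name is illustrative):

import json
from pathlib import Path

import ollama

models_home = Path.home() / '.ollama' / 'models'

# models() yields a (name, path) tuple for every *.bin file it finds.
for name, path in ollama.models(models_home):
    print(name, path)

# generate() yields JSON-encoded chunks; decode each one to pull out the text.
for chunk in ollama.generate('llama-7b', 'Q: What is 2+2? A:', models_home=models_home):
    choices = json.loads(chunk).get('choices', [])
    if choices:
        print(choices[0].get('text', ''), end='')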
ollama/cmd/__init__.py (new file, empty)
ollama/cmd/cli.py (new file, 43 lines)
@@ -0,0 +1,43 @@
import json
from pathlib import Path
from argparse import ArgumentParser

from ollama import model, engine
from ollama.cmd import server


def main():
    parser = ArgumentParser()
    parser.add_argument('--models-home', default=Path.home() / '.ollama' / 'models')

    subparsers = parser.add_subparsers()

    server.set_parser(subparsers.add_parser('serve'))

    list_parser = subparsers.add_parser('list')
    list_parser.set_defaults(fn=list)

    generate_parser = subparsers.add_parser('generate')
    generate_parser.add_argument('model')
    generate_parser.add_argument('prompt')
    generate_parser.set_defaults(fn=generate)

    args = parser.parse_args()
    args = vars(args)

    fn = args.pop('fn')
    fn(**args)


def list(*args, **kwargs):
    for m in model.models(*args, **kwargs):
        print(m)


def generate(*args, **kwargs):
    for output in engine.generate(*args, **kwargs):
        output = json.loads(output)

        choices = output.get('choices', [])
        if len(choices) > 0:
            print(choices[0].get('text', ''), end='')
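Dispatch works through the fn default each subparser sets; note that running the CLI with no subcommand raises KeyError, since fn is never added to the namespace. A sketch that drives main() programmatically, with an illustrative models directory:

import sys
from ollama.cmd import cli

# Equivalent to the shell invocation: ollama --models-home /tmp/models list
sys.argv = ['ollama', '--models-home', '/tmp/models', 'list']
cli.main()

# Equivalent to: ollama --models-home /tmp/models generate llama-7b 'Q: What is 2+2? A:'
sys.argv = ['ollama', '--models-home', '/tmp/models',
            'generate', 'llama-7b', 'Q: What is 2+2? A:']
cli.main()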
ollama/cmd/server.py (new file, 75 lines)
@@ -0,0 +1,75 @@
from aiohttp import web

from ollama import engine


def set_parser(parser):
    parser.add_argument('--host', default='127.0.0.1')
    parser.add_argument('--port', default=7734)
    parser.set_defaults(fn=serve)


def serve(models_home='.', *args, **kwargs):
    app = web.Application()
    app.add_routes([
        web.post('/load', load),
        web.post('/unload', unload),
        web.post('/generate', generate),
    ])

    app.update({
        'llms': {},
        'models_home': models_home,
    })

    web.run_app(app, **kwargs)


async def load(request):
    body = await request.json()
    model = body.get('model')
    if not model:
        raise web.HTTPBadRequest()

    kwargs = {
        'llms': request.app.get('llms'),
        'models_home': request.app.get('models_home'),
    }

    engine.load(model, **kwargs)
    return web.Response()


async def unload(request):
    body = await request.json()
    model = body.get('model')
    if not model:
        raise web.HTTPBadRequest()

    engine.unload(model, llms=request.app.get('llms'))
    return web.Response()


async def generate(request):
    body = await request.json()
    model = body.get('model')
    if not model:
        raise web.HTTPBadRequest()

    prompt = body.get('prompt')
    if not prompt:
        raise web.HTTPBadRequest()

    response = web.StreamResponse()
    await response.prepare(request)

    kwargs = {
        'llms': request.app.get('llms'),
        'models_home': request.app.get('models_home'),
    }

    for output in engine.generate(model, prompt, **kwargs):
        await response.write(output.encode('utf-8'))
        await response.write(b'\n')

    return response
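Each handler pulls the shared llms cache and models_home out of the app state, and /generate streams newline-delimited JSON, one chunk per line. A standard-library client sketch against a local serve on the default port 7734 (the model name is illustrative). One caveat visible in the handler: engine.generate() is a synchronous generator, so a long generation blocks the event loop.

import json
import urllib.request

req = urllib.request.Request(
    'http://127.0.0.1:7734/generate',
    data=json.dumps({'model': 'llama-7b',
                     'prompt': 'Q: Why is the sky blue? A:'}).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
    method='POST',
)

# Iterate the response line by line; each line is one JSON chunk.
with urllib.request.urlopen(req) as resp:
    for line in resp:
        choices = json.loads(line).get('choices', [])
        if choices:
            print(choices[0].get('text', ''), end='')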
ollama/engine.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import os
import json
import sys
from contextlib import contextmanager
from llama_cpp import Llama as LLM

import ollama.model


@contextmanager
def suppress_stderr():
    stderr = os.dup(sys.stderr.fileno())
    with open(os.devnull, 'w') as devnull:
        os.dup2(devnull.fileno(), sys.stderr.fileno())
        yield

    os.dup2(stderr, sys.stderr.fileno())


def generate(model, prompt, models_home='.', llms={}, *args, **kwargs):
    llm = load(model, models_home=models_home, llms=llms)

    if 'max_tokens' not in kwargs:
        kwargs.update({'max_tokens': 16384})

    if 'stop' not in kwargs:
        kwargs.update({'stop': ['Q:', '\n']})

    if 'stream' not in kwargs:
        kwargs.update({'stream': True})

    for output in llm(prompt, *args, **kwargs):
        yield json.dumps(output)


def load(model, models_home='.', llms={}):
    llm = llms.get(model, None)
    if not llm:
        model_path = {
            name: path
            for name, path in ollama.model.models(models_home)
        }.get(model, None)

        if model_path is None:
            raise ValueError('Model not found')

        # suppress LLM's output
        with suppress_stderr():
            llm = LLM(model_path, verbose=False)
        llms.update({model: llm})

    return llm


def unload(model, llms={}):
    if model in llms:
        llms.pop(model)
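The mutable default llms={} doubles as a process-wide cache shared by load(), generate(), and unload(); passing a dict explicitly makes that lifecycle visible. A sketch assuming /tmp/models/llama-7b.bin exists (the path and model name are illustrative):

import json
from ollama import engine

llms = {}  # model name -> loaded Llama instance

# The first load reads the weights; later calls with the same dict reuse them.
engine.load('llama-7b', models_home='/tmp/models', llms=llms)

for chunk in engine.generate('llama-7b', 'Q: What is 2+2? A:',
                             models_home='/tmp/models', llms=llms):
    choices = json.loads(chunk).get('choices', [])
    if choices:
        print(choices[0].get('text', ''), end='')

# Drop the cached instance so the weights can be released.
engine.unload('llama-7b', llms=llms)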
ollama/model.py (new file, 9 lines)
@@ -0,0 +1,9 @@
from os import walk, path


def models(models_home='.', *args, **kwargs):
    for root, _, files in walk(models_home):
        for file in files:
            base, ext = path.splitext(file)
            if ext == '.bin':
                yield base, path.join(root, file)
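models() keys each model by its basename, so a file llama-7b.bin is addressed simply as llama-7b, and nested directories are searched too since walk() recurses. A quick illustration against a hypothetical directory:

from ollama import model

# Prints e.g.: llama-7b -> /tmp/models/llama-7b.bin
for name, path in model.models('/tmp/models'):
    print(name, '->', path)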