Update the /api/create endpoint to use JSON (#7935)

Changes `POST /api/create` to accept a JSON request body instead of a Modelfile.

This is a breaking change.
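
For example, where the old endpoint took a Modelfile string, the new endpoint takes structured JSON fields. A minimal sketch, expressed as the Go struct the server binds the body into (field names come from api.CreateRequest as used in this diff; the model names and values are illustrative):

package main

import "github.com/ollama/ollama/api"

// Sketch of the new create request; Modelfile directives map onto fields.
var req = api.CreateRequest{
	Model:    "mario",
	From:     "llama3.2", // replaces `FROM llama3.2`
	// Files: map[string]string{"model.gguf": "sha256-..."}, // or import GGUF blobs by digest
	System:   "You are Mario from Super Mario Bros.", // replaces SYSTEM
	Template: "{{ .System }} {{ .Prompt }}",          // replaces TEMPLATE
	Parameters: map[string]any{
		"temperature": 0.8, // replaces PARAMETER temperature 0.8
	},
}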
Patrick Devine authored on 2024-12-31 18:02:30 -08:00 (committed by GitHub)
parent 459d822b51
commit 86a622cbdc
17 changed files with 1523 additions and 1094 deletions

server/create.go (new file, 667 lines)

@@ -0,0 +1,667 @@
package server
import (
"bytes"
"cmp"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"path/filepath"
"slices"
"strings"
"github.com/gin-gonic/gin"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/convert"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/llama"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/template"
"github.com/ollama/ollama/types/errtypes"
"github.com/ollama/ollama/types/model"
)
var (
errNoFilesProvided = errors.New("no files provided to convert")
errOnlyOneAdapterSupported = errors.New("only one adapter is currently supported")
errOnlyGGUFSupported = errors.New("supplied file was not in GGUF format")
errUnknownType = errors.New("unknown type")
)
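// CreateHandler handles POST /api/create. It binds the JSON request body,
// resolves the target model name, and streams progress responses while the
// model is assembled from an existing model ("from") or uploaded blobs
// ("files", optionally "adapters").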
func (s *Server) CreateHandler(c *gin.Context) {
var r api.CreateRequest
if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"})
return
} else if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
name := model.ParseName(cmp.Or(r.Model, r.Name))
if !name.IsValid() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg})
return
}
name, err := getExistingName(name)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go func() {
defer close(ch)
fn := func(resp api.ProgressResponse) {
ch <- resp
}
oldManifest, _ := ParseNamedManifest(name)
var baseLayers []*layerGGML
if r.From != "" {
slog.Debug("create model from model name")
fromName := model.ParseName(r.From)
if !fromName.IsValid() {
ch <- gin.H{"error": errtypes.InvalidModelNameErrMsg, "status": http.StatusBadRequest}
return
}
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
baseLayers, err = parseFromModel(ctx, fromName, fn)
if err != nil {
ch <- gin.H{"error": err.Error()}
return
}
} else if r.Files != nil {
baseLayers, err = convertModelFromFiles(r.Files, baseLayers, false, fn)
if err != nil {
for _, badReq := range []error{errNoFilesProvided, errOnlyGGUFSupported, errUnknownType} {
if errors.Is(err, badReq) {
ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
return
}
}
ch <- gin.H{"error": err.Error()}
return
}
} else {
ch <- gin.H{"error": "neither 'from' or 'files' was specified", "status": http.StatusBadRequest}
return
}
var adapterLayers []*layerGGML
if r.Adapters != nil {
adapterLayers, err = convertModelFromFiles(r.Adapters, baseLayers, true, fn)
if err != nil {
for _, badReq := range []error{errNoFilesProvided, errOnlyOneAdapterSupported, errOnlyGGUFSupported, errUnknownType} {
if errors.Is(err, badReq) {
ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
return
}
}
ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
return
}
}
if len(adapterLayers) > 0 {
baseLayers = append(baseLayers, adapterLayers...)
}
if err := createModel(r, name, baseLayers, fn); err != nil {
if errors.Is(err, errBadTemplate) {
ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
return
}
ch <- gin.H{"error": err.Error()}
return
}
if !envconfig.NoPrune() && oldManifest != nil {
if err := oldManifest.RemoveLayers(); err != nil {
ch <- gin.H{"error": err.Error()}
}
}
ch <- api.ProgressResponse{Status: "success"}
}()
if r.Stream != nil && !*r.Stream {
waitForStream(c, ch)
return
}
streamResponse(c, ch)
}
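// convertModelFromFiles turns user-supplied file digests into layers,
// dispatching on the detected input type: safetensors inputs are converted,
// GGUF inputs are imported directly.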
func convertModelFromFiles(files map[string]string, baseLayers []*layerGGML, isAdapter bool, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) {
switch detectModelTypeFromFiles(files) {
case "safetensors":
layers, err := convertFromSafetensors(files, baseLayers, isAdapter, fn)
if err != nil {
slog.Error("error converting from safetensors", "error", err)
return nil, err
}
return layers, nil
case "gguf":
if len(files) == 0 {
return nil, errNoFilesProvided
} else if len(files) > 1 && isAdapter {
return nil, errOnlyOneAdapterSupported
}
var digest string
var allLayers []*layerGGML
for _, v := range files {
digest = v
layers, err := ggufLayers(digest, fn)
if err != nil {
return nil, err
}
allLayers = append(allLayers, layers...)
}
return allLayers, nil
default:
return nil, errUnknownType
}
}
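// detectModelTypeFromFiles guesses the input format from file names,
// returning "safetensors", "gguf", or "" when no known extension matches.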
func detectModelTypeFromFiles(files map[string]string) string {
// TODO: make this more robust by actually introspecting the files
for fn := range files {
if strings.HasSuffix(fn, ".safetensors") {
return "safetensors"
} else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".gguf") {
return "gguf"
}
}
return ""
}
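// convertFromSafetensors links the referenced blobs into a temporary
// directory, converts them to GGUF there, and wraps the result in a model or
// adapter layer.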
func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, isAdapter bool, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) {
tmpDir, err := os.MkdirTemp("", "ollama-safetensors")
if err != nil {
return nil, err
}
defer os.RemoveAll(tmpDir)
for fp, digest := range files {
blobPath, err := GetBlobsPath(digest)
if err != nil {
return nil, err
}
if err := createLink(blobPath, filepath.Join(tmpDir, fp)); err != nil {
return nil, err
}
}
t, err := os.CreateTemp(tmpDir, "fp16")
if err != nil {
return nil, err
}
defer t.Close()
var mediaType string
if !isAdapter {
fn(api.ProgressResponse{Status: "converting model"})
mediaType = "application/vnd.ollama.image.model"
if err := convert.ConvertModel(os.DirFS(tmpDir), t); err != nil {
return nil, err
}
} else {
kv, err := kvFromLayers(baseLayers)
if err != nil {
return nil, err
}
fn(api.ProgressResponse{Status: "converting adapter"})
mediaType = "application/vnd.ollama.image.adapter"
if err := convert.ConvertAdapter(os.DirFS(tmpDir), t, kv); err != nil {
return nil, err
}
}
if _, err := t.Seek(0, io.SeekStart); err != nil {
return nil, err
}
layer, err := NewLayer(t, mediaType)
if err != nil {
return nil, err
}
bin, err := layer.Open()
if err != nil {
return nil, err
}
defer bin.Close()
ggml, _, err := llm.DecodeGGML(bin, 0)
if err != nil {
return nil, err
}
layers := []*layerGGML{{layer, ggml}}
if !isAdapter {
return detectChatTemplate(layers)
}
return layers, nil
}
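// kvFromLayers returns the GGUF metadata of the first base layer carrying
// GGML data; adapter conversion needs it to match the base model.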
func kvFromLayers(baseLayers []*layerGGML) (llm.KV, error) {
for _, l := range baseLayers {
if l.GGML != nil {
return l.KV(), nil
}
}
return llm.KV{}, errors.New("no base model was found")
}
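// createModel builds the final layer list from the base layers (quantizing
// the model layer if requested), applies template, system, license,
// parameter, and message layers, and writes the manifest.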
func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML, fn func(resp api.ProgressResponse)) (err error) {
config := ConfigV2{
OS: "linux",
Architecture: "amd64",
RootFS: RootFS{
Type: "layers",
},
}
var layers []Layer
for _, layer := range baseLayers {
if layer.GGML != nil {
quantType := strings.ToUpper(cmp.Or(r.Quantize, r.Quantization))
if quantType != "" && layer.GGML.Name() == "gguf" && layer.MediaType == "application/vnd.ollama.image.model" {
want, err := llm.ParseFileType(quantType)
if err != nil {
return err
}
ft := layer.GGML.KV().FileType()
if !slices.Contains([]string{"F16", "F32"}, ft.String()) {
return errors.New("quantization is only supported for F16 and F32 models")
} else if ft != want {
layer, err = quantizeLayer(layer, quantType, fn)
if err != nil {
return err
}
}
}
config.ModelFormat = cmp.Or(config.ModelFormat, layer.GGML.Name())
config.ModelFamily = cmp.Or(config.ModelFamily, layer.GGML.KV().Architecture())
config.ModelType = cmp.Or(config.ModelType, format.HumanNumber(layer.GGML.KV().ParameterCount()))
config.FileType = cmp.Or(config.FileType, layer.GGML.KV().FileType().String())
config.ModelFamilies = append(config.ModelFamilies, layer.GGML.KV().Architecture())
}
layers = append(layers, layer.Layer)
}
if r.Template != "" {
layers, err = setTemplate(layers, r.Template)
if err != nil {
return err
}
}
if r.System != "" {
layers, err = setSystem(layers, r.System)
if err != nil {
return err
}
}
if r.License != nil {
switch l := r.License.(type) {
case string:
if l != "" {
layers, err = setLicense(layers, l)
if err != nil {
return err
}
}
case any:
var licenses []string
b, _ := json.Marshal(l) // re-marshal to JSON
if err := json.Unmarshal(b, &licenses); err != nil {
return err
}
for _, v := range licenses {
layers, err = setLicense(layers, v)
if err != nil {
return err
}
}
default:
return fmt.Errorf("unknown license type: %T", l)
}
}
layers, err = setParameters(layers, r.Parameters)
if err != nil {
return err
}
layers, err = setMessages(layers, r.Messages)
if err != nil {
return err
}
configLayer, err := createConfigLayer(layers, config)
if err != nil {
return err
}
for _, layer := range layers {
if layer.status != "" {
fn(api.ProgressResponse{Status: layer.status})
}
}
fn(api.ProgressResponse{Status: "writing manifest"})
if err := WriteManifest(name, *configLayer, layers); err != nil {
return err
}
return nil
}
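// quantizeLayer re-quantizes a GGUF model blob to the requested type and
// returns a new layer wrapping the result.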
func quantizeLayer(layer *layerGGML, quantizeType string, fn func(resp api.ProgressResponse)) (*layerGGML, error) {
ft := layer.GGML.KV().FileType()
fn(api.ProgressResponse{Status: fmt.Sprintf("quantizing %s model to %s", ft, quantizeType)})
want, err := llm.ParseFileType(quantizeType)
if err != nil {
return nil, err
}
blob, err := GetBlobsPath(layer.Digest)
if err != nil {
return nil, err
}
temp, err := os.CreateTemp(filepath.Dir(blob), quantizeType)
if err != nil {
return nil, err
}
defer temp.Close()
defer os.Remove(temp.Name())
if err := llama.Quantize(blob, temp.Name(), uint32(want)); err != nil {
return nil, err
}
newLayer, err := NewLayer(temp, layer.MediaType)
if err != nil {
return nil, err
}
if _, err := temp.Seek(0, io.SeekStart); err != nil {
return nil, err
}
ggml, _, err := llm.DecodeGGML(temp, 0)
if err != nil {
slog.Error(fmt.Sprintf("error decoding ggml: %s\n", err))
return nil, err
}
return &layerGGML{newLayer, ggml}, nil
}
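// ggufLayers decodes one or more GGUF payloads from a single blob and returns
// a layer per payload, classified as model, adapter, or projector.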
func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) {
var layers []*layerGGML
fn(api.ProgressResponse{Status: "parsing GGUF"})
blobPath, err := GetBlobsPath(digest)
if err != nil {
return nil, err
}
blob, err := os.Open(blobPath)
if err != nil {
return nil, err
}
defer blob.Close()
sr := io.NewSectionReader(blob, 0, 512)
contentType, err := detectContentType(sr)
if err != nil {
return nil, err
}
if contentType != "gguf" {
slog.Error(fmt.Sprintf("unsupported content type: %s", contentType))
return nil, errOnlyGGUFSupported
}
stat, err := blob.Stat()
if err != nil {
return nil, err
}
var offset int64
for offset < stat.Size() {
ggml, n, err := llm.DecodeGGML(blob, 0)
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return nil, err
}
mediatype := "application/vnd.ollama.image.model"
if ggml.KV().Kind() == "adapter" {
mediatype = "application/vnd.ollama.image.adapter"
} else if _, ok := ggml.KV()[fmt.Sprintf("%s.vision.block_count", ggml.KV().Architecture())]; ok || ggml.KV().Kind() == "projector" {
mediatype = "application/vnd.ollama.image.projector"
}
var layer Layer
if digest != "" && n == stat.Size() && offset == 0 {
layer, err = NewLayerFromLayer(digest, mediatype, blob.Name())
if err != nil {
slog.Debug("could not create new layer from layer", "error", err)
}
}
// Fallback to creating layer from file copy (either NewLayerFromLayer failed, or digest empty/n != stat.Size())
if layer.Digest == "" {
layer, err = NewLayer(io.NewSectionReader(blob, offset, n), mediatype)
if err != nil {
return nil, err
}
}
layers = append(layers, &layerGGML{layer, ggml})
offset = n
}
return detectChatTemplate(layers)
}
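// removeLayer drops every layer with the given media type, removing its blob
// from disk on a best-effort basis.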
func removeLayer(layers []Layer, mediatype string) []Layer {
return slices.DeleteFunc(layers, func(layer Layer) bool {
if layer.MediaType != mediatype {
return false
}
if err := layer.Remove(); err != nil {
slog.Warn("couldn't remove blob", "digest", layer.Digest, "error", err)
}
return true
})
}
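// setTemplate validates the template and replaces any existing template layer.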
func setTemplate(layers []Layer, t string) ([]Layer, error) {
layers = removeLayer(layers, "application/vnd.ollama.image.template")
if _, err := template.Parse(t); err != nil {
return nil, fmt.Errorf("%w: %s", errBadTemplate, err)
}
blob := strings.NewReader(t)
layer, err := NewLayer(blob, "application/vnd.ollama.image.template")
if err != nil {
return nil, err
}
layers = append(layers, layer)
return layers, nil
}
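// setSystem replaces the system prompt layer; an empty string clears it.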
func setSystem(layers []Layer, s string) ([]Layer, error) {
layers = removeLayer(layers, "application/vnd.ollama.image.system")
if s != "" {
blob := strings.NewReader(s)
layer, err := NewLayer(blob, "application/vnd.ollama.image.system")
if err != nil {
return nil, err
}
layers = append(layers, layer)
}
return layers, nil
}
func setLicense(layers []Layer, l string) ([]Layer, error) {
blob := strings.NewReader(l)
layer, err := NewLayer(blob, "application/vnd.ollama.image.license")
if err != nil {
return nil, err
}
layers = append(layers, layer)
return layers, nil
}
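// setParameters merges parameters inherited from an existing params layer
// with the requested ones (request values win) and encodes them as a layer.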
func setParameters(layers []Layer, p map[string]any) ([]Layer, error) {
if p == nil {
p = make(map[string]any)
}
for _, layer := range layers {
if layer.MediaType != "application/vnd.ollama.image.params" {
continue
}
digestPath, err := GetBlobsPath(layer.Digest)
if err != nil {
return nil, err
}
blob, err := os.Open(digestPath)
if err != nil {
return nil, err
}
var existing map[string]any
err = json.NewDecoder(blob).Decode(&existing)
blob.Close()
if err != nil {
return nil, err
}
for k, v := range existing {
if _, exists := p[k]; exists {
continue
}
p[k] = v
}
}
if len(p) == 0 {
return layers, nil
}
layers = removeLayer(layers, "application/vnd.ollama.image.params")
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(p); err != nil {
return nil, err
}
layer, err := NewLayer(&b, "application/vnd.ollama.image.params")
if err != nil {
return nil, err
}
layers = append(layers, layer)
return layers, nil
}
func setMessages(layers []Layer, m []api.Message) ([]Layer, error) {
// this leaves the old messages intact if no new messages were specified
// which may not be the correct behaviour
if len(m) == 0 {
return layers, nil
}
slog.Debug("removing old messages")
layers = removeLayer(layers, "application/vnd.ollama.image.messages")
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(m); err != nil {
return nil, err
}
layer, err := NewLayer(&b, "application/vnd.ollama.image.messages")
if err != nil {
return nil, err
}
layers = append(layers, layer)
return layers, nil
}
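// createConfigLayer records the layer digests as the image's diff IDs and
// encodes the config as its own layer.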
func createConfigLayer(layers []Layer, config ConfigV2) (*Layer, error) {
digests := make([]string, len(layers))
for i, layer := range layers {
digests[i] = layer.Digest
}
config.RootFS.DiffIDs = digests
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(config); err != nil {
return nil, err
}
layer, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json")
if err != nil {
return nil, err
}
return &layer, nil
}
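// createLink symlinks src to dst, creating parent directories as needed and
// falling back to a file copy where symlinks are unavailable.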
func createLink(src, dst string) error {
// make any subdirs for dst
if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
return err
}
_ = os.Remove(dst)
if err := os.Symlink(src, dst); err != nil {
if err := copyFile(src, dst); err != nil {
return err
}
}
return nil
}
func copyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dst)
if err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, srcFile)
return err
}


@@ -2,7 +2,6 @@ package server
import (
"bytes"
"cmp"
"context"
"crypto/sha256"
"encoding/hex"
@@ -24,8 +23,6 @@ import (
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/llama"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/template"
@@ -121,7 +118,7 @@ func (m *Model) CheckCapabilities(caps ...Capability) error {
}
func (m *Model) String() string {
var modelfile parser.File
var modelfile parser.Modelfile
modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "model",
@@ -330,328 +327,6 @@ func GetModel(name string) (*Model, error) {
return model, nil
}
func realpath(rel, from string) string {
abspath, err := filepath.Abs(from)
if err != nil {
return from
}
home, err := os.UserHomeDir()
if err != nil {
return abspath
}
if from == "~" {
return home
} else if strings.HasPrefix(from, "~/") {
return filepath.Join(home, from[2:])
}
if _, err := os.Stat(filepath.Join(rel, from)); err == nil {
// this is a file relative to the Modelfile
return filepath.Join(rel, from)
}
return abspath
}
func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
config := ConfigV2{
OS: "linux",
Architecture: "amd64",
RootFS: RootFS{
Type: "layers",
},
}
var messages []*api.Message
parameters := make(map[string]any)
var layers []Layer
var baseLayers []*layerGGML
for _, c := range modelfile.Commands {
mediatype := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)
command := c.Name
switch command {
case "model", "adapter":
if name := model.ParseName(c.Args); name.IsValid() && command == "model" {
name, err := getExistingName(name)
if err != nil {
return err
}
baseLayers, err = parseFromModel(ctx, name, fn)
if err != nil {
return err
}
} else if strings.HasPrefix(c.Args, "@") {
digest := strings.TrimPrefix(c.Args, "@")
if ib, ok := intermediateBlobs[digest]; ok {
p, err := GetBlobsPath(ib)
if err != nil {
return err
}
if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
// pass
} else if err != nil {
return err
} else {
fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib)})
digest = ib
}
}
blobpath, err := GetBlobsPath(digest)
if err != nil {
return err
}
blob, err := os.Open(blobpath)
if err != nil {
return err
}
defer blob.Close()
baseLayers, err = parseFromFile(ctx, command, baseLayers, blob, digest, fn)
if err != nil {
return err
}
} else if file, err := os.Open(realpath(modelFileDir, c.Args)); err == nil {
defer file.Close()
baseLayers, err = parseFromFile(ctx, command, baseLayers, file, "", fn)
if err != nil {
return err
}
} else {
return fmt.Errorf("invalid model reference: %s", c.Args)
}
for _, baseLayer := range baseLayers {
if quantization != "" &&
baseLayer.MediaType == "application/vnd.ollama.image.model" &&
baseLayer.GGML != nil &&
baseLayer.GGML.Name() == "gguf" {
want, err := llm.ParseFileType(quantization)
if err != nil {
return err
}
ft := baseLayer.GGML.KV().FileType()
if !slices.Contains([]string{"F16", "F32"}, ft.String()) {
return errors.New("quantization is only supported for F16 and F32 models")
} else if want != ft {
fn(api.ProgressResponse{Status: fmt.Sprintf("quantizing %s model to %s", ft, quantization)})
blob, err := GetBlobsPath(baseLayer.Digest)
if err != nil {
return err
}
temp, err := os.CreateTemp(filepath.Dir(blob), quantization)
if err != nil {
return err
}
defer temp.Close()
defer os.Remove(temp.Name())
if err := llama.Quantize(blob, temp.Name(), uint32(want)); err != nil {
return err
}
layer, err := NewLayer(temp, baseLayer.MediaType)
if err != nil {
return err
}
if _, err := temp.Seek(0, io.SeekStart); err != nil {
return err
}
ggml, _, err := llm.DecodeGGML(temp, 0)
if err != nil {
return err
}
baseLayer.Layer = layer
baseLayer.GGML = ggml
}
}
if baseLayer.GGML != nil {
config.ModelFormat = cmp.Or(config.ModelFormat, baseLayer.GGML.Name())
config.ModelFamily = cmp.Or(config.ModelFamily, baseLayer.GGML.KV().Architecture())
config.ModelType = cmp.Or(config.ModelType, format.HumanNumber(baseLayer.GGML.KV().ParameterCount()))
config.FileType = cmp.Or(config.FileType, baseLayer.GGML.KV().FileType().String())
config.ModelFamilies = append(config.ModelFamilies, baseLayer.GGML.KV().Architecture())
}
layers = append(layers, baseLayer.Layer)
}
case "license", "template", "system":
if c.Name == "template" {
if _, err := template.Parse(c.Args); err != nil {
return fmt.Errorf("%w: %s", errBadTemplate, err)
}
}
if c.Name != "license" {
// replace
layers = slices.DeleteFunc(layers, func(layer Layer) bool {
if layer.MediaType != mediatype {
return false
}
if err := layer.Remove(); err != nil {
return false
}
return true
})
}
blob := strings.NewReader(c.Args)
layer, err := NewLayer(blob, mediatype)
if err != nil {
return err
}
layers = append(layers, layer)
case "message":
role, content, ok := strings.Cut(c.Args, ": ")
if !ok {
return fmt.Errorf("invalid message: %s", c.Args)
}
messages = append(messages, &api.Message{Role: role, Content: content})
default:
ps, err := api.FormatParams(map[string][]string{c.Name: {c.Args}})
if err != nil {
return err
}
for k, v := range ps {
if ks, ok := parameters[k].([]string); ok {
parameters[k] = append(ks, v.([]string)...)
} else if vs, ok := v.([]string); ok {
parameters[k] = vs
} else {
parameters[k] = v
}
}
}
}
var err2 error
layers = slices.DeleteFunc(layers, func(layer Layer) bool {
switch layer.MediaType {
case "application/vnd.ollama.image.message":
// if there are new messages, remove the inherited ones
if len(messages) > 0 {
return true
}
return false
case "application/vnd.ollama.image.params":
// merge inherited parameters with new ones
r, err := layer.Open()
if err != nil {
err2 = err
return false
}
defer r.Close()
var ps map[string]any
if err := json.NewDecoder(r).Decode(&ps); err != nil {
err2 = err
return false
}
for k, v := range ps {
if _, ok := parameters[k]; !ok {
parameters[k] = v
}
}
return true
default:
return false
}
})
if err2 != nil {
return err2
}
if len(messages) > 0 {
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(messages); err != nil {
return err
}
layer, err := NewLayer(&b, "application/vnd.ollama.image.messages")
if err != nil {
return err
}
layers = append(layers, layer)
}
if len(parameters) > 0 {
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(parameters); err != nil {
return err
}
layer, err := NewLayer(&b, "application/vnd.ollama.image.params")
if err != nil {
return err
}
layers = append(layers, layer)
}
digests := make([]string, len(layers))
for i, layer := range layers {
digests[i] = layer.Digest
}
config.RootFS.DiffIDs = digests
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(config); err != nil {
return err
}
configLayer, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json")
if err != nil {
return err
}
for _, layer := range append(layers, configLayer) {
if layer.status != "" {
fn(api.ProgressResponse{Status: layer.status})
}
}
old, _ := ParseNamedManifest(name)
fn(api.ProgressResponse{Status: "writing manifest"})
if err := WriteManifest(name, configLayer, layers); err != nil {
return err
}
if !envconfig.NoPrune() && old != nil {
if err := old.RemoveLayers(); err != nil {
return err
}
}
fn(api.ProgressResponse{Status: "success"})
return nil
}
func CopyModel(src, dst model.Name) error {
if !dst.IsFullyQualified() {
return model.Unqualified(dst)


@@ -1,7 +1,6 @@
package server
import (
"archive/zip"
"bytes"
"context"
"encoding/json"
@@ -11,13 +10,11 @@ import (
"log/slog"
"net/http"
"os"
"path/filepath"
"slices"
"strings"
"text/template/parse"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/convert"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/template"
"github.com/ollama/ollama/types/model"
@@ -81,148 +78,6 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
return layers, nil
}
func parseFromZipFile(_ context.Context, command string, baseLayers []*layerGGML, f *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
fi, err := f.Stat()
if err != nil {
return nil, err
}
r, err := zip.NewReader(f, fi.Size())
if err != nil {
return nil, err
}
p, err := os.MkdirTemp(filepath.Dir(f.Name()), "")
if err != nil {
return nil, err
}
defer os.RemoveAll(p)
fn(api.ProgressResponse{Status: "converting model"})
// TODO(mxyng): this should write directly into a layer
// e.g. NewLayer(arch.Reader(), "application/vnd.ollama.image.model")
t, err := os.CreateTemp(p, "fp16")
if err != nil {
return nil, err
}
defer t.Close()
defer os.Remove(t.Name())
var layerType string
switch command {
case "adapter":
var baseModel *llm.GGML
for _, l := range baseLayers {
if l.GGML != nil {
baseModel = l.GGML
break
}
}
if baseModel == nil {
return nil, fmt.Errorf("no base model specified for the adapter")
}
if err := convert.ConvertAdapter(convert.NewZipReader(r, p, 32<<20), t, baseModel.KV()); err != nil {
return nil, err
}
layerType = "application/vnd.ollama.image.adapter"
case "model":
if err := convert.ConvertModel(convert.NewZipReader(r, p, 32<<20), t); err != nil {
return nil, err
}
layerType = "application/vnd.ollama.image.model"
}
if _, err := t.Seek(0, io.SeekStart); err != nil {
return nil, err
}
layer, err := NewLayer(t, layerType)
if err != nil {
return nil, err
}
bin, err := layer.Open()
if err != nil {
return nil, err
}
defer bin.Close()
ggml, _, err := llm.DecodeGGML(bin, 0)
if err != nil {
return nil, err
}
layers = append(layers, &layerGGML{layer, ggml})
intermediateBlobs[digest] = layer.Digest
return detectChatTemplate(layers)
}
func parseFromFile(ctx context.Context, command string, baseLayers []*layerGGML, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
sr := io.NewSectionReader(file, 0, 512)
contentType, err := detectContentType(sr)
if err != nil {
return nil, err
}
switch contentType {
case "gguf", "ggla":
// noop
case "application/zip":
return parseFromZipFile(ctx, command, baseLayers, file, digest, fn)
default:
return nil, fmt.Errorf("unsupported content type: %s", contentType)
}
stat, err := file.Stat()
if err != nil {
return nil, err
}
var offset int64
for offset < stat.Size() {
ggml, n, err := llm.DecodeGGML(file, 0)
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return nil, err
}
mediatype := "application/vnd.ollama.image.model"
if ggml.Name() == "ggla" || ggml.KV().Kind() == "adapter" {
mediatype = "application/vnd.ollama.image.adapter"
}
if _, ok := ggml.KV()[fmt.Sprintf("%s.vision.block_count", ggml.KV().Architecture())]; ok || ggml.KV().Kind() == "projector" {
mediatype = "application/vnd.ollama.image.projector"
}
var layer Layer
if digest != "" && n == stat.Size() && offset == 0 {
layer, err = NewLayerFromLayer(digest, mediatype, file.Name())
if err != nil {
slog.Debug("could not create new layer from layer", "error", err)
}
}
// Fallback to creating layer from file copy (either NewLayerFromLayer failed, or digest empty/n != stat.Size())
if layer.Digest == "" {
layer, err = NewLayer(io.NewSectionReader(file, offset, n), mediatype)
if err != nil {
return nil, err
}
}
layers = append(layers, &layerGGML{layer, ggml})
offset = n
}
return detectChatTemplate(layers)
}
func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) {
for _, layer := range layers {
if s := layer.GGML.KV().ChatTemplate(); s != "" {


@@ -2,10 +2,8 @@ package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"testing"
@@ -13,7 +11,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/template"
)
@@ -139,87 +136,6 @@ The temperature in San Francisco, CA is 70°F and in Toronto, Canada is 20°C.`,
}
}
func TestParseFromFileFromLayer(t *testing.T) {
tempModels := t.TempDir()
t.Setenv("OLLAMA_MODELS", tempModels)
file, err := os.CreateTemp(tempModels, "")
if err != nil {
t.Fatalf("failed to open file: %v", err)
}
defer file.Close()
if err := llm.WriteGGUF(file, llm.KV{"general.architecture": "gemma"}, []llm.Tensor{}); err != nil {
t.Fatalf("failed to write gguf: %v", err)
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
t.Fatalf("failed to seek to start: %v", err)
}
layers, err := parseFromFile(context.Background(), "model", []*layerGGML{}, file, "", func(api.ProgressResponse) {})
if err != nil {
t.Fatalf("failed to parse from file: %v", err)
}
if len(layers) != 1 {
t.Fatalf("got %d != want 1", len(layers))
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
t.Fatalf("failed to seek to start: %v", err)
}
layers2, err := parseFromFile(context.Background(), "model", []*layerGGML{}, file, layers[0].Digest, func(api.ProgressResponse) {})
if err != nil {
t.Fatalf("failed to parse from file: %v", err)
}
if len(layers2) != 1 {
t.Fatalf("got %d != want 1", len(layers2))
}
if layers[0].Digest != layers2[0].Digest {
t.Fatalf("got %s != want %s", layers[0].Digest, layers2[0].Digest)
}
if layers[0].Size != layers2[0].Size {
t.Fatalf("got %d != want %d", layers[0].Size, layers2[0].Size)
}
if layers[0].MediaType != layers2[0].MediaType {
t.Fatalf("got %v != want %v", layers[0].MediaType, layers2[0].MediaType)
}
}
func TestParseLayerFromCopy(t *testing.T) {
tempModels := t.TempDir()
t.Setenv("OLLAMA_MODELS", tempModels)
file2, err := os.CreateTemp(tempModels, "")
if err != nil {
t.Fatalf("failed to open file: %v", err)
}
defer file2.Close()
for range 5 {
if err := llm.WriteGGUF(file2, llm.KV{"general.architecture": "gemma"}, []llm.Tensor{}); err != nil {
t.Fatalf("failed to write gguf: %v", err)
}
}
if _, err := file2.Seek(0, io.SeekStart); err != nil {
t.Fatalf("failed to seek to start: %v", err)
}
layers, err := parseFromFile(context.Background(), "model", []*layerGGML{}, file2, "", func(api.ProgressResponse) {})
if err != nil {
t.Fatalf("failed to parse from file: %v", err)
}
if len(layers) != 5 {
t.Fatalf("got %d != want 5", len(layers))
}
}
func TestParseObjects(t *testing.T) {
tests := []struct {
input string


@@ -33,7 +33,6 @@ import (
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/model/mllama"
"github.com/ollama/ollama/openai"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/runners"
"github.com/ollama/ollama/template"
"github.com/ollama/ollama/types/errtypes"
@@ -688,77 +687,6 @@ func getExistingName(n model.Name) (model.Name, error) {
return n, nil
}
func (s *Server) CreateHandler(c *gin.Context) {
var r api.CreateRequest
if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"})
return
} else if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
name := model.ParseName(cmp.Or(r.Model, r.Name))
if !name.IsValid() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg})
return
}
name, err := getExistingName(name)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if r.Path == "" && r.Modelfile == "" {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "path or Modelfile are required"})
return
}
var sr io.Reader = strings.NewReader(r.Modelfile)
if r.Path != "" && r.Modelfile == "" {
f, err := os.Open(r.Path)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("error reading modelfile: %s", err)})
return
}
defer f.Close()
sr = f
}
f, err := parser.ParseFile(sr)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go func() {
defer close(ch)
fn := func(resp api.ProgressResponse) {
ch <- resp
}
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
quantization := cmp.Or(r.Quantize, r.Quantization)
if err := CreateModel(ctx, name, filepath.Dir(r.Path), strings.ToUpper(quantization), f, fn); errors.Is(err, errBadTemplate) {
ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
} else if err != nil {
ch <- gin.H{"error": err.Error()}
}
}()
if r.Stream != nil && !*r.Stream {
waitForStream(c, ch)
return
}
streamResponse(c, ch)
}
func (s *Server) DeleteHandler(c *gin.Context) {
var r api.DeleteRequest
if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) {


@@ -11,18 +11,23 @@ import (
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/gin-gonic/gin"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/llm"
)
var stream bool = false
func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) string {
func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) (string, string) {
t.Helper()
t.Setenv("OLLAMA_MODELS", cmp.Or(os.Getenv("OLLAMA_MODELS"), t.TempDir()))
modelDir := envconfig.Models()
f, err := os.CreateTemp(t.TempDir(), "")
if err != nil {
@@ -33,8 +38,21 @@ func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) string {
if err := llm.WriteGGUF(f, kv, ti); err != nil {
t.Fatal(err)
}
// Calculate sha256 of file
if _, err := f.Seek(0, 0); err != nil {
t.Fatal(err)
}
return f.Name()
digest, _ := GetSHA256Digest(f)
if err := f.Close(); err != nil {
t.Fatal(err)
}
if err := createLink(f.Name(), filepath.Join(modelDir, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))); err != nil {
t.Fatal(err)
}
return f.Name(), digest
}
type responseRecorder struct {
@@ -93,13 +111,17 @@ func TestCreateFromBin(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
})
if w.Code != http.StatusOK {
fmt.Println(w)
t.Fatalf("expected status code 200, actual %d", w.Code)
}
@@ -120,10 +142,12 @@ func TestCreateFromModel(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -135,9 +159,9 @@ func TestCreateFromModel(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test",
Stream: &stream,
Name: "test2",
From: "test",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -162,10 +186,12 @@ func TestCreateRemovesLayers(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ .Prompt }}",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -183,9 +209,10 @@ func TestCreateRemovesLayers(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ .System }} {{ .Prompt }}",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -210,10 +237,12 @@ func TestCreateUnsetsSystem(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM Say hi!", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
System: "Say hi!",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -231,9 +260,10 @@ func TestCreateUnsetsSystem(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM \"\"", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
System: "",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -245,19 +275,9 @@ func TestCreateUnsetsSystem(t *testing.T) {
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-67d4b8d106af2a5b100a46e9bdc038c71eef2a35c9abac784092654212f97cf5"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
filepath.Join(p, "blobs", "sha256-ca239d7bd8ea90e4a5d2e6bf88f8d74a47b14336e73eb4e18bed4dd325018116"),
})
bts, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
if err != nil {
t.Fatal(err)
}
if string(bts) != "" {
t.Fatalf("expected empty string, actual %s", string(bts))
}
}
func TestCreateMergeParameters(t *testing.T) {
@@ -267,10 +287,16 @@ func TestCreateMergeParameters(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nPARAMETER temperature 1\nPARAMETER top_k 10\nPARAMETER stop USER:\nPARAMETER stop ASSISTANT:", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Parameters: map[string]any{
"temperature": 1,
"top_k": 10,
"stop": []string{"USER:", "ASSISTANT:"},
},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -289,9 +315,13 @@ func TestCreateMergeParameters(t *testing.T) {
// in order to merge parameters, the second model must be created FROM the first
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7",
Stream: &stream,
Name: "test2",
From: "test",
Parameters: map[string]any{
"temperature": 0.6,
"top_p": 0.7,
},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -303,6 +333,22 @@ func TestCreateMergeParameters(t *testing.T) {
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})
// Display contents of each blob in the directory
blobDir := filepath.Join(p, "blobs")
entries, err := os.ReadDir(blobDir)
if err != nil {
t.Fatalf("failed to read blobs directory: %v", err)
}
for _, entry := range entries {
blobPath := filepath.Join(blobDir, entry.Name())
content, err := os.ReadFile(blobPath)
if err != nil {
t.Fatalf("failed to read blob %s: %v", entry.Name(), err)
}
t.Logf("Contents of %s:\n%s", entry.Name(), string(content))
}
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
@@ -327,9 +373,14 @@ func TestCreateMergeParameters(t *testing.T) {
// slices are replaced
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7\nPARAMETER stop <|endoftext|>",
Stream: &stream,
Name: "test2",
From: "test",
Parameters: map[string]any{
"temperature": 0.6,
"top_p": 0.7,
"stop": []string{"<|endoftext|>"},
},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -371,10 +422,25 @@ func TestCreateReplacesMessages(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nMESSAGE assistant \"What is my purpose?\"\nMESSAGE user \"You run tests.\"\nMESSAGE assistant \"Oh, my god.\"", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Messages: []api.Message{
{
Role: "assistant",
Content: "What is my purpose?",
},
{
Role: "user",
Content: "You run tests.",
},
{
Role: "assistant",
Content: "Oh, my god.",
},
},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -392,9 +458,23 @@ func TestCreateReplacesMessages(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nMESSAGE assistant \"You're a test, Harry.\"\nMESSAGE user \"I-I'm a what?\"\nMESSAGE assistant \"A test. And a thumping good one at that, I'd wager.\"",
Stream: &stream,
Name: "test2",
From: "test",
Messages: []api.Message{
{
Role: "assistant",
Content: "You're a test, Harry.",
},
{
Role: "user",
Content: "I-I'm a what?",
},
{
Role: "assistant",
Content: "A test. And a thumping good one at that, I'd wager.",
},
},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -406,12 +486,13 @@ func TestCreateReplacesMessages(t *testing.T) {
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})
// Old layers will not have been pruned
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"),
filepath.Join(p, "blobs", "sha256-4f48b25fe9969564c82f58eb1cedbdff6484cc0baf474bc6c2a9b37c8da3362a"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"),
filepath.Join(p, "blobs", "sha256-e0e27d47045063ccb167ae852c51d49a98eab33fabaee4633fdddf97213e40b5"),
filepath.Join(p, "blobs", "sha256-f4e2c3690efef1b4b63ba1e1b2744ffeb6a7438a0110b86596069f6d9999c80b"),
})
type message struct {
@@ -448,10 +529,13 @@ func TestCreateTemplateSystem(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}\nSYSTEM Say hello!\nTEMPLATE {{ .System }} {{ .Prompt }}\nSYSTEM Say bye!", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ .System }} {{ .Prompt }}",
System: "Say bye!",
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -488,10 +572,12 @@ func TestCreateTemplateSystem(t *testing.T) {
}
t.Run("incomplete template", func(t *testing.T) {
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ .Prompt",
Stream: &stream,
})
if w.Code != http.StatusBadRequest {
@@ -500,10 +586,12 @@ func TestCreateTemplateSystem(t *testing.T) {
})
t.Run("template with unclosed if", func(t *testing.T) {
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ if .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ if .Prompt }}",
Stream: &stream,
})
if w.Code != http.StatusBadRequest {
@@ -512,10 +600,12 @@ func TestCreateTemplateSystem(t *testing.T) {
})
t.Run("template with undefined function", func(t *testing.T) {
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Template: "{{ Prompt }}",
Stream: &stream,
})
if w.Code != http.StatusBadRequest {
@@ -531,10 +621,12 @@ func TestCreateLicenses(t *testing.T) {
t.Setenv("OLLAMA_MODELS", p)
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nLICENSE MIT\nLICENSE Apache-2.0", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
License: []string{"MIT", "Apache-2.0"},
Stream: &stream,
})
if w.Code != http.StatusOK {
@@ -579,11 +671,12 @@ func TestCreateDetectTemplate(t *testing.T) {
var s Server
t.Run("matched", func(t *testing.T) {
_, digest := createBinFile(t, llm.KV{
"tokenizer.chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
}, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, llm.KV{
"tokenizer.chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
}, nil)),
Name: "test",
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
})
@@ -600,10 +693,11 @@ func TestCreateDetectTemplate(t *testing.T) {
})
t.Run("unmatched", func(t *testing.T) {
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
Name: "test",
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
})
if w.Code != http.StatusOK {


@@ -3,7 +3,6 @@ package server
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"testing"
@@ -22,9 +21,10 @@ func TestDelete(t *testing.T) {
var s Server
_, digest := createBinFile(t, nil, nil)
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Name: "test",
Files: map[string]string{"test.gguf": digest},
})
if w.Code != http.StatusOK {
@@ -32,8 +32,9 @@ func TestDelete(t *testing.T) {
}
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "test2",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
Name: "test2",
Files: map[string]string{"test.gguf": digest},
Template: "{{ .System }} {{ .Prompt }}",
})
if w.Code != http.StatusOK {


@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
@@ -89,10 +88,34 @@ func TestGenerateChat(t *testing.T) {
go s.sched.Run(context.TODO())
_, digest := createBinFile(t, llm.KV{
"general.architecture": "llama",
"llama.block_count": uint32(1),
"llama.context_length": uint32(8192),
"llama.embedding_length": uint32(4096),
"llama.attention.head_count": uint32(32),
"llama.attention.head_count_kv": uint32(8),
"tokenizer.ggml.tokens": []string{""},
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []llm.Tensor{
{Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
})
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "test",
Modelfile: fmt.Sprintf(`FROM %s
TEMPLATE """
Files: map[string]string{"file.gguf": digest},
Template: `
{{- if .Tools }}
{{ .Tools }}
{{ end }}
@@ -100,30 +123,7 @@ func TestGenerateChat(t *testing.T) {
{{- .Role }}: {{ .Content }}
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}
{{ end }}"""
`, createBinFile(t, llm.KV{
"general.architecture": "llama",
"llama.block_count": uint32(1),
"llama.context_length": uint32(8192),
"llama.embedding_length": uint32(4096),
"llama.attention.head_count": uint32(32),
"llama.attention.head_count_kv": uint32(8),
"tokenizer.ggml.tokens": []string{""},
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []llm.Tensor{
{Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
})),
{{ end }}`,
Stream: &stream,
})
@@ -154,12 +154,13 @@ func TestGenerateChat(t *testing.T) {
})
t.Run("missing capabilities chat", func(t *testing.T) {
_, digest := createBinFile(t, llm.KV{
"general.architecture": "bert",
"bert.pooling_type": uint32(0),
}, []llm.Tensor{})
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "bert",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, llm.KV{
"general.architecture": "bert",
"bert.pooling_type": uint32(0),
}, []llm.Tensor{})),
Model: "bert",
Files: map[string]string{"bert.gguf": digest},
Stream: &stream,
})
@@ -281,8 +282,9 @@ func TestGenerateChat(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "test-system",
Modelfile: "FROM test\nSYSTEM You are a helpful assistant.",
Model: "test-system",
From: "test",
System: "You are a helpful assistant.",
})
if w.Code != http.StatusOK {
@@ -622,36 +624,38 @@ func TestGenerate(t *testing.T) {
go s.sched.Run(context.TODO())
_, digest := createBinFile(t, llm.KV{
"general.architecture": "llama",
"llama.block_count": uint32(1),
"llama.context_length": uint32(8192),
"llama.embedding_length": uint32(4096),
"llama.attention.head_count": uint32(32),
"llama.attention.head_count_kv": uint32(8),
"tokenizer.ggml.tokens": []string{""},
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []llm.Tensor{
{Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
})
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "test",
Modelfile: fmt.Sprintf(`FROM %s
TEMPLATE """
Files: map[string]string{"file.gguf": digest},
Template: `
{{- if .System }}System: {{ .System }} {{ end }}
{{- if .Prompt }}User: {{ .Prompt }} {{ end }}
{{- if .Response }}Assistant: {{ .Response }} {{ end }}"""
`, createBinFile(t, llm.KV{
"general.architecture": "llama",
"llama.block_count": uint32(1),
"llama.context_length": uint32(8192),
"llama.embedding_length": uint32(4096),
"llama.attention.head_count": uint32(32),
"llama.attention.head_count_kv": uint32(8),
"tokenizer.ggml.tokens": []string{""},
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []llm.Tensor{
{Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
})),
{{- if .Response }}Assistant: {{ .Response }} {{ end }}
`,
Stream: &stream,
})
@@ -682,12 +686,14 @@ func TestGenerate(t *testing.T) {
})
t.Run("missing capabilities generate", func(t *testing.T) {
_, digest := createBinFile(t, llm.KV{
"general.architecture": "bert",
"bert.pooling_type": uint32(0),
}, []llm.Tensor{})
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "bert",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, llm.KV{
"general.architecture": "bert",
"bert.pooling_type": uint32(0),
}, []llm.Tensor{})),
Model: "bert",
Files: map[string]string{"file.gguf": digest},
Stream: &stream,
})
@@ -824,8 +830,9 @@ func TestGenerate(t *testing.T) {
})
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "test-system",
Modelfile: "FROM test\nSYSTEM You are a helpful assistant.",
Model: "test-system",
From: "test",
System: "You are a helpful assistant.",
})
if w.Code != http.StatusOK {
@@ -894,10 +901,10 @@ func TestGenerate(t *testing.T) {
w = createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "test-suffix",
Modelfile: `FROM test
TEMPLATE """{{- if .Suffix }}<PRE> {{ .Prompt }} <SUF>{{ .Suffix }} <MID>
Template: `{{- if .Suffix }}<PRE> {{ .Prompt }} <SUF>{{ .Suffix }} <MID>
{{- else }}{{ .Prompt }}
{{- end }}"""`,
{{- end }}`,
From: "test",
})
if w.Code != http.StatusOK {


@@ -2,7 +2,6 @@ package server
import (
"encoding/json"
"fmt"
"net/http"
"slices"
"testing"
@@ -31,9 +30,11 @@ func TestList(t *testing.T) {
var s Server
for _, n := range expectNames {
_, digest := createBinFile(t, nil, nil)
createRequest(t, s.CreateHandler, api.CreateRequest{
Name: n,
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Name: n,
Files: map[string]string{"test.gguf": digest},
})
}


@@ -23,14 +23,18 @@ import (
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/openai"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/types/model"
"github.com/ollama/ollama/version"
)
func createTestFile(t *testing.T, name string) string {
func createTestFile(t *testing.T, name string) (string, string) {
t.Helper()
modelDir := os.Getenv("OLLAMA_MODELS")
if modelDir == "" {
t.Fatalf("OLLAMA_MODELS not specified")
}
f, err := os.CreateTemp(t.TempDir(), name)
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
@@ -57,7 +61,21 @@ func createTestFile(t *testing.T, name string) string {
t.Fatalf("failed to write to file: %v", err)
}
return f.Name()
// Calculate sha256 sum of file
if _, err := f.Seek(0, 0); err != nil {
t.Fatal(err)
}
digest, _ := GetSHA256Digest(f)
if err := f.Close(); err != nil {
t.Fatal(err)
}
if err := createLink(f.Name(), filepath.Join(modelDir, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))); err != nil {
t.Fatal(err)
}
return f.Name(), digest
}
// equalStringSlices checks if two slices of strings are equal.
@@ -85,20 +103,32 @@ func Test_Routes(t *testing.T) {
createTestModel := func(t *testing.T, name string) {
t.Helper()
fname := createTestFile(t, "ollama-model")
_, digest := createTestFile(t, "ollama-model")
r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname))
modelfile, err := parser.ParseFile(r)
if err != nil {
t.Fatalf("failed to parse file: %v", err)
}
fn := func(resp api.ProgressResponse) {
t.Logf("Status: %s", resp.Status)
}
err = CreateModel(context.TODO(), model.ParseName(name), "", "", modelfile, fn)
r := api.CreateRequest{
Name: name,
Files: map[string]string{"test.gguf": digest},
Parameters: map[string]any{
"seed": 42,
"top_p": 0.9,
"stop": []string{"foo", "bar"},
},
}
modelName := model.ParseName(name)
baseLayers, err := ggufLayers(digest, fn)
if err != nil {
t.Fatalf("failed to create model: %v", err)
}
if err := createModel(r, modelName, baseLayers, fn); err != nil {
t.Fatal(err)
}
}
testCases := []testCase{
@@ -301,13 +331,12 @@ func Test_Routes(t *testing.T) {
Method: http.MethodPost,
Path: "/api/create",
Setup: func(t *testing.T, req *http.Request) {
fname := createTestFile(t, "ollama-model")
_, digest := createTestFile(t, "ollama-model")
stream := false
createReq := api.CreateRequest{
Name: "t-bone",
Modelfile: fmt.Sprintf("FROM %s", fname),
Stream: &stream,
Name: "t-bone",
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
}
jsonData, err := json.Marshal(createReq)
if err != nil {
@@ -419,7 +448,10 @@ func Test_Routes(t *testing.T) {
},
},
{
Name: "openai retrieve model handler",
Name: "openai retrieve model handler",
Setup: func(t *testing.T, req *http.Request) {
createTestModel(t, "show-model")
},
Method: http.MethodGet,
Path: "/v1/models/show-model",
Expected: func(t *testing.T, resp *http.Response) {
@@ -571,21 +603,21 @@ func TestManifestCaseSensitivity(t *testing.T) {
t.Cleanup(func() { testMakeRequestDialContext = nil })
t.Logf("creating")
_, digest := createBinFile(t, nil, nil)
checkOK(createRequest(t, s.CreateHandler, api.CreateRequest{
// Start with the stable name, and later use a case-shuffled
// version.
Name: wantStableName,
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
Name: wantStableName,
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
}))
checkManifestList()
t.Logf("creating (again)")
checkOK(createRequest(t, s.CreateHandler, api.CreateRequest{
Name: name(),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
Name: name(),
Files: map[string]string{"test.gguf": digest},
Stream: &stream,
}))
checkManifestList()
@@ -622,13 +654,12 @@ func TestShow(t *testing.T) {
var s Server
_, digest1 := createBinFile(t, llm.KV{"general.architecture": "test"}, nil)
_, digest2 := createBinFile(t, llm.KV{"general.type": "projector", "general.architecture": "clip"}, nil)
createRequest(t, s.CreateHandler, api.CreateRequest{
Name: "show-model",
Modelfile: fmt.Sprintf(
"FROM %s\nFROM %s",
createBinFile(t, llm.KV{"general.architecture": "test"}, nil),
createBinFile(t, llm.KV{"general.type": "projector", "general.architecture": "clip"}, nil),
),
Name: "show-model",
Files: map[string]string{"model.gguf": digest1, "projector.gguf": digest2},
})
w := createRequest(t, s.ShowHandler, api.ShowRequest{