From cae5d4d4ea43493670d038ec01c466bec81edf38 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen
Date: Wed, 5 Mar 2025 14:11:21 -0800
Subject: [PATCH 001/157] Win: doc new rocm zip file (#9367)

To stay under the 2GB GitHub artifact limit, we're splitting ROCm out like we
do on Linux.
---
 docs/windows.md | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/docs/windows.md b/docs/windows.md
index 018cc41d..78b99a5d 100644
--- a/docs/windows.md
+++ b/docs/windows.md
@@ -81,9 +81,11 @@ help you keep up to date.

 If you'd like to install or integrate Ollama as a service, a standalone
 `ollama-windows-amd64.zip` zip file is available containing only the Ollama CLI
-and GPU library dependencies for Nvidia and AMD. This allows for embedding
-Ollama in existing applications, or running it as a system service via `ollama
-serve` with tools such as [NSSM](https://nssm.cc/).
+and GPU library dependencies for Nvidia. If you have an AMD GPU, also download
+and extract the additional ROCm package `ollama-windows-amd64-rocm.zip` into the
+same directory. This allows for embedding Ollama in existing applications, or
+running it as a system service via `ollama serve` with tools such as
+[NSSM](https://nssm.cc/).

 > [!NOTE]
 > If you are upgrading from a prior version, you should remove the old directories first.

From e2252d0fc6ea5c410b1ac4fa0a722beda78b3431 Mon Sep 17 00:00:00 2001
From: Blake Mizerany
Date: Wed, 5 Mar 2025 14:48:18 -0800
Subject: [PATCH 002/157] server/internal/registry: take over pulls from server
 package (#9485)

This commit replaces the old pull implementation in the server package
with the new, faster, more robust pull implementation in the registry
package.

The new endpoint, and now the remove endpoint too, are behind the
feature gate "client2", enabled only by setting the OLLAMA_EXPERIMENT
environment variable to include "client2".

Currently, the progress indication is wired to perform the same as the
previous implementation to avoid making changes to the CLI, and because
the status reports happen at the start of the download and at the end of
the write to disk, the progress indication is not as smooth as it could
be. This is a known issue and will be addressed in a future change.

This implementation may be ~0.5-1.0% slower in rare cases, depending on
network and disk speed, but is generally MUCH faster and more robust
than its predecessor in all other cases.
---
 api/types.go                                  |   6 +-
 go.mod                                        |   1 +
 go.sum                                        |   2 +
 server/internal/client/ollama/registry.go     | 117 +++++++++++-----
 .../internal/client/ollama/registry_test.go   |   2 +-
 server/internal/client/ollama/trace.go        |  10 +-
 server/internal/registry/server.go            |  97 +++++++++++++
 server/internal/registry/server_test.go       | 130 +++++++++++++++++-
 .../library/smol/latest                       |   0
 .../internal/registry/testdata/registry.txt   |  22 +++
 server/routes.go                              |  35 +++--
 11 files changed, 370 insertions(+), 52 deletions(-)
 rename server/internal/registry/testdata/models/manifests/{registry.ollama.ai => example.com}/library/smol/latest (100%)
 create mode 100644 server/internal/registry/testdata/registry.txt

diff --git a/api/types.go b/api/types.go
index 637ca204..fef836bd 100644
--- a/api/types.go
+++ b/api/types.go
@@ -361,9 +361,9 @@ type CopyRequest struct {

 // PullRequest is the request passed to [Client.Pull].
type PullRequest struct { Model string `json:"model"` - Insecure bool `json:"insecure,omitempty"` - Username string `json:"username"` - Password string `json:"password"` + Insecure bool `json:"insecure,omitempty"` // Deprecated: ignored + Username string `json:"username"` // Deprecated: ignored + Password string `json:"password"` // Deprecated: ignored Stream *bool `json:"stream,omitempty"` // Deprecated: set the model name with Model instead diff --git a/go.mod b/go.mod index af0cedc8..c45c9892 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c golang.org/x/image v0.22.0 + golang.org/x/tools v0.30.0 gonum.org/v1/gonum v0.15.0 ) diff --git a/go.sum b/go.sum index 013a7db7..0ab97b90 100644 --- a/go.sum +++ b/go.sum @@ -309,6 +309,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/server/internal/client/ollama/registry.go b/server/internal/client/ollama/registry.go index 007de5e8..423a6ad2 100644 --- a/server/internal/client/ollama/registry.go +++ b/server/internal/client/ollama/registry.go @@ -45,9 +45,9 @@ import ( // Errors var ( - // ErrManifestNotFound is returned when a manifest is not found in the + // ErrModelNotFound is returned when a manifest is not found in the // cache or registry. - ErrManifestNotFound = errors.New("manifest not found") + ErrModelNotFound = errors.New("model not found") // ErrManifestInvalid is returned when a manifest found in a local or // remote cache is invalid. 
@@ -114,7 +114,18 @@ type Error struct { } func (e *Error) Error() string { - return fmt.Sprintf("registry responded with status %d: %s %s", e.Status, e.Code, e.Message) + var b strings.Builder + b.WriteString("registry responded with status ") + b.WriteString(strconv.Itoa(e.Status)) + if e.Code != "" { + b.WriteString(": code ") + b.WriteString(e.Code) + } + if e.Message != "" { + b.WriteString(": ") + b.WriteString(e.Message) + } + return b.String() } func (e *Error) LogValue() slog.Value { @@ -355,7 +366,7 @@ func (r *Registry) Push(ctx context.Context, name string, p *PushParams) error { n.Model(), l.Digest, ) - res, err := r.doOK(ctx, "POST", startURL, nil) + res, err := r.send(ctx, "POST", startURL, nil) if err != nil { return err } @@ -379,7 +390,7 @@ func (r *Registry) Push(ctx context.Context, name string, p *PushParams) error { } req.ContentLength = l.Size - res, err = doOK(r.client(), req) + res, err = sendRequest(r.client(), req) if err == nil { res.Body.Close() } @@ -399,7 +410,7 @@ func (r *Registry) Push(ctx context.Context, name string, p *PushParams) error { n.Model(), n.Tag(), ) - res, err := r.doOK(ctx, "PUT", path, bytes.NewReader(m.Data)) + res, err := r.send(ctx, "PUT", path, bytes.NewReader(m.Data)) if err == nil { res.Body.Close() } @@ -448,10 +459,15 @@ func (r *Registry) Pull(ctx context.Context, name string) error { t := traceFromContext(ctx) - var g errgroup.Group + g, ctx := errgroup.WithContext(ctx) g.SetLimit(r.maxStreams()) - for _, l := range m.Layers { + layers := m.Layers + if m.Config != nil && m.Config.Digest.IsValid() { + layers = append(layers, m.Config) + } + + for _, l := range layers { if exists(l) { t.update(l, l.Size, ErrCached) continue @@ -468,7 +484,9 @@ func (r *Registry) Pull(ctx context.Context, name string) error { if l.Size <= r.maxChunkingThreshold() { g.Go(func() error { - res, err := doOK(r.client(), req) + // TODO(bmizerany): retry/backoff like below in + // the chunking case + res, err := sendRequest(r.client(), req) if err != nil { return err } @@ -494,19 +512,21 @@ func (r *Registry) Pull(ctx context.Context, name string) error { // fire an initial request to get the final URL and // then use that URL for the chunk requests. 
req.Header.Set("Range", "bytes=0-0") - res, err := doOK(r.client(), req) + res, err := sendRequest(r.client(), req) if err != nil { return err } res.Body.Close() req = res.Request.WithContext(req.Context()) - streamNo := 0 - tws := make([]*bufio.Writer, r.maxStreams()-1) + wp := writerPool{size: r.maxChunkSize()} + for chunk := range chunks.Of(l.Size, r.maxChunkSize()) { + if ctx.Err() != nil { + break + } + ticket := q.Take() - bufIdx := streamNo % len(tws) - streamNo++ g.Go(func() (err error) { defer func() { if err != nil { @@ -520,23 +540,18 @@ func (r *Registry) Pull(ctx context.Context, name string) error { if err != nil { return err } - err := func() error { req := req.Clone(req.Context()) req.Header.Set("Range", fmt.Sprintf("bytes=%s", chunk)) - res, err := doOK(r.client(), req) + res, err := sendRequest(r.client(), req) if err != nil { return err } defer res.Body.Close() - tw := tws[bufIdx] - if tw == nil { - tw = bufio.NewWriterSize(nil, int(r.maxChunkSize())) - tws[bufIdx] = tw - } + tw := wp.get() tw.Reset(ticket) - defer tw.Reset(nil) // release ticket + defer wp.put(tw) _, err = io.CopyN(tw, res.Body, chunk.Size()) if err != nil { @@ -595,6 +610,9 @@ type Manifest struct { Name string `json:"-"` // the canonical name of the model Data []byte `json:"-"` // the raw data of the manifest Layers []*Layer `json:"layers"` + + // For legacy reasons, we still have to download the config layer. + Config *Layer `json:"config"` } var emptyDigest, _ = blob.ParseDigest("sha256:0000000000000000000000000000000000000000000000000000000000000000") @@ -678,7 +696,7 @@ func (r *Registry) ResolveLocal(name string) (*Manifest, error) { data, err := os.ReadFile(c.GetFile(d)) if err != nil { if errors.Is(err, fs.ErrNotExist) { - return nil, fmt.Errorf("%w: %s", ErrManifestNotFound, name) + return nil, fmt.Errorf("%w: %s", ErrModelNotFound, name) } return nil, err } @@ -701,7 +719,7 @@ func (r *Registry) Resolve(ctx context.Context, name string) (*Manifest, error) manifestURL = fmt.Sprintf("%s://%s/v2/%s/%s/blobs/%s", scheme, n.Host(), n.Namespace(), n.Model(), d) } - res, err := r.doOK(ctx, "GET", manifestURL, nil) + res, err := r.send(ctx, "GET", manifestURL, nil) if err != nil { return nil, err } @@ -726,7 +744,7 @@ func (r *Registry) client() *http.Client { } // newRequest constructs a new request, ready to use, with the given method, -// url, and body, presigned with client Key and UserAgent. +// url, and body, pre-signed with client [Key] and [UserAgent]. func (r *Registry) newRequest(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) { req, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { @@ -745,11 +763,17 @@ func (r *Registry) newRequest(ctx context.Context, method, url string, body io.R return req, nil } -// doOK makes a request with the given client and request, and returns the +// sendRequest makes a request with the given client and request, and returns the // response if the status code is 200. If the status code is not 200, an Error // is parsed from the response body and returned. If any other error occurs, it // is returned. -func doOK(c *http.Client, r *http.Request) (*http.Response, error) { +func sendRequest(c *http.Client, r *http.Request) (_ *http.Response, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("request error %s: %w", r.URL, err) + } + }() + if r.URL.Scheme == "https+insecure" { // TODO(bmizerany): clone client.Transport, set // InsecureSkipVerify, etc. 
@@ -792,20 +816,26 @@ func doOK(c *http.Client, r *http.Request) (*http.Response, error) { // Use the raw body if we can't parse it as an error object. re.Message = string(out) } + + // coerce MANIFEST_UNKNOWN to ErrManifestNotFound + if strings.EqualFold(re.Code, "MANIFEST_UNKNOWN") { + return nil, ErrModelNotFound + } + re.Status = res.StatusCode return nil, &re } return res, nil } -// doOK is a convenience method for making a request with newRequest and -// passing it to doOK with r.client(). -func (r *Registry) doOK(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) { +// send is a convenience method for making a request with newRequest and +// passing it to send with r.client(). +func (r *Registry) send(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) { req, err := r.newRequest(ctx, method, path, body) if err != nil { return nil, err } - return doOK(r.client(), req) + return sendRequest(r.client(), req) } // makeAuthToken creates an Ollama auth token for the given private key. @@ -960,3 +990,28 @@ func splitExtended(s string) (scheme, name, digest string) { } return scheme, s, digest } + +type writerPool struct { + size int64 // set by the caller + + mu sync.Mutex + ws []*bufio.Writer +} + +func (p *writerPool) get() *bufio.Writer { + p.mu.Lock() + defer p.mu.Unlock() + if len(p.ws) == 0 { + return bufio.NewWriterSize(nil, int(p.size)) + } + w := p.ws[len(p.ws)-1] + p.ws = p.ws[:len(p.ws)-1] + return w +} + +func (p *writerPool) put(w *bufio.Writer) { + p.mu.Lock() + defer p.mu.Unlock() + w.Reset(nil) + p.ws = append(p.ws, w) +} diff --git a/server/internal/client/ollama/registry_test.go b/server/internal/client/ollama/registry_test.go index b9b4271b..8f4e1604 100644 --- a/server/internal/client/ollama/registry_test.go +++ b/server/internal/client/ollama/registry_test.go @@ -608,7 +608,7 @@ func TestInsecureSkipVerify(t *testing.T) { url := fmt.Sprintf("https://%s/%s", s.Listener.Addr(), name) _, err := rc.Resolve(t.Context(), url) if err == nil || !strings.Contains(err.Error(), "failed to verify") { - t.Errorf("err = %v; want cert verifiction failure", err) + t.Errorf("err = %v; want cert verification failure", err) } url = fmt.Sprintf("https+insecure://%s/%s", s.Listener.Addr(), name) diff --git a/server/internal/client/ollama/trace.go b/server/internal/client/ollama/trace.go index e300870b..69435c40 100644 --- a/server/internal/client/ollama/trace.go +++ b/server/internal/client/ollama/trace.go @@ -13,9 +13,13 @@ type Trace struct { // Update is called during [Registry.Push] and [Registry.Pull] to // report the progress of blob uploads and downloads. // - // It is called once at the beginning of the download with a zero n and - // then once per read operation with the number of bytes read so far, - // and an error if any. + // The n argument is the number of bytes transferred so far, and err is + // any error that has occurred. If n == 0, and err is nil, the download + // or upload has just started. If err is [ErrCached], the download or + // upload has been skipped because the blob is already present in the + // local cache or remote registry, respectively. Otherwise, if err is + // non-nil, the download or upload has failed. When l.Size == n, and + // err is nil, the download or upload has completed. // // A function assigned must be safe for concurrent use. The function is // called synchronously and so should not block or take long to run. 
diff --git a/server/internal/registry/server.go b/server/internal/registry/server.go index 4d44aa8d..62fefb4c 100644 --- a/server/internal/registry/server.go +++ b/server/internal/registry/server.go @@ -7,10 +7,14 @@ import ( "cmp" "encoding/json" "errors" + "fmt" "io" "log/slog" "net/http" + "sync" + "time" + "github.com/ollama/ollama/server/internal/cache/blob" "github.com/ollama/ollama/server/internal/client/ollama" ) @@ -109,6 +113,8 @@ func (s *Local) serveHTTP(rec *statusCodeRecorder, r *http.Request) { switch r.URL.Path { case "/api/delete": return false, s.handleDelete(rec, r) + case "/api/pull": + return false, s.handlePull(rec, r) default: if s.Fallback != nil { s.Fallback.ServeHTTP(rec, r) @@ -214,6 +220,97 @@ func (s *Local) handleDelete(_ http.ResponseWriter, r *http.Request) error { return s.Prune() } +type progressUpdateJSON struct { + Status string `json:"status"` + Digest blob.Digest `json:"digest,omitempty,omitzero"` + Total int64 `json:"total,omitempty,omitzero"` + Completed int64 `json:"completed,omitempty,omitzero"` +} + +func (s *Local) handlePull(w http.ResponseWriter, r *http.Request) error { + if r.Method != "POST" { + return errMethodNotAllowed + } + + p, err := decodeUserJSON[*params](r.Body) + if err != nil { + return err + } + + maybeFlush := func() { + fl, _ := w.(http.Flusher) + if fl != nil { + fl.Flush() + } + } + defer maybeFlush() + + var mu sync.Mutex + enc := json.NewEncoder(w) + enc.Encode(progressUpdateJSON{Status: "pulling manifest"}) + + ctx := ollama.WithTrace(r.Context(), &ollama.Trace{ + Update: func(l *ollama.Layer, n int64, err error) { + mu.Lock() + defer mu.Unlock() + + // TODO(bmizerany): coalesce these updates; writing per + // update is expensive + enc.Encode(progressUpdateJSON{ + Digest: l.Digest, + Status: "pulling", + Total: l.Size, + Completed: n, + }) + }, + }) + + done := make(chan error, 1) + go func() { + // TODO(bmizerany): continue to support non-streaming responses + done <- s.Client.Pull(ctx, p.model()) + }() + + func() { + t := time.NewTicker(100 * time.Millisecond) + defer t.Stop() + for { + select { + case <-t.C: + mu.Lock() + maybeFlush() + mu.Unlock() + case err := <-done: + if err != nil { + var status string + if errors.Is(err, ollama.ErrModelNotFound) { + status = fmt.Sprintf("error: model %q not found", p.model()) + enc.Encode(progressUpdateJSON{Status: status}) + } else { + status = fmt.Sprintf("error: %v", err) + enc.Encode(progressUpdateJSON{Status: status}) + } + return + } + + // These final updates are not strictly necessary, because they have + // already happened at this point. Our pull handler code used to do + // these steps after, not during, the pull, and they were slow, so we + // wanted to provide feedback to users what was happening. For now, we + // keep them to not jar users who are used to seeing them. We can phase + // them out with a new and nicer UX later. One without progress bars + // and digests that no one cares about. 
+ enc.Encode(progressUpdateJSON{Status: "verifying layers"}) + enc.Encode(progressUpdateJSON{Status: "writing manifest"}) + enc.Encode(progressUpdateJSON{Status: "success"}) + return + } + } + }() + + return nil +} + func decodeUserJSON[T any](r io.Reader) (T, error) { var v T err := json.NewDecoder(r).Decode(&v) diff --git a/server/internal/registry/server_test.go b/server/internal/registry/server_test.go index e44d88c0..597e9bd6 100644 --- a/server/internal/registry/server_test.go +++ b/server/internal/registry/server_test.go @@ -1,17 +1,27 @@ package registry import ( + "bytes" + "context" "encoding/json" + "fmt" + "io" + "io/fs" + "net" "net/http" "net/http/httptest" "os" "regexp" "strings" + "sync" "testing" "github.com/ollama/ollama/server/internal/cache/blob" "github.com/ollama/ollama/server/internal/client/ollama" "github.com/ollama/ollama/server/internal/testutil" + "golang.org/x/tools/txtar" + + _ "embed" ) type panicTransport struct{} @@ -30,7 +40,7 @@ type bytesResetter interface { Reset() } -func newTestServer(t *testing.T) *Local { +func newTestServer(t *testing.T, upstreamRegistry http.HandlerFunc) *Local { t.Helper() dir := t.TempDir() err := os.CopyFS(dir, os.DirFS("testdata/models")) @@ -41,10 +51,25 @@ func newTestServer(t *testing.T) *Local { if err != nil { t.Fatal(err) } + + client := panicOnRoundTrip + if upstreamRegistry != nil { + s := httptest.NewTLSServer(upstreamRegistry) + t.Cleanup(s.Close) + tr := s.Client().Transport.(*http.Transport).Clone() + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "tcp", s.Listener.Addr().String()) + } + client = &http.Client{Transport: tr} + } + rc := &ollama.Registry{ Cache: c, - HTTPClient: panicOnRoundTrip, + HTTPClient: client, + Mask: "example.com/library/_:latest", } + l := &Local{ Client: rc, Logger: testutil.Slogger(t), @@ -85,7 +110,7 @@ func captureLogs(t *testing.T, s *Local) (*Local, bytesResetter) { func TestServerDelete(t *testing.T) { check := testutil.Checker(t) - s := newTestServer(t) + s := newTestServer(t, nil) _, err := s.Client.ResolveLocal("smol") check(err) @@ -127,8 +152,105 @@ func TestServerDelete(t *testing.T) { } } +//go:embed testdata/registry.txt +var registryTXT []byte + +var registryFS = sync.OnceValue(func() fs.FS { + // Txtar gets hung up on \r\n line endings, so we need to convert them + // to \n when parsing the txtar on Windows. 
+ data := bytes.ReplaceAll(registryTXT, []byte("\r\n"), []byte("\n")) + a := txtar.Parse(data) + fmt.Printf("%q\n", a.Comment) + fsys, err := txtar.FS(a) + if err != nil { + panic(err) + } + return fsys +}) + +func TestServerPull(t *testing.T) { + modelsHandler := http.FileServerFS(registryFS()) + s := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/library/BOOM/manifests/latest": + w.WriteHeader(999) + io.WriteString(w, `{"error": "boom"}`) + case "/v2/library/unknown/manifests/latest": + w.WriteHeader(404) + io.WriteString(w, `{"errors": [{"code": "MANIFEST_UNKNOWN", "message": "manifest unknown"}]}`) + default: + t.Logf("serving file: %s", r.URL.Path) + modelsHandler.ServeHTTP(w, r) + } + }) + + checkResponse := func(got *httptest.ResponseRecorder, wantlines string) { + t.Helper() + + if got.Code != 200 { + t.Fatalf("Code = %d; want 200", got.Code) + } + gotlines := got.Body.String() + t.Logf("got:\n%s", gotlines) + for want := range strings.Lines(wantlines) { + want = strings.TrimSpace(want) + want, unwanted := strings.CutPrefix(want, "!") + want = strings.TrimSpace(want) + if !unwanted && !strings.Contains(gotlines, want) { + t.Fatalf("! missing %q in body", want) + } + if unwanted && strings.Contains(gotlines, want) { + t.Fatalf("! unexpected %q in body", want) + } + } + } + + got := s.send(t, "POST", "/api/pull", `{"model": "BOOM"}`) + checkResponse(got, ` + {"status":"pulling manifest"} + {"status":"error: request error https://example.com/v2/library/BOOM/manifests/latest: registry responded with status 999: boom"} + `) + + got = s.send(t, "POST", "/api/pull", `{"model": "smol"}`) + checkResponse(got, ` + {"status":"pulling manifest"} + {"status":"pulling","digest":"sha256:68e0ec597aee59d35f8dc44942d7b17d471ade10d3aca07a5bb7177713950312","total":5} + {"status":"pulling","digest":"sha256:ca3d163bab055381827226140568f3bef7eaac187cebd76878e0b63e9e442356","total":3} + {"status":"pulling","digest":"sha256:68e0ec597aee59d35f8dc44942d7b17d471ade10d3aca07a5bb7177713950312","total":5,"completed":5} + {"status":"pulling","digest":"sha256:ca3d163bab055381827226140568f3bef7eaac187cebd76878e0b63e9e442356","total":3,"completed":3} + {"status":"verifying layers"} + {"status":"writing manifest"} + {"status":"success"} + `) + + got = s.send(t, "POST", "/api/pull", `{"model": "unknown"}`) + checkResponse(got, ` + {"status":"pulling manifest"} + {"status":"error: model \"unknown\" not found"} + `) + + got = s.send(t, "DELETE", "/api/pull", `{"model": "smol"}`) + checkErrorResponse(t, got, 405, "method_not_allowed", "method not allowed") + + got = s.send(t, "POST", "/api/pull", `!`) + checkErrorResponse(t, got, 400, "bad_request", "invalid character '!' 
looking for beginning of value") + + got = s.send(t, "POST", "/api/pull", ``) + checkErrorResponse(t, got, 400, "bad_request", "empty request body") + + got = s.send(t, "POST", "/api/pull", `{"model": "://"}`) + checkResponse(got, ` + {"status":"pulling manifest"} + {"status":"error: invalid or missing name: \"\""} + + !verifying + !writing + !success + `) +} + func TestServerUnknownPath(t *testing.T) { - s := newTestServer(t) + s := newTestServer(t, nil) got := s.send(t, "DELETE", "/api/unknown", `{}`) checkErrorResponse(t, got, 404, "not_found", "not found") } diff --git a/server/internal/registry/testdata/models/manifests/registry.ollama.ai/library/smol/latest b/server/internal/registry/testdata/models/manifests/example.com/library/smol/latest similarity index 100% rename from server/internal/registry/testdata/models/manifests/registry.ollama.ai/library/smol/latest rename to server/internal/registry/testdata/models/manifests/example.com/library/smol/latest diff --git a/server/internal/registry/testdata/registry.txt b/server/internal/registry/testdata/registry.txt new file mode 100644 index 00000000..2fc363fc --- /dev/null +++ b/server/internal/registry/testdata/registry.txt @@ -0,0 +1,22 @@ +-- v2/library/smol/manifests/latest -- +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "digest": "sha256:ca3d163bab055381827226140568f3bef7eaac187cebd76878e0b63e9e442356", + "size": 3 + }, + "layers": [ + { + "mediaType": "application/vnd.ollama.image.model", + "digest": "sha256:68e0ec597aee59d35f8dc44942d7b17d471ade10d3aca07a5bb7177713950312", + "size": 5 + } + ] +} + +-- v2/library/smol/blobs/sha256:68e0ec597aee59d35f8dc44942d7b17d471ade10d3aca07a5bb7177713950312 -- +GGUF +-- v2/library/smol/blobs/sha256:ca3d163bab055381827226140568f3bef7eaac187cebd76878e0b63e9e442356 -- +{} diff --git a/server/routes.go b/server/routes.go index 73e94dc6..3efa12e4 100644 --- a/server/routes.go +++ b/server/routes.go @@ -42,6 +42,12 @@ import ( "github.com/ollama/ollama/version" ) +func experimentEnabled(name string) bool { + return slices.Contains(strings.Split(os.Getenv("OLLAMA_EXPERIMENT"), ","), name) +} + +var useClient2 = experimentEnabled("client2") + var mode string = gin.DebugMode type Server struct { @@ -1173,6 +1179,7 @@ func (s *Server) GenerateRoutes(rc *ollama.Registry) (http.Handler, error) { r.HEAD("/api/tags", s.ListHandler) r.GET("/api/tags", s.ListHandler) r.POST("/api/show", s.ShowHandler) + r.DELETE("/api/delete", s.DeleteHandler) // Create r.POST("/api/create", s.CreateHandler) @@ -1194,16 +1201,19 @@ func (s *Server) GenerateRoutes(rc *ollama.Registry) (http.Handler, error) { r.GET("/v1/models", openai.ListMiddleware(), s.ListHandler) r.GET("/v1/models/:model", openai.RetrieveMiddleware(), s.ShowHandler) - // wrap old with new - rs := ®istry.Local{ - Client: rc, - Logger: slog.Default(), // TODO(bmizerany): Take a logger, do not use slog.Default() - Fallback: r, + if rc != nil { + // wrap old with new + rs := ®istry.Local{ + Client: rc, + Logger: slog.Default(), // TODO(bmizerany): Take a logger, do not use slog.Default() + Fallback: r, - Prune: PruneLayers, + Prune: PruneLayers, + } + return rs, nil } - return rs, nil + return r, nil } func Serve(ln net.Listener) error { @@ -1258,15 +1268,20 @@ func Serve(ln net.Listener) error { s := &Server{addr: ln.Addr()} - rc, err := ollama.DefaultRegistry() - if err != nil { - return err + var rc *ollama.Registry + if 
useClient2 { + var err error + rc, err = ollama.DefaultRegistry() + if err != nil { + return err + } } h, err := s.GenerateRoutes(rc) if err != nil { return err } + http.Handle("/", h) ctx, done := context.WithCancel(context.Background()) From b70fc4d51e76fc023afcd005c467d415c0c62750 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Wed, 5 Mar 2025 13:27:53 -0800 Subject: [PATCH 003/157] model: Don't unconditionally add special tokens We sometimes tokenize partial strings. For example, with multimodal inputs, we split the input string around the images and then tokenize each piece. In these cases, we should only add the special tokens on the first piece. --- llm/server.go | 2 +- model/process_text.go | 6 +++--- model/process_text_test.go | 14 +++++++------- runner/ollamarunner/runner.go | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/llm/server.go b/llm/server.go index 09690a5f..9553ba8f 100644 --- a/llm/server.go +++ b/llm/server.go @@ -973,7 +973,7 @@ func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) return s.llamaModel.Tokenize(content, false, true) } if s.textProcessor != nil { - tokens, err := s.textProcessor.Encode(content) + tokens, err := s.textProcessor.Encode(content, false) if err != nil { return nil, err } diff --git a/model/process_text.go b/model/process_text.go index 7083f36f..bfb0a5f2 100644 --- a/model/process_text.go +++ b/model/process_text.go @@ -19,7 +19,7 @@ const ( ) type TextProcessor interface { - Encode(string) ([]int32, error) + Encode(s string, addSpecial bool) ([]int32, error) Decode([]int32) (string, error) Is(int32, Special) bool } @@ -144,7 +144,7 @@ type merge struct { runes []rune } -func (bpe BytePairEncoding) Encode(s string) ([]int32, error) { +func (bpe BytePairEncoding) Encode(s string, addSpecial bool) ([]int32, error) { fragments := []fragment{{value: s}} for _, special := range bpe.vocab.SpecialVocabulary() { // TODO: process special tokens concurrently @@ -282,7 +282,7 @@ func (bpe BytePairEncoding) Encode(s string) ([]int32, error) { } } - if len(ids) > 0 { + if addSpecial && len(ids) > 0 { if bpe.vocab.AddBOS { if ids[0] == bpe.vocab.BOS { slog.Warn("adding bos token to prompt which already has it", "id", bpe.vocab.BOS) diff --git a/model/process_text_test.go b/model/process_text_test.go index cad1f94f..f4830321 100644 --- a/model/process_text_test.go +++ b/model/process_text_test.go @@ -74,7 +74,7 @@ func TestLlama(t *testing.T) { t.Run("simple", func(t *testing.T) { t.Parallel() - ids, err := tokenizer.Encode("hello world") + ids, err := tokenizer.Encode("hello world", true) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestLlama(t *testing.T) { t.Errorf("got %q, want hello world", s) } - ids, err = tokenizer.Encode("hello <|end_of_text|>") + ids, err = tokenizer.Encode("hello <|end_of_text|>", true) if err != nil { t.Error(err) } @@ -126,7 +126,7 @@ func TestLlama(t *testing.T) { } for s, want := range cases { - ids, err := tokenizer.Encode(s) + ids, err := tokenizer.Encode(s, true) if err != nil { t.Error(err) } @@ -152,7 +152,7 @@ func TestLlama(t *testing.T) { } for _, want := range cases { - ids, err := tokenizer.Encode(want) + ids, err := tokenizer.Encode(want, true) if err != nil { t.Error(err) } @@ -176,7 +176,7 @@ func TestLlama(t *testing.T) { } for s, want := range cases { - ids, err := tokenizer.Encode(s) + ids, err := tokenizer.Encode(s, true) if err != nil { t.Fatal(err) } @@ -222,7 +222,7 @@ func BenchmarkBytePairEncoding(b *testing.B) { b.Run("encode"+strconv.Itoa(n), 
func(b *testing.B) { b.ResetTimer() for range b.N { - _, err := tokenizer.Encode(string(bts)) + _, err := tokenizer.Encode(string(bts), true) if err != nil { b.Fatal(err) } @@ -230,7 +230,7 @@ func BenchmarkBytePairEncoding(b *testing.B) { }) b.Run("decode"+strconv.Itoa(n), func(b *testing.B) { - ids, err := tokenizer.Encode(string(bts)) + ids, err := tokenizer.Encode(string(bts), true) if err != nil { b.Fatal(err) } diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index 1a4bbf19..9ba6563f 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -161,7 +161,7 @@ func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) { for i, part := range parts { // text - tokenize - tokens, err := s.model.(model.TextProcessor).Encode(part) + tokens, err := s.model.(model.TextProcessor).Encode(part, i == 0) if err != nil { return nil, err } From a7e63b82be6dfcd8011e0b45f19658be71c0e2b9 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Wed, 5 Mar 2025 12:08:06 -0800 Subject: [PATCH 004/157] ollamarunner: Improve multimodal input handling Various vision models have different requirements for how they receive their inputs. For example: - Mllama wants images together with text and the image embeddings don't themselves have positions or get stored in the main KV cache - Llava-style models feed in embeddings similar to tokens and images correspond to a varying number of tokens in the cache. In addition, the strategy for providing inputs must support batching and multiple sequences, which are managed by the runner. At the same time, we want to keep data handling fully in the model so that new architectures are not bottlenecked by runner code which does not understand their particular requirements. This provides a method for models to edit the input stream so that it meets their needs while still being in a format that the runner understands. This allows the runner to avoid special processing for different models. In addition, this fixes a regression where non-vision models may try to incorrectly interpret images. --- model/model.go | 70 +++++++++++++++++-- model/models/mllama/model.go | 109 +++++++++++++++++++++--------- runner/ollamarunner/cache.go | 20 +++--- runner/ollamarunner/cache_test.go | 74 +++++++++++--------- runner/ollamarunner/runner.go | 104 ++++++++++++++++------------ 5 files changed, 247 insertions(+), 130 deletions(-) diff --git a/model/model.go b/model/model.go index f8ed8741..75b7f639 100644 --- a/model/model.go +++ b/model/model.go @@ -3,7 +3,6 @@ package model import ( "errors" "fmt" - "image" _ "image/jpeg" _ "image/png" "log/slog" @@ -22,14 +21,40 @@ import ( _ "github.com/ollama/ollama/ml/backend" ) +// Input represents one token in the input stream +type Input struct { + // Token is a single element of text. + Token int32 + + // Multimodal is opaque data representing a non-text + // element such as an image (or part of one if the image + // can be processed in pieces). It may be either together + // with Token or on its own. + Multimodal any + + // MultimodalHash is a unique representation of the data + // stored in Multimodal, used for caching and comparing + // equality. + MultimodalHash uint64 +} + +// MultimodalIndex is a multimodal element (such as an image) +// together with an index into the slice of Inputs with the +// corresponding token. Note that the index is not the same +// as the position - to find that use the index with the +// Positions slice. 
+type MultimodalIndex struct { + Index int + Multimodal any +} + // Options contains the inputs for a model forward pass type Options struct { - Inputs []int32 - Positions []int32 - Sequences []int - Outputs []int32 - - Images []image.Image + Inputs []int32 + Multimodal []MultimodalIndex + Positions []int32 + Sequences []int + Outputs []int32 } type config struct { @@ -59,6 +84,37 @@ type Model interface { Config() config } +// MultimodalProcessor must be implemented by multimodal models. +type MultimodalProcessor interface { + // EncodeMultimodal processes a single input (such as an image) and + // generates an output (typically an embedding) that can be used by the model. + // + // The return value is most typically an ml.Tensor, however, different + // type are possible, such as an object containing a tensor plus + // additional metadata, a slice of tensors or even just the original input. + // + // The result may be cached by the runner. + EncodeMultimodal(ml.Context, []byte) (any, error) + + // PostTokenize is called after tokenization to allow the model to edit the + // input stream to correctly arrange multimodal elements. + // + // The input is a slice of tokens with the results of EncodeMultimodal interleaved + // in the order that the user provided them. Each element of the slice will be + // either a single token or single multimodal object. + // + // The model must ensure that inputs are stored according to how they will be + // processed and stored in the cache. For example, Llava-style models should insert + // placeholder tokens equal to the feature size of the corresponding image with + // the image itself attached to and split across these tokens. When Forward is called + // a partial subset of these tokens may be submitted according to the batch size. + // + // This function is also responsible for updating MultimodalHash for any Multimodal + // that is modified to ensure that there is a unique hash value that accurately + // represents the contents. 
+ PostTokenize(ml.Context, []Input) ([]Input, error) +} + var models = make(map[string]func(ml.Config) (Model, error)) // Register registers a model constructor for the given architecture diff --git a/model/models/mllama/model.go b/model/models/mllama/model.go index 8fee0cdb..945c7295 100644 --- a/model/models/mllama/model.go +++ b/model/models/mllama/model.go @@ -1,7 +1,12 @@ package mllama import ( + "bytes" + "encoding/binary" "fmt" + "hash/fnv" + "image" + "slices" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" @@ -56,41 +61,79 @@ func New(c ml.Config) (model.Model, error) { return &m, nil } +func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, error) { + image, _, err := image.Decode(bytes.NewReader(multimodalData)) + if err != nil { + return nil, err + } + + f32s, aspectRatioID, err := m.ImageProcessor.ProcessImage(image) + if err != nil { + return nil, err + } + + pixelValues, err := ctx.FromFloatSlice(f32s, + m.ImageProcessor.imageSize, + m.ImageProcessor.imageSize, + m.ImageProcessor.numChannels, + m.ImageProcessor.maxNumTiles, + ) + if err != nil { + return nil, err + } + + aspectRatio, err := ctx.FromIntSlice([]int32{int32(aspectRatioID)}, 1) + if err != nil { + return nil, err + } + + positions := make([]int32, 1601) + for i := range positions { + positions[i] = int32(i) + } + + positionIDs, err := ctx.FromIntSlice(positions, len(positions)) + if err != nil { + return nil, err + } + + crossAttentionStates := m.VisionModel.Forward(ctx, pixelValues, positionIDs, aspectRatio) + return m.Projector.Forward(ctx, crossAttentionStates), nil +} + +func (m *Model) PostTokenize(ctx ml.Context, inputs []model.Input) ([]model.Input, error) { + var images []model.Input + fnvHash := fnv.New64a() + + for i := range inputs { + if inputs[i].Multimodal == nil { + if len(images) > 0 { + inputs[i].Multimodal = images[0].Multimodal + inputs[i].MultimodalHash = images[0].MultimodalHash + for j := 1; j < len(images); j++ { + inputs[i].Multimodal = inputs[i].Multimodal.(ml.Tensor).Concat(ctx, images[j].Multimodal.(ml.Tensor), 3) + fnvHash.Reset() + binary.Write(fnvHash, binary.NativeEndian, inputs[i].MultimodalHash) + binary.Write(fnvHash, binary.NativeEndian, inputs[j].MultimodalHash) + inputs[i].MultimodalHash = fnvHash.Sum64() + } + images = nil + } + } else { + images = append(images, inputs[i]) + inputs[i].Token = -1 + } + } + + inputs = slices.DeleteFunc(inputs, func(input model.Input) bool { return input.Token == -1 }) + + return inputs, nil +} + func (m *Model) Forward(ctx ml.Context, opts model.Options) (ml.Tensor, error) { var crossAttentionStates ml.Tensor - if opts.Images != nil { - f32s, aspectRatioID, err := m.ImageProcessor.ProcessImage(opts.Images[0]) - if err != nil { - return nil, err - } - - pixelValues, err := ctx.FromFloatSlice(f32s, - m.ImageProcessor.imageSize, - m.ImageProcessor.imageSize, - m.ImageProcessor.numChannels, - m.ImageProcessor.maxNumTiles, - ) - if err != nil { - return nil, err - } - - aspectRatio, err := ctx.FromIntSlice([]int32{int32(aspectRatioID)}, 1) - if err != nil { - return nil, err - } - - positions := make([]int32, 1601) - for i := range positions { - positions[i] = int32(i) - } - - positionIDs, err := ctx.FromIntSlice(positions, len(positions)) - if err != nil { - return nil, err - } - - crossAttentionStates = m.VisionModel.Forward(ctx, pixelValues, positionIDs, aspectRatio) - crossAttentionStates = m.Projector.Forward(ctx, crossAttentionStates) + if opts.Multimodal != nil { + crossAttentionStates = 
opts.Multimodal[0].Multimodal.(ml.Tensor) } inputs, err := ctx.FromIntSlice(opts.Inputs, len(opts.Inputs)) diff --git a/runner/ollamarunner/cache.go b/runner/ollamarunner/cache.go index e1fa98b1..2fd060a1 100644 --- a/runner/ollamarunner/cache.go +++ b/runner/ollamarunner/cache.go @@ -5,7 +5,6 @@ import ( "fmt" "log/slog" "math" - "reflect" "time" "github.com/ollama/ollama/kvcache" @@ -39,10 +38,7 @@ func NewInputCache(model model.Model, kvCacheType string, kvSize int32, numSlots slots := make([]InputCacheSlot, numSlots) for i := range slots { - slots[i] = InputCacheSlot{ - Id: i, - Inputs: make([]input, 0), - } + slots[i] = InputCacheSlot{Id: i} } cache := model.Config().Cache @@ -83,7 +79,7 @@ type InputCacheSlot struct { Id int // Inputs that are stored in the KV cache - Inputs []input + Inputs []model.Input // is this cache actively being processed as part of a sequence? InUse bool @@ -92,7 +88,7 @@ type InputCacheSlot struct { lastUsed time.Time } -func (c *InputCache) LoadCacheSlot(prompt []input, cachePrompt bool) (*InputCacheSlot, []input, error) { +func (c *InputCache) LoadCacheSlot(prompt []model.Input, cachePrompt bool) (*InputCacheSlot, []model.Input, error) { var slot *InputCacheSlot var numPast int32 var err error @@ -143,7 +139,7 @@ func (c *InputCache) LoadCacheSlot(prompt []input, cachePrompt bool) (*InputCach return slot, prompt, nil } -func (c *InputCache) findLongestCacheSlot(prompt []input) (*InputCacheSlot, int32, error) { +func (c *InputCache) findLongestCacheSlot(prompt []model.Input) (*InputCacheSlot, int32, error) { longest := int32(-1) var longestSlot *InputCacheSlot @@ -166,7 +162,7 @@ func (c *InputCache) findLongestCacheSlot(prompt []input) (*InputCacheSlot, int3 return longestSlot, longest, nil } -func (c *InputCache) findBestCacheSlot(prompt []input) (*InputCacheSlot, int32, error) { +func (c *InputCache) findBestCacheSlot(prompt []model.Input) (*InputCacheSlot, int32, error) { oldest := time.Now() var oldestSlot *InputCacheSlot @@ -202,7 +198,7 @@ func (c *InputCache) findBestCacheSlot(prompt []input) (*InputCacheSlot, int32, if longest > 0 && longestSlot != oldestSlot { slog.Debug("forking cache slot", "src", longestSlot.Id, "dst", oldestSlot.Id, "inputs", longest, "total", len(longestSlot.Inputs)) - oldestSlot.Inputs = make([]input, longest) + oldestSlot.Inputs = make([]model.Input, longest) copy(oldestSlot.Inputs, longestSlot.Inputs[:longest]) if c.cache != nil { c.cache.CopyPrefix(longestSlot.Id, oldestSlot.Id, longest) @@ -212,7 +208,7 @@ func (c *InputCache) findBestCacheSlot(prompt []input) (*InputCacheSlot, int32, return oldestSlot, longest, nil } -func countCommonPrefix(a []input, b []input) int32 { +func countCommonPrefix(a []model.Input, b []model.Input) int32 { var count int32 for i := range a { @@ -220,7 +216,7 @@ func countCommonPrefix(a []input, b []input) int32 { break } - if !reflect.DeepEqual(a[i], b[i]) { + if a[i].Token != b[i].Token || a[i].MultimodalHash != b[i].MultimodalHash { break } diff --git a/runner/ollamarunner/cache_test.go b/runner/ollamarunner/cache_test.go index 99e67b4f..9ce03b73 100644 --- a/runner/ollamarunner/cache_test.go +++ b/runner/ollamarunner/cache_test.go @@ -4,6 +4,8 @@ import ( "image" "testing" "time" + + "github.com/ollama/ollama/model" ) func TestCountCommon(t *testing.T) { @@ -13,44 +15,50 @@ func TestCountCommon(t *testing.T) { tests := []struct { name string - t1 []input - t2 []input + t1 []model.Input + t2 []model.Input expected int32 }{ { name: "Equal", - t1: []input{{token: 1}, {token: 2}, {token: 3}}, 
- t2: []input{{token: 1}, {token: 2}, {token: 3}}, + t1: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 3, }, { name: "Prefix", - t1: []input{{token: 1}}, - t2: []input{{token: 1}, {token: 2}, {token: 3}}, + t1: []model.Input{{Token: 1}}, + t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 1, }, { name: "Image Prefix", - t1: []input{{image: imgA}}, - t2: []input{{image: imgA}, {image: imgB}, {image: imgC}}, + t1: []model.Input{{Multimodal: imgA, MultimodalHash: 1}}, + t2: []model.Input{{Multimodal: imgA, MultimodalHash: 1}, {Multimodal: imgB, MultimodalHash: 2}, {Multimodal: imgC, MultimodalHash: 3}}, expected: 1, }, { name: "Mixed", - t1: []input{{token: 1}, {image: imgA}}, - t2: []input{{token: 1}, {image: imgA}, {token: 5}}, + t1: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, + t2: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}, {Token: 5}}, expected: 2, }, + { + name: "Mixed, Same Length", + t1: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, + t2: []model.Input{{Token: 1}, {Multimodal: imgB, MultimodalHash: 2}}, + expected: 1, + }, { name: "Empty", - t1: []input{}, - t2: []input{{token: 1}, {token: 2}, {token: 3}}, + t1: []model.Input{}, + t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 0, }, { name: "Both Empty", - t1: []input{}, - t2: []input{}, + t1: []model.Input{}, + t2: []model.Input{}, expected: 0, }, } @@ -74,7 +82,7 @@ func TestFindCacheSlot(t *testing.T) { tests := []struct { name string cache InputCache - prompt []input + prompt []model.Input longest expected best expected }{ @@ -83,18 +91,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{}, + Inputs: []model.Input{}, InUse: false, lastUsed: time.Time{}, }, { Id: 1, - Inputs: []input{}, + Inputs: []model.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, - prompt: []input{{token: 1}}, + prompt: []model.Input{{Token: 1}}, longest: expected{result: 0, len: 0}, best: expected{result: 0, len: 0}, }, @@ -103,18 +111,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{{token: 1}}, + Inputs: []model.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []input{{token: 1}, {token: 2}}, + Inputs: []model.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: []input{{token: 1}, {token: 2}}, + prompt: []model.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 2}, best: expected{result: 1, len: 2}, }, @@ -123,18 +131,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{{token: 1}, {token: 2}}, + Inputs: []model.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []input{}, + Inputs: []model.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, - prompt: []input{{token: 2}}, + prompt: []model.Input{{Token: 2}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, @@ -144,19 +152,19 @@ func TestFindCacheSlot(t *testing.T) { slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{{token: 1}, {token: 2}}, + Inputs: []model.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []input{}, + Inputs: []model.Input{}, InUse: false, lastUsed: time.Time{}, }, }, }, - prompt: 
[]input{{token: 1}}, + prompt: []model.Input{{Token: 1}}, longest: expected{result: 0, len: 1}, best: expected{result: 1, len: 1}, }, @@ -165,18 +173,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{{token: 1}}, + Inputs: []model.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []input{{token: 1}, {token: 2}}, + Inputs: []model.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: []input{{token: 2}, {token: 3}}, + prompt: []model.Input{{Token: 2}, {Token: 3}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, @@ -185,18 +193,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []input{{token: 1}, {token: 2}}, + Inputs: []model.Input{{Token: 1}, {Token: 2}}, InUse: true, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []input{{token: 1}}, + Inputs: []model.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: []input{{token: 1}, {token: 2}}, + prompt: []model.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 1}, best: expected{result: 1, len: 2}, }, diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index 9ba6563f..e5189fa5 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -1,13 +1,12 @@ package ollamarunner import ( - "bytes" "context" "encoding/json" "errors" "flag" "fmt" - "image" + "hash/maphash" "log" "log/slog" "net" @@ -33,22 +32,19 @@ import ( _ "github.com/ollama/ollama/model/models" ) -// input is an element of the prompt to process, either a token or an image -type input struct { - token int32 - - image image.Image -} - type Sequence struct { + // ctx for allocating tensors that last the lifetime of the sequence, such as + // multimodal embeddings + ctx ml.Context + // batch index iBatch int // prompt inputs left to evaluate - inputs []input + inputs []model.Input // inputs that have been added to a batch but not yet submitted to Forward - pendingInputs []input + pendingInputs []model.Input // tokens that have been generated but not returned yet (e.g. 
for stop sequences) pendingResponses []string @@ -101,8 +97,9 @@ func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequen s.ready.Wait() startTime := time.Now() + ctx := s.model.Backend().NewContext() - inputs, err := s.inputs(prompt, images) + inputs, err := s.inputs(ctx, prompt, images) if err != nil { return nil, fmt.Errorf("failed to process inputs: %w", err) } else if len(inputs) == 0 { @@ -128,6 +125,7 @@ func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequen // TODO(jessegross): Ingest cached history for grammar return &Sequence{ + ctx: ctx, inputs: inputs, numPromptInputs: len(inputs), startProcessingTime: startTime, @@ -146,19 +144,22 @@ func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequen // inputs processes the prompt and images into a list of inputs // by splitting the prompt on [img-] tags, tokenizing text and // decoding images -func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) { - var inputs []input +func (s *Server) inputs(ctx ml.Context, prompt string, images []ImageData) ([]model.Input, error) { + var inputs []model.Input var parts []string var matches [][]string - // TODO(jessegross): This can sometimes trigger for matching text in the - // user's prompt. We previously tried to avoid it by only looking for images - // on image models. We don't have a clear indication now but it would be better - // to properly escape it in any case. - re := regexp.MustCompile(`\[img-(\d+)\]`) - parts = re.Split(prompt, -1) - matches = re.FindAllStringSubmatch(prompt, -1) + multimodalProcessor, visionModel := s.model.(model.MultimodalProcessor) + if visionModel { + re := regexp.MustCompile(`\[img-(\d+)\]`) + parts = re.Split(prompt, -1) + matches = re.FindAllStringSubmatch(prompt, -1) + } else { + parts = []string{prompt} + } + + postTokenize := false for i, part := range parts { // text - tokenize tokens, err := s.model.(model.TextProcessor).Encode(part, i == 0) @@ -167,7 +168,7 @@ func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) { } for _, t := range tokens { - inputs = append(inputs, input{token: t}) + inputs = append(inputs, model.Input{Token: t}) } // image - decode and store @@ -186,12 +187,25 @@ func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) { return nil, fmt.Errorf("invalid image index: %d", n) } - image, _, err := image.Decode(bytes.NewReader(images[imageIndex].Data)) + imageEmbeddings, err := multimodalProcessor.EncodeMultimodal(ctx, images[imageIndex].Data) if err != nil { return nil, err } - inputs = append(inputs, input{image: image}) + s.multimodalHash.Reset() + _, _ = s.multimodalHash.Write(images[imageIndex].Data) + imageHash := s.multimodalHash.Sum64() + + inputs = append(inputs, model.Input{Multimodal: imageEmbeddings, MultimodalHash: imageHash}) + postTokenize = true + } + } + + if visionModel && postTokenize { + var err error + inputs, err = multimodalProcessor.PostTokenize(ctx, inputs) + if err != nil { + return nil, err } } @@ -238,6 +252,10 @@ type Server struct { // next sequence for prompt processing to avoid starvation nextSeq int + + // multimodalHash generates hashes for comparing equality + // of non-text data + multimodalHash maphash.Hash } func (s *Server) allNil() bool { @@ -283,6 +301,7 @@ func (s *Server) removeSequence(seqIndex int, reason string) { close(seq.responses) close(seq.embedding) seq.cache.InUse = false + seq.ctx.Close() s.seqs[seqIndex] = nil s.seqsSem.Release(1) } @@ -311,7 +330,6 @@ 
func (s *Server) processBatch() error {
 	defer s.mu.Unlock()

 	var options model.Options
-	imgSeq := -1

 	seqIdx := s.nextSeq - 1
 	for range s.seqs {
@@ -330,7 +348,7 @@ func (s *Server) processBatch() error {

 		if !s.cache.enabled {
 			seq.inputs = append(seq.cache.Inputs, seq.inputs...)
-			seq.cache.Inputs = []input{}
+			seq.cache.Inputs = []model.Input{}
 		}

 		for i, input := range seq.inputs {
@@ -349,25 +367,21 @@ func (s *Server) processBatch() error {
 				break
 			}

-			// TODO(jessegross): Image inputs need to be rethought - it's
-			// it doesn't work well for different types of models or multiple sequences
-			if input.image != nil {
-				if len(seq.pendingInputs) != len(options.Images) {
-					break
-				}
-
-				if imgSeq != seqIdx && imgSeq != -1 {
-					s.nextSeq = seqIdx
-					break
-				}
-
-				imgSeq = seqIdx
-				options.Images = append(options.Images, input.image)
-				seq.pendingInputs = append(seq.pendingInputs, input)
-				continue
+			// TODO(jessegross): This is a workaround for generating an attention mask and also providing a hint
+			// to the encoder cache.
+			//
+			// Break the batch when switching from text to images so that images are always at the beginning.
+			if input.Multimodal != nil && !(len(seq.pendingInputs) == 0 ||
+				(len(options.Multimodal) > 0 && options.Multimodal[len(options.Multimodal)-1].Index == len(options.Inputs)-1)) {
+				s.nextSeq = seqIdx
+				break
+			}
+
+			options.Inputs = append(options.Inputs, input.Token)
+			if input.Multimodal != nil {
+				options.Multimodal = append(options.Multimodal, model.MultimodalIndex{Index: len(options.Inputs) - 1, Multimodal: input.Multimodal})
 			}

-			options.Inputs = append(options.Inputs, input.token)
 			options.Positions = append(options.Positions, int32(len(seq.cache.Inputs)+len(seq.pendingInputs)))
 			options.Sequences = append(options.Sequences, seq.cache.Id)

@@ -403,7 +417,7 @@ func (s *Server) processBatch() error {
 		// After calling Forward, pending inputs are now in the cache
 		if len(seq.pendingInputs) > 0 {
 			seq.cache.Inputs = append(seq.cache.Inputs, seq.pendingInputs...)
-			seq.pendingInputs = []input{}
+			seq.pendingInputs = []model.Input{}
 		}

 		// don't sample prompt processing
@@ -449,7 +463,7 @@ func (s *Server) processBatch() error {
 			return err
 		}

-		seq.inputs = []input{{token: token}}
+		seq.inputs = []model.Input{{Token: token}}

 		seq.pendingResponses = append(seq.pendingResponses, piece)
 		sequence := strings.Join(seq.pendingResponses, "")

From 25248f4bd5a1d720f6775ee1e45dfed718d6f98f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=AErekc=C3=A4H=20nitraM=E2=80=AE?=
Date: Fri, 7 Mar 2025 10:26:31 +0100
Subject: [PATCH 005/157] Better WantedBy declaration

The problem with default.target is that it always points to the target
that is currently started. So if you boot into single-user mode or
rescue mode, Ollama still tries to start.

I noticed this because Ollama tried (and failed) to start all the time
during a system update, where it is definitely not wanted.
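For reference, after this change the [Install] section of the unit and the
commands to re-apply it look like the sketch below; it assumes the unit file
path used by the manual install instructions in docs/linux.md
(/etc/systemd/system/ollama.service), so adjust if your unit lives elsewhere.

    # /etc/systemd/system/ollama.service (excerpt)
    [Install]
    WantedBy=multi-user.target

    # reload systemd and re-enable the service so the new target takes effect
    sudo systemctl daemon-reload
    sudo systemctl enable ollama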
--- docs/linux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/linux.md b/docs/linux.md index 12581bdd..2dda87f3 100644 --- a/docs/linux.md +++ b/docs/linux.md @@ -75,7 +75,7 @@ RestartSec=3 Environment="PATH=$PATH" [Install] -WantedBy=default.target +WantedBy=multi-user.target ``` Then start the service: From 4289c74359ad7cac0d4350c22e6af3f92c5f091c Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 7 Mar 2025 09:25:34 -0800 Subject: [PATCH 006/157] llama: fix kv loading on snowflake-arctic-embed models (#9536) --- llama/llama.cpp/src/llama-vocab.cpp | 2 +- .../0019-fix-string-arr-kv-loading.patch | 64 +++++++++++++++++++ ml/backend/ggml/ggml/include/gguf.h | 1 + ml/backend/ggml/ggml/src/gguf.cpp | 7 +- 4 files changed, 71 insertions(+), 3 deletions(-) create mode 100644 llama/patches/0019-fix-string-arr-kv-loading.patch diff --git a/llama/llama.cpp/src/llama-vocab.cpp b/llama/llama.cpp/src/llama-vocab.cpp index c7ff28be..7a185443 100644 --- a/llama/llama.cpp/src/llama-vocab.cpp +++ b/llama/llama.cpp/src/llama-vocab.cpp @@ -1443,7 +1443,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); if (precompiled_charsmap_keyidx != -1) { - size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx); + size_t n_precompiled_charsmap = gguf_get_arr_data_n(ctx, precompiled_charsmap_keyidx); const char * pc = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx); precompiled_charsmap.assign(pc, pc + n_precompiled_charsmap); #ifdef IS_BIG_ENDIAN diff --git a/llama/patches/0019-fix-string-arr-kv-loading.patch b/llama/patches/0019-fix-string-arr-kv-loading.patch new file mode 100644 index 00000000..aa7b4d3c --- /dev/null +++ b/llama/patches/0019-fix-string-arr-kv-loading.patch @@ -0,0 +1,64 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: jmorganca +Date: Wed, 5 Mar 2025 17:41:07 -0800 +Subject: [PATCH] fix string arr kv loading + +--- + ggml/include/gguf.h | 1 + + ggml/src/gguf.cpp | 7 +++++-- + src/llama-vocab.cpp | 2 +- + 3 files changed, 7 insertions(+), 3 deletions(-) + +diff --git a/ggml/include/gguf.h b/ggml/include/gguf.h +index 79ee2020..3efb22f0 100644 +--- a/ggml/include/gguf.h ++++ b/ggml/include/gguf.h +@@ -114,6 +114,7 @@ extern "C" { + // get raw pointer to the first element of the array with the given key_id + // for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference) + GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id); ++ GGML_API size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id); + + // get ith C string from array with given key_id + GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i); +diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp +index ab13669c..f75b923f 100644 +--- a/ggml/src/gguf.cpp ++++ b/ggml/src/gguf.cpp +@@ -777,10 +777,14 @@ enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id + + const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) { + GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); +- GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING); + return ctx->kv[key_id].data.data(); + } + ++size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id) { ++ GGML_ASSERT(key_id >= 0 && key_id < 
gguf_get_n_kv(ctx)); ++ return ctx->kv[key_id].data.size(); ++} ++ + const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) { + GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); + GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING); +@@ -874,7 +878,6 @@ const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) { + const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) { + GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); + GGML_ASSERT(ctx->kv[key_id].get_ne() == 1); +- GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING); + return ctx->kv[key_id].data.data(); + } + +diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp +index c7ff28be..7a185443 100644 +--- a/src/llama-vocab.cpp ++++ b/src/llama-vocab.cpp +@@ -1443,7 +1443,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { + + const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); + if (precompiled_charsmap_keyidx != -1) { +- size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx); ++ size_t n_precompiled_charsmap = gguf_get_arr_data_n(ctx, precompiled_charsmap_keyidx); + const char * pc = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx); + precompiled_charsmap.assign(pc, pc + n_precompiled_charsmap); + #ifdef IS_BIG_ENDIAN diff --git a/ml/backend/ggml/ggml/include/gguf.h b/ml/backend/ggml/ggml/include/gguf.h index 79ee2020..3efb22f0 100644 --- a/ml/backend/ggml/ggml/include/gguf.h +++ b/ml/backend/ggml/ggml/include/gguf.h @@ -114,6 +114,7 @@ extern "C" { // get raw pointer to the first element of the array with the given key_id // for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference) GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id); + GGML_API size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id); // get ith C string from array with given key_id GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i); diff --git a/ml/backend/ggml/ggml/src/gguf.cpp b/ml/backend/ggml/ggml/src/gguf.cpp index ab13669c..f75b923f 100644 --- a/ml/backend/ggml/ggml/src/gguf.cpp +++ b/ml/backend/ggml/ggml/src/gguf.cpp @@ -777,10 +777,14 @@ enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); - GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING); return ctx->kv[key_id].data.data(); } +size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id) { + GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); + return ctx->kv[key_id].data.size(); +} + const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING); @@ -874,7 +878,6 @@ const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) { const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); GGML_ASSERT(ctx->kv[key_id].get_ne() == 1); - GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING); return ctx->kv[key_id].data.data(); } From 1f6986e91902b1308a8dc6be45418b7db9ccc0e9 Mon Sep 17 
00:00:00 2001 From: Breaker Date: Sat, 8 Mar 2025 01:30:07 +0800 Subject: [PATCH 007/157] readme: add QwQ to the supported models list (#9565) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5aa4801e..1162f891 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ Here are some example models that can be downloaded: | Model | Parameters | Size | Download | | ------------------ | ---------- | ----- | -------------------------------- | +| QwQ | 32B | 20GB | `ollama run qwq` | | DeepSeek-R1 | 7B | 4.7GB | `ollama run deepseek-r1` | | DeepSeek-R1 | 671B | 404GB | `ollama run deepseek-r1:671b` | | Llama 3.3 | 70B | 43GB | `ollama run llama3.3` | From 0682dae0275af6ab376cfa346ef27562b574684d Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Fri, 7 Mar 2025 12:37:48 -0800 Subject: [PATCH 008/157] sample: improve ollama engine sampler performance (#9374) This change bring in various interface cleanups along with greatly improving the performance of the sampler. Tested with llama3.2 on local machine. Improves performance from ~ 70 tokens/s -> 135 tokens/s with topK(40) enabled. Without topK performance is ~ 110 tokens/s --- go.mod | 2 +- runner/ollamarunner/runner.go | 10 +- sample/samplers.go | 166 +++++++++++-------- sample/samplers_benchmark_test.go | 104 ++++++++++++ sample/samplers_test.go | 155 ++++-------------- sample/transforms.go | 263 ++++++++++++++++++++---------- sample/transforms_test.go | 203 +++++++++++++++++------ 7 files changed, 572 insertions(+), 331 deletions(-) create mode 100644 sample/samplers_benchmark_test.go diff --git a/go.mod b/go.mod index c45c9892..cc578900 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c golang.org/x/image v0.22.0 golang.org/x/tools v0.30.0 - gonum.org/v1/gonum v0.15.0 ) require ( @@ -45,6 +44,7 @@ require ( github.com/xtgo/set v1.0.0 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gonum.org/v1/gonum v0.15.0 // indirect gorgonia.org/vecf32 v0.9.0 // indirect gorgonia.org/vecf64 v0.9.0 // indirect ) diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index e5189fa5..81e06562 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -589,11 +589,19 @@ func (s *Server) completion(w http.ResponseWriter, r *http.Request) { return } + sampler := sample.NewSampler( + req.Temperature, + req.TopK, + req.TopP, + req.MinP, + req.Seed, + ) + seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{ numPredict: req.NumPredict, stop: req.Stop, numKeep: int32(req.NumKeep), - sampler: sample.Greedy(), // TODO: add support for different samplers when performance is optimized + sampler: sampler, embedding: false, }) if err != nil { diff --git a/sample/samplers.go b/sample/samplers.go index 1b8a5edd..a5a0507c 100644 --- a/sample/samplers.go +++ b/sample/samplers.go @@ -2,76 +2,103 @@ package sample import ( "errors" - "math" - - "golang.org/x/exp/rand" - "gonum.org/v1/gonum/stat/sampleuv" + "math/rand/v2" + "slices" ) +// Sampler is not thread-safe. 
Each goroutine should have its own instance type Sampler interface { Sample([]float32) (int32, error) } +// logit represents information about a single token during sampling +type logit struct { + id int32 // The token's unique identifier + value float32 // The raw logit or probability from the model +} + type weighted struct { - src rand.Source - transforms []Transform + rng *rand.Rand + tokens []logit + topK int + topP float32 + minP float32 + temperature float32 } -// TODO(parthsareen): remove uv sample dependency https://github.com/ollama/ollama/issues/9279 -func Weighted(seed *uint64, transforms ...Transform) Sampler { - var src rand.Source - if seed != nil { - src = rand.NewSource(*seed) +func (s *weighted) Sample(logits []float32) (int32, error) { + if len(s.tokens) < len(logits) { + s.tokens = make([]logit, len(logits)) } - return weighted{src: src, transforms: transforms} -} -func (s weighted) Sample(logits []float32) (int32, error) { - logits64 := make([]float64, len(logits)) + tokens := s.tokens[:len(logits)] + for i, v := range logits { - logits64[i] = float64(v) + tokens[i].id = int32(i) + tokens[i].value = v } - for _, t := range s.transforms { - logits64 = t.Apply(logits64) + // Tokens are sorted by logits in TopK or SortTokens + if s.topK > 0 { + tokens = topK(tokens, s.topK) + } else { + sortLogits(tokens) } - logitsCopy := make([]float64, 0, len(logits)) - indices := make([]int, 0, len(logits)) - for i, logit := range logits64 { - if !math.IsInf(logit, -1) { - logitsCopy = append(logitsCopy, logit) - indices = append(indices, i) + tokens = temperature(tokens, s.temperature) + tokens = softmax(tokens) + + tokens = topP(tokens, s.topP) + tokens = minP(tokens, s.minP) + + if len(tokens) == 0 { + return -1, errors.New("no valid logits found for weighted sampling") + } + + var r float32 + if s.rng != nil { + r = s.rng.Float32() + } else { + r = rand.Float32() + } + + // Calculate cumulative sum of probabilities + var sum float32 + for i := range tokens { + sum += tokens[i].value + tokens[i].value = sum + } + r *= tokens[len(tokens)-1].value + + idx, _ := slices.BinarySearchFunc(tokens, r, func(token logit, target float32) int { + // Compare cumulative probabilities + if token.value < target { + return -1 } + // First token that exceeds target + return 1 + }) + + if idx >= len(tokens) { + idx = len(tokens) - 1 } - if len(logitsCopy) == 0 { - return -1, errors.New("no valid logits found for weighed sampling") - } - - probs := softmax(logitsCopy) - w := sampleuv.NewWeighted(probs, s.src) - if idx, ok := w.Take(); ok { - return int32(indices[idx]), nil - } - return -1, errors.New("weighted sampler failed, no valid token found") + return tokens[idx].id, nil } type greedy struct{} -func Greedy() Sampler { - return greedy{} -} - -// Sample returns the index of the maximum value in logits. +// Greedy sample returns the index of the maximum value in logits. 
func (s greedy) Sample(logits []float32) (int32, error) { if len(logits) == 0 { return -1, errors.New("no logits provided for greedy sampling") } maxIdx := 0 - for i := range logits { - if logits[i] > logits[maxIdx] { + maxVal := logits[0] + for i := 1; i < len(logits); i++ { + if logits[i] > maxVal { + maxVal = logits[i] maxIdx = i } } @@ -80,41 +107,40 @@ func (s greedy) Sample(logits []float32) (int32, error) { } // TODO(parthsareen): update sampler interface to use json unmarshal https://github.com/ollama/ollama/issues/9278 -func NewSampler(temperature float32, topK int, topP float32, minP float32, seed int) (Sampler, error) { +func NewSampler(temperature float32, topK int, topP float32, minP float32, seed int) Sampler { if temperature == 0 { - return Greedy(), nil + return &greedy{} } - if temperature < 0 || temperature > 2 { - return nil, errors.New("temperature must be between 0 and 2") + var rng *rand.Rand + if seed != -1 { + // PCG requires two parameters: sequence and stream + // Use original seed for sequence + sequence := uint64(seed) + // Use golden ratio hash to generate statistically independent seeds + rng = rand.New(rand.NewPCG(sequence, sequence^0x9E3779B9)) + } + temperature = max(temperature, 1) + + if topP < 0.0 { + topP = 0.0 + } + if topP >= 1.0 { + topP = 1.0 } - transforms := []Transform{Temperature(temperature)} - - if topK != 0 { - if topK <= 0 { - return nil, errors.New("topK must be greater than 0") - } - transforms = append(transforms, TopK(topK)) + if minP < 0.0 { + minP = 0.0 + } + if minP >= 1.0 { + minP = 1.0 } - if topP != 0 { - if topP < 0 || topP >= 1 { - return nil, errors.New("topP must be between 0 and 1") - } - transforms = append(transforms, TopP(topP)) + return &weighted{ + rng: rng, + topK: topK, + topP: topP, + minP: minP, + temperature: temperature, } - - if minP != 0 { - if minP < 0 || minP >= 1 { - return nil, errors.New("minP must be between 0 and 1") - } - transforms = append(transforms, MinP(minP)) - } - - if seed >= 0 { - seed64 := uint64(seed) - return Weighted(&seed64, transforms...), nil - } - return Weighted(nil, transforms...), nil } diff --git a/sample/samplers_benchmark_test.go b/sample/samplers_benchmark_test.go new file mode 100644 index 00000000..41c0b487 --- /dev/null +++ b/sample/samplers_benchmark_test.go @@ -0,0 +1,104 @@ +package sample + +import ( + "fmt" + "math/rand" + "testing" +) + +func BenchmarkWeightedSampler(b *testing.B) { + sizes := []int{10, 100, 1000, 10000} + + for _, size := range sizes { + b.Run(fmt.Sprintf("Size %d", size), func(b *testing.B) { + logits := make([]float32, size) + for i := range logits { + logits[i] = float32(rand.Float64()*10 - 5) + } + + sampler := NewSampler(0.8, 0, 0, 0, 42) + b.ResetTimer() + for b.Loop() { + _, err := sampler.Sample(logits) + if err != nil { + b.Fatalf("Sampling failed: %v", err) + } + } + }) + } + + configs := []struct { + name string + temperature float32 + topK int + topP float32 + minP float32 + seed int + }{ + {"Greedy", 0, -1, 0, 0, -1}, + {"Temperature", 0.8, -1, 0, 0, -1}, + {"TopK", 0.8, 50, 0, 0, -1}, + {"TopP", 0.8, -1, 0.9, 0, -1}, + {"MinP", 0.8, -1, 0, 0.05, -1}, + {"WithSeed", 0.8, 50, 0, 0, 42}, + } + + // Fixed size for common vocab size + size := 128000 + logits := make([]float32, size) + for i := range logits { + logits[i] = float32(rand.Float64()*10 - 5) + } + + for _, tc := range configs { + b.Run("Config"+tc.name, func(b *testing.B) { + sampler := NewSampler(tc.temperature, tc.topK, tc.topP, tc.minP, tc.seed) + sampler.Sample(logits) + + 
b.ResetTimer() + + for b.Loop() { + _, err := sampler.Sample(logits) + if err != nil { + b.Fatalf("Sampling failed: %v", err) + } + } + }) + } + + // Test with combined transforms separately - topK influences performance greatly + b.Run("TransformCombined", func(b *testing.B) { + sampler := NewSampler(0.8, 50, 0.9, 0.05, 42) + b.ResetTimer() + + for b.Loop() { + _, err := sampler.Sample(logits) + if err != nil { + b.Fatalf("Sampling failed: %v", err) + } + } + }) +} + +func BenchmarkGreedySampler(b *testing.B) { + sizes := []int{10, 100, 1000, 10000, 100000} + + for _, size := range sizes { + b.Run(fmt.Sprintf("Size %d", size), func(b *testing.B) { + logits := make([]float32, size) + for i := range logits { + logits[i] = float32(rand.Float64()*10 - 5) + } + + sampler := NewSampler(0, -1, 0, 0, -1) + b.ResetTimer() + + for b.Loop() { + _, err := sampler.Sample(logits) + if err != nil { + b.Fatalf("Sampling failed: %v", err) + } + } + }) + } +} diff --git a/sample/samplers_test.go b/sample/samplers_test.go index 32364a3b..dbbee17b 100644 --- a/sample/samplers_test.go +++ b/sample/samplers_test.go @@ -1,15 +1,14 @@ package sample import ( - "math" "math/rand/v2" "testing" - - "github.com/google/go-cmp/cmp" ) func TestWeighted(t *testing.T) { - got, err := Weighted(nil).Sample([]float32{float32(math.Inf(-1)), 2, float32(math.Inf(-1)), float32(math.Inf(-1))}) + logits := []float32{-10, 3, -10, -10} + sampler := NewSampler(0, 0, 0, 0, 0) + got, err := sampler.Sample(logits) if err != nil { t.Error(err) return @@ -19,64 +18,19 @@ func TestWeighted(t *testing.T) { t.Errorf("index mismatch: want %d, got %d", want, got) } - got, err = Weighted(nil).Sample([]float32{float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1))}) - if err == nil { - t.Error("expected error for no valid tokens, got index", got) - } - - seed := uint64(42) - got, err = Weighted(&seed).Sample([]float32{1, 2, 3, 4}) + logits = []float32{-100, -10, 0, 10} + sampler = NewSampler(0, 0, 0, 0, 0) + got, err = sampler.Sample(logits) if err != nil { t.Error(err) return } - // With seed 42, we expect a consistent sample - want = int32(3) // This will be deterministic due to the seed + want = int32(3) // Should pick highest probability with this r value if want != got { t.Errorf("index mismatch: want %d, got %d", want, got) } } -type testTransform struct { - id int - callOrder *[]int -} - -func (ts *testTransform) Apply(logits []float64) []float64 { - if ts.callOrder != nil { - *ts.callOrder = append(*ts.callOrder, ts.id) - } - return logits -} - -func TestSample(t *testing.T) { - input := []float32{1, 2, 3, 4} - - var callOrder []int - mock1 := &testTransform{ - id: 1, - callOrder: &callOrder, - } - mock2 := &testTransform{ - id: 2, - callOrder: &callOrder, - } - mock3 := &testTransform{ - id: 3, - callOrder: &callOrder, - } - - _, err := Weighted(nil, mock1, mock2, mock3).Sample(input) - if err != nil { - t.Error(err) - return - } - wantOrder := []int{1, 2, 3} - if diff := cmp.Diff(wantOrder, callOrder); diff != "" { - t.Errorf("call order mismatch (-want +got):\n%s", diff) - } -} - func TestNewSampler(t *testing.T) { tests := []struct { name string @@ -85,75 +39,41 @@ func TestNewSampler(t *testing.T) { topP float32 minP float32 seed int - wantErr bool + wantGreedy bool // Instead of wantErr, check if we get greedy sampler }{ - { - name: "no transforms", - // temperature is 0, so greedy should be used - wantErr: false, - }, { name: "temperature", temperature: 0.5, - wantErr: false, + wantGreedy: false, }, { - name: 
"invalid temperature negative", - temperature: -1, - wantErr: true, - }, - { - name: "invalid temperature too high", - temperature: 2.1, - wantErr: true, + name: "zero temperature - greedy", + temperature: 0, + wantGreedy: true, }, { name: "top k", + temperature: 0.1, topK: 10, - temperature: 0.8, - wantErr: false, - }, - { - name: "invalid top k negative", - topK: -1, - temperature: 0.8, - wantErr: true, + wantGreedy: false, }, { name: "top p", + temperature: 0.1, topP: 0.9, - temperature: 0.8, - wantErr: false, - }, - { - name: "invalid top p negative", - topP: -0.1, - temperature: 0.8, - wantErr: true, - }, - { - name: "invalid top p one", - topP: 1.0, - temperature: 0.8, - wantErr: true, + wantGreedy: false, }, { name: "min p", + temperature: 0.1, minP: 0.2, - temperature: 0.8, - wantErr: false, + wantGreedy: false, }, { - name: "invalid min p negative", - minP: -0.1, - temperature: 0.8, - wantErr: true, - }, - { - name: "invalid min p one", - minP: 1.0, - temperature: 0.8, - wantErr: true, + name: "seed - weighted", + temperature: 0.1, + seed: 42, + wantGreedy: false, }, { name: "default values", @@ -162,16 +82,16 @@ func TestNewSampler(t *testing.T) { topP: 0.9, minP: 0.0, seed: 0, - wantErr: false, + wantGreedy: false, }, { - name: "all zeroes", + name: "all zeroes - greedy", temperature: 0.0, topK: 0, topP: 0.0, minP: 0.0, seed: 0, - wantErr: false, // all zeroes means no transforms + wantGreedy: true, }, { name: "all transforms", @@ -180,33 +100,28 @@ func TestNewSampler(t *testing.T) { topP: 0.95, minP: 0.1, seed: 42, - wantErr: false, + wantGreedy: false, }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewSampler(tt.temperature, tt.topK, tt.topP, tt.minP, tt.seed) - if (err != nil) != tt.wantErr { - t.Errorf("NewSampler() error = %v, wantErr %v", err, tt.wantErr) + sampler := NewSampler(tt.temperature, tt.topK, tt.topP, tt.minP, tt.seed) + _, isGreedy := sampler.(*greedy) + if isGreedy != tt.wantGreedy { + t.Errorf("NewSampler() got greedy = %v, want %v", isGreedy, tt.wantGreedy) } }) } } func BenchmarkSample(b *testing.B) { - transforms := []Transform{ - Temperature(0.5), - TopK(10), - TopP(0.9), - MinP(0.2), - } - + weighted := NewSampler(0.5, 10, 0.9, 0.2, -1) samplers := map[string]Sampler{ - "Greedy": Greedy(), - "Weighted": Weighted(nil, transforms...), + "Greedy": NewSampler(0, 0, 0, 0, 0), // Use NewSampler with temp=0 for greedy + "Weighted": weighted, } + // Generate random logits for benchmarking logits := make([]float32, 1<<16) for i := range logits { logits[i] = rand.Float32() @@ -215,7 +130,7 @@ func BenchmarkSample(b *testing.B) { for name, s := range samplers { b.Run(name, func(b *testing.B) { b.ResetTimer() - for range b.N { + for b.Loop() { if _, err := s.Sample(logits); err != nil { b.Error(err) } diff --git a/sample/transforms.go b/sample/transforms.go index 2dc6ebae..f1f4f3b1 100644 --- a/sample/transforms.go +++ b/sample/transforms.go @@ -1,120 +1,203 @@ package sample import ( - "cmp" "math" "slices" - - pq "github.com/emirpasic/gods/v2/queues/priorityqueue" ) -type Transform interface { - Apply([]float64) []float64 -} - -// TODO(parthsareen): potentially cache softmax values -func softmax(logits []float64) []float64 { - var sum float64 - probs := make([]float64, len(logits)) - for i, v := range logits { - probs[i] = math.Exp(v) - sum += probs[i] +func softmax(ts []logit) []logit { + var sum float32 + for i, v := range ts { + ts[i].value = float32(math.Exp(float64(v.value))) + sum += ts[i].value } - for i := range probs { 
- probs[i] /= sum + for i := range ts { + ts[i].value /= sum } - return probs + return ts } -type Temperature float64 +func temperature(ti []logit, t float32) []logit { + if t == 1 { + return ti + } -func (t Temperature) Apply(logits []float64) []float64 { - temp := math.Max(float64(t), 1e-7) + temp := max(t, 1e-7) + maxLogit := float32(math.Inf(-1)) + for _, token := range ti { + if token.value > maxLogit { + maxLogit = token.value + } + } // subtracting max logit to avoid under/overflow - maxLogit := slices.Max(logits) - for i := range logits { - logits[i] = (logits[i] - maxLogit) / temp + for i := range ti { + ti[i].value = (ti[i].value - maxLogit) / temp } - return logits + return ti } -type logitMap struct { - index int - logit float64 -} - -type TopK int - -// TODO(parthsareen): avoid having to check all logits after this transform -func (k TopK) Apply(logits []float64) []float64 { - if int(k) >= len(logits) { - return logits - } - q := pq.NewWith(func(a, b logitMap) int { - return -cmp.Compare(a.logit, b.logit) - }) - - for i, logit := range logits { - q.Enqueue(logitMap{index: i, logit: logit}) - } - - validLogits := make(map[int]float64) - for range k { - logitMap, _ := q.Dequeue() - validLogits[logitMap.index] = logitMap.logit - } - - for i := range logits { - if _, ok := validLogits[i]; !ok { - logits[i] = math.Inf(-1) - } - } - - return logits -} - -type TopP float64 - -func (p TopP) Apply(logits []float64) []float64 { - probs := softmax(logits) - indices := make([]int, len(probs)) - for i := range indices { - indices[i] = i - } - - // sort in descending order - slices.SortFunc(indices, func(i, j int) int { - return cmp.Compare(probs[j], probs[i]) - }) - - var sum float64 - for i, idx := range indices { - sum += probs[idx] - if sum > float64(p) { - for _, idx := range indices[i+1:] { - logits[idx] = math.Inf(-1) - } +// siftDown maintains a min-heap property by recursively moving larger elements down the heap. +// +// The heap is represented as an array where for any node at index i: +// - Left child is at index 2i + 1 +// - Right child is at index 2i + 2 +// - Parent is at index (i-1)/2 +// +// The function compares a node with its children and: +// 1. Finds the smallest value between the node and its children +// 2. If the node is not the smallest, swaps it with its smallest child +// 3. 
Continues this process down the affected path until the min-heap property is restored +func siftDown(data []logit, start, end int) { + root := start + for { + child := 2*root + 1 + if child >= end { break } + // Find smaller child (we want min heap) + if child+1 < end && data[child+1].value < data[child].value { + child++ + } + // Exit if root is already smaller than children + if data[root].value <= data[child].value { + break + } + // Swap with smaller child and continue + data[root], data[child] = data[child], data[root] + root = child } - return logits } -type MinP float64 +// topK limits the number of tokens considered to the k highest logits +func topK(ts []logit, k int) []logit { + if k >= len(ts) { + return ts + } + // Heapify + siftDown - O(nlog(k)) + // Build min-heap of first k elements + heap := ts[:k] + for i := k/2 - 1; i >= 0; i-- { + siftDown(heap, i, k) + } -func (p MinP) Apply(logits []float64) []float64 { - probs := softmax(logits) - threshold := slices.Max(probs) * float64(p) - - for i, prob := range probs { - if prob < threshold { - logits[i] = math.Inf(-1) + // Process remaining elements - if larger than heap root, replace root + for i := k; i < len(ts); i++ { + if ts[i].value > heap[0].value { + heap[0] = ts[i] + siftDown(heap, 0, k) } } - return logits + slices.Reverse(heap) + + ts = heap + return ts +} + +// topP limits tokens to those with cumulative probability p +func topP(ts []logit, p float32) []logit { + if p == 1.0 { + return ts + } + + // Find cutoff index where cumulative sum exceeds p + var sum float32 + for i, t := range ts { + sum += t.value + if sum > float32(p) { + ts = ts[:i+1] + return ts + } + } + + return ts +} + +// minP limits tokens to those with cumulative probability p +func minP(ts []logit, p float32) []logit { + if p == 1.0 { + return ts + } + + maxProb := float32(math.Inf(-1)) + for _, token := range ts { + if token.value > maxProb { + maxProb = token.value + } + } + + threshold := maxProb * float32(p) + + // Filter tokens in-place + validTokens := ts[:0] + for i, token := range ts { + if token.value >= threshold { + validTokens = append(validTokens, ts[i]) + } + } + + ts = validTokens + return ts +} + +// TODO(parthsareen): possibly replace with simpler implementation https://github.com/ollama/ollama/issues/9584 +// Conting sort implementation to sort tokens by logits +func sortLogits(tokens []logit) { + if len(tokens) <= 1 { + return + } + + // Find max/min in a single pass + minLogit, maxLogit := tokens[0].value, tokens[0].value + for _, t := range tokens[1:] { + if t.value < minLogit { + minLogit = t.value + } else if t.value > maxLogit { + maxLogit = t.value + } + } + + // Calculate scaling to map to uint32 range + logitRange := maxLogit - minLogit + if logitRange < 1e-6 { + return // All values effectively equal + } + + // Count frequencies directly from tokens + const maxInt = (1 << 24) - 1 // Use 24 bits for good granularity + var counts [256]int // For first byte + + // First pass: count frequencies + for _, t := range tokens { + // Map to [0, maxInt] range + score := min(uint32((t.value-minLogit)*float32(maxInt)/logitRange), maxInt) + counts[score>>16]++ + } + + // Calculate offsets + var offset int + for i := range counts { + count := counts[i] + counts[i] = offset + offset += count + } + + // Second pass: place elements in correct position + output := make([]logit, len(tokens)) + // Track current positions + countsCopy := counts + + for i, t := range tokens { + score := min(uint32((t.value-minLogit)*float32(maxInt)/logitRange), 
maxInt) + + pos := countsCopy[score>>16] + countsCopy[score>>16]++ + output[len(tokens)-1-pos] = tokens[i] + } + + copy(tokens, output) } diff --git a/sample/transforms_test.go b/sample/transforms_test.go index 05f76a27..950d79b3 100644 --- a/sample/transforms_test.go +++ b/sample/transforms_test.go @@ -4,77 +4,182 @@ import ( "math" "math/rand/v2" "testing" - - "github.com/google/go-cmp/cmp" ) -func TestTemperature(t *testing.T) { - got := Temperature(0.5).Apply([]float64{2, -1, 4, -3, 1, -2, 0}) - want := []float64{-4, -10, 0, -14, -6, -12, -8} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("logits mismatch (-want +got):\n%s", diff) +// Helper to convert float64 slice to logit slice +func toLogits(values []float64) []logit { + tokens := make([]logit, len(values)) + for i, v := range values { + tokens[i] = logit{ + id: int32(i), + value: float32(v), + } + } + return tokens +} + +// Helper to compare logit slices +func compareLogits(t *testing.T, name string, want []float64, got []logit) { + t.Helper() + if len(want) != len(got) { + t.Errorf("%s: length mismatch: want %d, got %d", name, len(want), len(got)) + return + } + for i := range want { + if math.Abs(float64(got[i].value)-want[i]) > 1e-6 { + t.Errorf("%s: index %d: want %f, got %f", name, i, want[i], got[i].value) + } } } -func TestSoftmax(t *testing.T) { - got := softmax([]float64{-3, -2, -1, 0, 1, 2, 4}) +func TestTemperature(t *testing.T) { + input := []float64{2, -1, 4, -3, 1, -2, 0} + want := []float64{-4, -10, 0, -14, -6, -12, -8} // (logit - max logit) / temp - want := []float64{0.000751406628089903, 0.0020425349829204676, 0.005552185728064613, 0.015092405572827691, 0.04102541181635154, 0.11151863144543739, 0.8240174238263085} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("probs mismatch (-want +got):\n%s", diff) + got := temperature(toLogits(input), 0.5) + compareLogits(t, "Temperature", want, got) +} + +func TestSoftmax(t *testing.T) { + input := []float64{-3, -2, -1, 0, 1, 2, 4} + got := softmax(toLogits(input)) + + // Check probabilities sum to 1 + var sum float32 + for _, token := range got { + sum += token.value + } + if math.Abs(float64(sum)-1.0) > 1e-6 { + t.Errorf("probabilities don't sum to 1: got %f", sum) + } + + // Check relative ordering is preserved + for i := 1; i < len(got); i++ { + if got[i].value < got[i-1].value { + t.Errorf("probability ordering not preserved at index %d", i) + } } } func TestTopK(t *testing.T) { - got := TopK(3).Apply([]float64{-3, -2, -1, 0, 1, 2, 4}) - want := []float64{math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), 1, 2, 4} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("logits mismatch (-want +got):\n%s", diff) - } + input := []float64{-3, -2, -1, 0, 1, 2, 4} - got = TopK(10).Apply([]float64{-3, -2, -1, 0, 1, 2, 4}) - - want = []float64{-3, -2, -1, 0, 1, 2, 4} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("logits mismatch (-want +got):\n%s", diff) + // Test k=3 + got := topK(toLogits(input), 3) + if len(got) != 3 { + t.Errorf("topK(3): wrong length: want 3, got %d", len(got)) } + // Should keep highest 3 values: 4, 2, 1 + want := []float64{4, 2, 1} + compareLogits(t, "topK(3)", want, got) + + // Test k > len + got = topK(toLogits(input), 10) + compareLogits(t, "topK(10)", input, got) } func TestTopP(t *testing.T) { - got := TopP(0.9).Apply([]float64{-3, -2, -1, 0, 1, 2, 4}) - want := []float64{math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), 2, 4} - if diff := cmp.Diff(want, got); diff != "" { - 
t.Errorf("logits mismatch (-want +got):\n%s", diff) + input := []float64{-3, -2, -1, 0, 1, 2, 4} + tokens := toLogits(input) + + // First apply temperature and softmax to get probabilities + tokens = temperature(tokens, 1) + tokens = softmax(tokens) + sortLogits(tokens) + + // Then apply topP + got := topP(tokens, 0.95) + + // Should keep tokens until cumsum > 0.95 + if len(got) > 3 { + t.Errorf("topP(0.95): kept too many tokens: got %d", len(got)) + t.Logf("got: %v", got) } } func TestMinP(t *testing.T) { - got := MinP(0.2).Apply([]float64{-3, -2, -1, 0, 1, 2, 4, 3}) - want := []float64{math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), math.Inf(-1), 4, 3} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("logits mismatch (-want +got):\n%s", diff) + input := []float64{-3, -2, -1, 0, 1, 2, 4, 3} + tokens := toLogits(input) + + // First apply temperature and softmax + tokens = temperature(tokens, 1) + tokens = softmax(tokens) + + // Then apply minP + got := minP(tokens, 0.2) + + // Should keep tokens with prob >= 0.2 * max_prob + if len(got) > 3 { + t.Errorf("minP(0.2): kept too many tokens: got %d", len(got)) } } -func BenchmarkTransform(b *testing.B) { - transforms := map[string]Transform{ - "Temperature": Temperature(0.5), - "TopK": TopK(10), - "TopP": TopP(0.9), - "MinP": MinP(0.2), +func TestSortLogits(t *testing.T) { + input := []float64{3, 1, 4, 2, -1, 0, -2} + tokens := toLogits(input) + + sortLogits(tokens) + + for i := 1; i < len(tokens); i++ { + if tokens[i].value > tokens[i-1].value { + t.Errorf("sortLogits: tokens not sorted in descending order at index %d: %f > %f", + i, tokens[i].value, tokens[i-1].value) + } } - logits := make([]float64, 1<<16) - for i := range logits { - logits[i] = rand.Float64() - } - - for name, transform := range transforms { - b.Run(name, func(b *testing.B) { - b.ResetTimer() - for range b.N { - transform.Apply(logits) - } - }) - } + want := []float64{4, 3, 2, 1, 0, -1, -2} + compareLogits(t, "sortLogits", want, tokens) +} + +func BenchmarkTransforms(b *testing.B) { + // Generate random logits + tokens := make([]logit, 1<<16) + for i := range tokens { + tokens[i] = logit{ + id: int32(i), + value: rand.Float32(), + } + } + + tokensCopy := make([]logit, len(tokens)) + + b.Run("Temperature", func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + copy(tokensCopy, tokens) + temperature(tokensCopy, 0.5) + } + }) + + b.Run("TopK", func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + copy(tokensCopy, tokens) + topK(tokensCopy, 10) + } + }) + + b.Run("TopP", func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + copy(tokensCopy, tokens) + topP(tokensCopy, 0.9) + } + }) + + b.Run("MinP", func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + copy(tokensCopy, tokens) + minP(tokensCopy, 0.2) + } + }) + + b.Run("SortTokens", func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + copy(tokensCopy, tokens) + sortLogits(tokensCopy) + } + }) } From bab6f34dc0f441c6a18b7cbc2465e1b386cf613e Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 19 Feb 2025 14:26:40 -0800 Subject: [PATCH 009/157] ml/backend/ggml: update model loading for hybrid/multi backends use a similar strategy as llama.cpp for deciding where tensors should be allocated. 
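A rough sketch of the split heuristic used here (a simplified, standalone illustration with hypothetical names; the real code below works on ggml devices and buffer types, and free memory is read via ggml_backend_dev_memory):

    // pickGPU returns which GPU should hold block i: the first device whose
    // cumulative share of free memory exceeds i/(blocks+1), mirroring the
    // cumulative-split logic introduced in this patch.
    func pickGPU(free []uint64, i, blocks int) int {
        var total uint64
        for _, f := range free {
            total += f
        }
        var cum uint64
        for d, f := range free {
            cum += f
            if float64(i)/float64(blocks+1) < float64(cum)/float64(total) {
                return d
            }
        }
        return len(free) - 1
    }
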
this will be improved later to be aware of usable memory before assigning the tensor --- ml/backend/ggml/ggml.go | 359 +++++++++++------- ml/backend/ggml/ggml/src/ggml-backend-reg.cpp | 21 +- model/models/llama/model.go | 16 +- 3 files changed, 249 insertions(+), 147 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 2d8ddf99..ae32e3c6 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -9,67 +9,46 @@ package ggml import "C" import ( + "errors" "fmt" "io" + "iter" "log/slog" + "maps" "os" - "sync" + "slices" + "strconv" + "strings" + "unicode" "unsafe" "github.com/ollama/ollama/format" fs "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/ml" "golang.org/x/sync/errgroup" - - ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src" ) -type device struct { - d *C.struct_ggml_backend_device -} - -func (d device) LogValue() slog.Value { - var free, total uint64 - C.ggml_backend_dev_memory(d.d, (*C.size_t)(&free), (*C.size_t)(&total)) - - kind := "unknown" - switch C.ggml_backend_dev_type(d.d) { - case C.GGML_BACKEND_DEVICE_TYPE_CPU: - kind = "cpu" - case C.GGML_BACKEND_DEVICE_TYPE_GPU: - kind = "gpu" - case C.GGML_BACKEND_DEVICE_TYPE_ACCEL: - kind = "accel" +func devices() iter.Seq[*C.struct_ggml_backend_device] { + return func(yield func(*C.struct_ggml_backend_device) bool) { + for i := range C.ggml_backend_dev_count() { + if !yield(C.ggml_backend_dev_get(i)) { + return + } + } } - - return slog.GroupValue( - slog.String("name", C.GoString(C.ggml_backend_dev_name(d.d))), - slog.String("description", C.GoString(C.ggml_backend_dev_description(d.d))), - slog.String("kind", kind), - slog.String("free", format.HumanBytes2(free)), - slog.String("total", format.HumanBytes2(total)), - ) } -var devices = sync.OnceValue(func() []device { - ggml.OnceLoad() - - s := make([]device, C.ggml_backend_dev_count()) - for i := range s { - s[i] = device{C.ggml_backend_dev_get(C.size_t(i))} - } - - return s -}) - type Backend struct { + meta *fs.GGML + flashAttention bool - meta *fs.GGML - cpus, gpus []Context - tensors map[string]*Context - sched *C.struct_ggml_backend_sched + + tensors map[string]*C.struct_ggml_tensor + ctxs []*C.struct_ggml_context + backends []*C.struct_ggml_backend + bufts []*C.struct_ggml_backend_buffer_type } func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { @@ -88,100 +67,226 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { "num_key_values", len(meta.KV()), ) - var cpus, gpus []Context - for _, d := range devices() { - switch C.ggml_backend_dev_type(d.d) { + type dbt struct { + d *C.struct_ggml_backend_device + bts []*C.struct_ggml_backend_buffer_type + } + + var cpus, accels, gpus []*C.struct_ggml_backend_device + for d := range devices() { + switch C.ggml_backend_dev_type(d) { + case C.GGML_BACKEND_DEVICE_TYPE_CPU: + cpus = append(cpus, d) + case C.GGML_BACKEND_DEVICE_TYPE_ACCEL: + accels = append(accels, d) + case C.GGML_BACKEND_DEVICE_TYPE_GPU: + gpus = append(gpus, d) + } + } + + var cpuBufferTypes []*C.struct_ggml_backend_buffer_type + for _, d := range append(accels, append(gpus, cpus...)...) 
{ + switch C.ggml_backend_dev_type(d) { case C.GGML_BACKEND_DEVICE_TYPE_CPU, C.GGML_BACKEND_DEVICE_TYPE_ACCEL: - slog.Info("cpu", "device", d) - cpus = append(cpus, Context{ - ctx: C.ggml_init(C.struct_ggml_init_params{ - mem_size: C.size_t(int(C.ggml_tensor_overhead()) * (len(meta.Tensors().Items()) + 1 + int(meta.KV().BlockCount())*2)), - no_alloc: true, - }), - backend: C.ggml_backend_dev_init(d.d, nil), - }) - case C.GGML_BACKEND_DEVICE_TYPE_GPU: - slog.Info("gpu", "device", d) - gpus = append(gpus, Context{ - ctx: C.ggml_init(C.struct_ggml_init_params{ - mem_size: C.size_t(int(C.ggml_tensor_overhead()) * (len(meta.Tensors().Items()) + 1 + int(meta.KV().BlockCount())*2)), - no_alloc: true, - }), - backend: C.ggml_backend_dev_init(d.d, nil), - }) + cpuBufferTypes = append(cpuBufferTypes, C.ggml_backend_dev_buffer_type(d)) } } - ctxFunc := func(s []Context) (*Context, error) { - for _, e := range s { - return &e, nil - } + var sum uint64 + var cumsum []uint64 - return nil, fmt.Errorf("no devices available") + var gpuBufferTypes []dbt + for _, d := range gpus { + var free, total C.size_t + C.ggml_backend_dev_memory(d, &free, &total) + sum += uint64(free) + cumsum = append(cumsum, sum) + + bt := C.ggml_backend_dev_buffer_type(d) + gpuBufferTypes = append(gpuBufferTypes, dbt{ + d: d, + bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuBufferTypes...), + }) } - tensors := make(map[*fs.Tensor]*Context, len(meta.Tensors().Items())) - for _, t := range meta.Tensors().Items() { - c, err := ctxFunc(append(gpus, cpus...)) - if err != nil { - return nil, err - } + splits := make([]float64, len(cumsum)) + for i := range splits { + splits[i] = float64(cumsum[i]) / float64(sum) + } - func() { - tt := C.ggml_new_tensor(c.ctx, t.Kind, C.int(len(t.Shape)), (*C.int64_t)(unsafe.Pointer(&t.Shape[0]))) + input := dbt{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} + slog.Info("input layer", "device", C.GoString(C.ggml_backend_dev_name(input.d))) + + var blocks int + for key, value := range meta.KV() { + if strings.HasSuffix(key, ".block_count") { + blocks += int(value.(uint32)) + } + } + + indexFunc := func(i int) func(float64) bool { + return func(f float64) bool { + return float64(i)/float64(blocks+1) < f + } + } + + layers := make([]dbt, blocks) + for i := range layers { + layers[i] = gpuBufferTypes[slices.IndexFunc(splits, indexFunc(i))] + slog.Info("layer", "i", i, "device", C.GoString(C.ggml_backend_dev_name(layers[i].d))) + } + + output := gpuBufferTypes[slices.IndexFunc(splits, indexFunc(blocks))] + slog.Info("output layer", "device", C.GoString(C.ggml_backend_dev_name(output.d))) + + maxTensors := len(meta.Tensors().Items()) + maxTensors += 1 + maxTensors += blocks * 2 + + slog.Info("max tensors", "max_tensors", maxTensors) + + ctxs := make(map[*C.struct_ggml_backend_buffer_type]*C.struct_ggml_context) + createTensor := func(t *fs.Tensor, bts []*C.struct_ggml_backend_buffer_type) *C.struct_ggml_tensor { + for _, bt := range bts { + if _, ok := ctxs[bt]; !ok { + ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{ + mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors), + no_alloc: true, + }) + } cname := C.CString(t.Name) defer C.free(unsafe.Pointer(cname)) + if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil { + return tt + } + + tt := C.ggml_new_tensor(ctxs[bt], t.Kind, C.int(len(t.Shape)), (*C.int64_t)(unsafe.Pointer(&t.Shape[0]))) C.ggml_set_name(tt, cname) - tensors[t] = c - }() + slog.Debug("created tensor", "name", t.Name, "shape", t.Shape, "dtype", 
t.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) + //nolint:staticcheck // TODO: check if buffer type supports this tensor + return tt + } + + return nil } - for _, b := range append(gpus, cpus...) { - C.ggml_backend_alloc_ctx_tensors(b.ctx, b.backend) + hasPart := func(s string, parts ...string) bool { + split := strings.Split(s, ".") + for _, part := range parts { + if slices.Contains(split, part) { + return true + } + } + + return false + } + + for _, t := range meta.Tensors().Items() { + switch { + case hasPart(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"): + createTensor(t, input.bts) + case hasPart(t.Name, "cls", "output", "output_norm"): + createTensor(t, output.bts) + default: + if i := func() int { + if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 { + if i, err := strconv.Atoi(fields[0]); err == nil { + return i + } + } + + return -1 + }(); i >= 0 { + createTensor(t, layers[i].bts) + } else { + for _, layer := range layers { + createTensor(t, layer.bts) + } + } + } + } + + bbs := make(map[*C.struct_ggml_context][]*C.struct_ggml_backend_buffer, len(ctxs)) + + for bt, c := range ctxs { + if C.ggml_get_first_tensor(c) == nil { + continue + } + + b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt) + C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS) + bbs[c] = append(bbs[c], b) + } + + for bs := range maps.Values(bbs) { + for _, b := range bs { + slog.Info("model", "buffer", C.GoString(C.ggml_backend_buffer_name(b)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(b)))) + } + } + + tensors := make(map[string]*C.struct_ggml_tensor) + for _, c := range ctxs { + for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) { + tensors[C.GoString(C.ggml_get_name(t))] = t + } } sr := io.NewSectionReader(r, int64(meta.Tensors().Offset), n-int64(meta.Tensors().Offset)) - var g errgroup.Group - for t, c := range tensors { + for _, t := range meta.Tensors().Items() { g.Go(func() error { + tt, ok := tensors[t.Name] + if !ok { + return fmt.Errorf("unassigned tensor: %s", t.Name) + } + bts := make([]byte, t.Size()) n, err := io.ReadFull(io.NewSectionReader(sr, int64(t.Offset), int64(t.Size())), bts) if err != nil { return err } - if n != int(t.Size()) { - return fmt.Errorf("expected %d bytes, got %d", t.Size(), n) + if n != len(bts) { + return errors.New("short read") } cname := C.CString(t.Name) - defer C.free(unsafe.Pointer(cname)) + C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), 0, C.size_t(t.Size())) + C.free(unsafe.Pointer(cname)) - C.ggml_backend_tensor_set(C.ggml_get_tensor(c.ctx, cname), unsafe.Pointer(&bts[0]), 0, C.size_t(n)) return nil }) } - if err := g.Wait(); err != nil { + if g.Wait() != nil { return nil, err } - backends := make([]*C.struct_ggml_backend, len(gpus)+len(cpus)) - bufts := make([]*C.struct_ggml_backend_buffer_type, len(gpus)+len(cpus)) - for i, c := range append(gpus, cpus...) { - backends[i] = c.backend - bufts[i] = C.ggml_backend_get_default_buffer_type(c.backend) + var backends []*C.struct_ggml_backend + var bufts []*C.struct_ggml_backend_buffer_type + for _, d := range append(gpus, append(accels, cpus...)...) 
{ + b := C.ggml_backend_dev_init(d, nil) + backends = append(backends, b) + + bt := C.ggml_backend_get_default_buffer_type(b) + if d := C.ggml_backend_get_device(b); C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU && len(gpus) > 0 { + if hbt := C.ggml_backend_dev_host_buffer_type(d); hbt != nil { + bt = hbt + } + } + + bufts = append(bufts, bt) + + slog.Info("compute buffer", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) } return &Backend{ flashAttention: params.FlashAttention, - meta: meta, - cpus: cpus, - gpus: gpus, + meta: meta, + tensors: tensors, sched: C.ggml_backend_sched_new( (*C.ggml_backend_t)(unsafe.Pointer(&backends[0])), (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&bufts[0])), @@ -201,36 +306,22 @@ func (b *Backend) Config() ml.Config { } func (b *Backend) Get(name string) ml.Tensor { - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - - for _, c := range append(b.gpus, b.cpus...) { - if t := C.ggml_get_tensor(c.ctx, cname); t != nil { - return &Tensor{b: b, t: t} - } + if t, ok := b.tensors[name]; ok { + return &Tensor{b: b, t: t} } return nil } func (b *Backend) NewContext() ml.Context { - nodes := max(8192, len(b.meta.Tensors().Items())*5) - c := C.ggml_init(C.struct_ggml_init_params{ - mem_buffer: nil, - mem_size: C.size_t(nodes)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(nodes), false), - no_alloc: true, - }) - - backends := make([]*C.struct_ggml_backend, len(b.gpus)+len(b.cpus)) - for i, c := range append(b.gpus, b.cpus...) { - backends[i] = c.backend - } - + maxTensors := max(8192, len(b.meta.Tensors().Items())*5) return &Context{ - b: b, - ctx: c, - backend: backends[0], - nodes: nodes, + b: b, + maxTensors: maxTensors, + ctx: C.ggml_init(C.struct_ggml_init_params{ + mem_size: C.size_t(maxTensors)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(maxTensors), false), + no_alloc: true, + }), } } @@ -243,17 +334,17 @@ func (b *Backend) CacheConfig() ml.CacheConfig { } type Context struct { - b *Backend - ctx *C.struct_ggml_context - backend *C.struct_ggml_backend + b *Backend + ctx *C.struct_ggml_context graph *C.struct_ggml_cgraph - nodes int + + maxTensors int } func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { if c.graph == nil { - c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.nodes), false) + c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxTensors), false) } for _, tensor := range tensors { @@ -264,8 +355,9 @@ func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { } func (c *Context) Compute(tensors ...ml.Tensor) { - C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph) C.ggml_backend_sched_reset(c.b.sched) + C.ggml_backend_sched_alloc_graph(c.b.sched, c.graph) + C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph) needSync := true sync := func() { @@ -283,19 +375,19 @@ func (c *Context) Compute(tensors ...ml.Tensor) { } func (c *Context) MaxTensors() int { - return c.nodes + return c.maxTensors } func shapeToGGML(shape []int) *C.int64_t { sh := make([]C.int64_t, len(shape)) for i, s := range shape { - sh[i] = (C.int64_t)(s) + sh[i] = C.int64_t(s) } return &sh[0] } -func newTensor(ctx Context, dtype ml.DType, zero bool, shape []int) ml.Tensor { +func newTensor(ctx Context, dtype ml.DType, shape []int) ml.Tensor { if len(shape) < 1 || len(shape) > 4 { panic("unsupported number of dimensions") } @@ -318,20 +410,20 @@ func newTensor(ctx Context, dtype ml.DType, zero bool, shape []int) ml.Tensor { 
panic("unsupported dtype") } - b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) + b := C.ggml_backend_alloc_buffer(C.ggml_backend_sched_get_backend(ctx.b.sched, 0), C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) - if zero { - C.ggml_set_zero(t) - } + C.ggml_set_input(t) return &Tensor{b: ctx.b, t: t} } func (c Context) Empty(dtype ml.DType, shape ...int) ml.Tensor { - return newTensor(c, dtype, false, shape) + return newTensor(c, dtype, shape) } func (c Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor { - return newTensor(c, dtype, true, shape) + t := newTensor(c, dtype, shape) + C.ggml_set_zero(t.(*Tensor).t) + return t } func fromSlice[S ~[]E, E float32 | int32](ctx Context, s S, shape []int, dtype uint32) (ml.Tensor, error) { @@ -352,9 +444,10 @@ func fromSlice[S ~[]E, E float32 | int32](ctx Context, s S, shape []int, dtype u } t := C.ggml_new_tensor(ctx.ctx, dtype, C.int(len(shape)), shapeToGGML(shape)) - b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) + b := C.ggml_backend_alloc_buffer(C.ggml_backend_sched_get_backend(ctx.b.sched, 0), C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) C.ggml_backend_tensor_set(t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t)) + C.ggml_set_input(t) return &Tensor{b: ctx.b, t: t}, nil } diff --git a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp index 799af5f3..00cfc968 100644 --- a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp +++ b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp @@ -207,13 +207,7 @@ struct ggml_backend_registry { for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { register_device(ggml_backend_reg_dev_get(reg, i), score); } - } - void register_device(ggml_backend_dev_t device, int score = -1) { -#ifndef NDEBUG - GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); -#endif - devices.push_back({device, score}); std::stable_sort(devices.begin(), devices.end(), [](const auto & a, const auto & b) { return a.second > b.second; @@ -221,6 +215,21 @@ struct ggml_backend_registry { ); } + void register_device(ggml_backend_dev_t device, int score = -1) { + switch (ggml_backend_dev_type(device)) { + case GGML_BACKEND_DEVICE_TYPE_CPU: + case GGML_BACKEND_DEVICE_TYPE_GPU: + score += 1 << 16; + case GGML_BACKEND_DEVICE_TYPE_ACCEL: + score += 1 << 20; + } + +#ifndef NDEBUG + GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); +#endif + devices.push_back({device, score}); + } + ggml_backend_reg_t load_backend(const std::filesystem::path & path, bool silent) { dl_handle_ptr handle { dl_load_library(path) }; if (!handle) { diff --git a/model/models/llama/model.go b/model/models/llama/model.go index 2f254a28..73a23621 100644 --- a/model/models/llama/model.go +++ b/model/models/llama/model.go @@ -12,7 +12,6 @@ import ( ) type Options struct { - RopeFactors ml.Tensor `gguf:"rope_freqs.weight"` hiddenSize, numHeads, numKVHeads int eps, ropeBase, ropeScale float32 ropeDim uint32 @@ -66,10 +65,11 @@ func New(c ml.Config) (model.Model, error) { } type SelfAttention struct { - Query *nn.Linear `gguf:"attn_q"` - Key *nn.Linear `gguf:"attn_k"` - Value *nn.Linear `gguf:"attn_v"` - Output *nn.Linear `gguf:"attn_output"` + Query *nn.Linear `gguf:"attn_q"` + Key *nn.Linear `gguf:"attn_k"` + Value *nn.Linear `gguf:"attn_v"` + Output *nn.Linear 
`gguf:"attn_output"` + RopeFactors ml.Tensor `gguf:"rope_freqs.weight"` } func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { @@ -78,11 +78,11 @@ func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Ten q := sa.Query.Forward(ctx, hiddenState) q = q.Reshape(ctx, headDim, opts.numHeads, batchSize) - q = q.RoPE(ctx, positionIDs, opts.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + q = q.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) k := sa.Key.Forward(ctx, hiddenState) k = k.Reshape(ctx, headDim, opts.numKVHeads, batchSize) - k = k.RoPE(ctx, positionIDs, opts.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + k = k.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) v := sa.Value.Forward(ctx, hiddenState) v = v.Reshape(ctx, headDim, opts.numKVHeads, batchSize) @@ -95,7 +95,7 @@ func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Ten } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { - return key.RoPE(ctx, shift, m.Options.RopeFactors, m.Options.ropeDim, m.Options.ropeBase, m.Options.ropeScale), nil + return key.RoPE(ctx, shift, m.Layers[layer].SelfAttention.RopeFactors, m.ropeDim, m.ropeBase, m.ropeScale), nil } type MLP struct { From bfce55db3d9052200392c67412656cc3e37ba893 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 24 Feb 2025 15:48:42 -0800 Subject: [PATCH 010/157] model: load non-repeated tensors into multiple backends some tensors are expected to be used in repeating layers but are not themselves repeated. this change copies these tensors into the same backends as their repeating counterparts to minimize copying tensors between backends --- ml/backend/ggml/ggml.go | 79 ++++++++++++------- ml/backend/ggml/ggml/src/ggml-backend-reg.cpp | 21 ++--- 2 files changed, 58 insertions(+), 42 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index ae32e3c6..e909f53c 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -25,11 +25,13 @@ import ( "github.com/ollama/ollama/format" fs "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/ml" + ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src" "golang.org/x/sync/errgroup" ) func devices() iter.Seq[*C.struct_ggml_backend_device] { return func(yield func(*C.struct_ggml_backend_device) bool) { + ggml.OnceLoad() for i := range C.ggml_backend_dev_count() { if !yield(C.ggml_backend_dev_get(i)) { return @@ -146,8 +148,15 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { slog.Info("max tensors", "max_tensors", maxTensors) + type tensor struct { + source *fs.Tensor + target string + } + + targets := make(map[string][]string) + ctxs := make(map[*C.struct_ggml_backend_buffer_type]*C.struct_ggml_context) - createTensor := func(t *fs.Tensor, bts []*C.struct_ggml_backend_buffer_type) *C.struct_ggml_tensor { + createTensor := func(t tensor, bts []*C.struct_ggml_backend_buffer_type) *C.struct_ggml_tensor { for _, bt := range bts { if _, ok := ctxs[bt]; !ok { ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{ @@ -156,16 +165,23 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { }) } - cname := C.CString(t.Name) + targets[t.source.Name] = append(targets[t.source.Name], t.target) + + name := t.source.Name + if t.target != "" { + name = t.target + } + + cname := C.CString(name) defer 
C.free(unsafe.Pointer(cname)) if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil { return tt } - tt := C.ggml_new_tensor(ctxs[bt], t.Kind, C.int(len(t.Shape)), (*C.int64_t)(unsafe.Pointer(&t.Shape[0]))) + tt := C.ggml_new_tensor(ctxs[bt], t.source.Kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0]))) C.ggml_set_name(tt, cname) - slog.Debug("created tensor", "name", t.Name, "shape", t.Shape, "dtype", t.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) + slog.Debug("created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) //nolint:staticcheck // TODO: check if buffer type supports this tensor return tt } @@ -187,9 +203,9 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { for _, t := range meta.Tensors().Items() { switch { case hasPart(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"): - createTensor(t, input.bts) + createTensor(tensor{source: t}, input.bts) case hasPart(t.Name, "cls", "output", "output_norm"): - createTensor(t, output.bts) + createTensor(tensor{source: t}, output.bts) default: if i := func() int { if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 { @@ -200,10 +216,13 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return -1 }(); i >= 0 { - createTensor(t, layers[i].bts) + createTensor(tensor{source: t}, layers[i].bts) } else { - for _, layer := range layers { - createTensor(t, layer.bts) + for i, layer := range layers { + createTensor(tensor{ + source: t, + target: "blk." + strconv.Itoa(i) + "." + t.Name, + }, layer.bts) } } } @@ -237,28 +256,34 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { sr := io.NewSectionReader(r, int64(meta.Tensors().Offset), n-int64(meta.Tensors().Offset)) var g errgroup.Group for _, t := range meta.Tensors().Items() { - g.Go(func() error { - tt, ok := tensors[t.Name] - if !ok { - return fmt.Errorf("unassigned tensor: %s", t.Name) - } + for _, target := range targets[t.Name] { + g.Go(func() error { + if target == "" { + target = t.Name + } - bts := make([]byte, t.Size()) - n, err := io.ReadFull(io.NewSectionReader(sr, int64(t.Offset), int64(t.Size())), bts) - if err != nil { - return err - } + tt, ok := tensors[target] + if !ok { + return fmt.Errorf("unassigned tensor: %s", t.Name) + } - if n != len(bts) { - return errors.New("short read") - } + bts := make([]byte, t.Size()) + n, err := io.ReadFull(io.NewSectionReader(sr, int64(t.Offset), int64(t.Size())), bts) + if err != nil { + return err + } - cname := C.CString(t.Name) - C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), 0, C.size_t(t.Size())) - C.free(unsafe.Pointer(cname)) + if n != len(bts) { + return errors.New("short read") + } - return nil - }) + cname := C.CString(t.Name) + C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), 0, C.size_t(t.Size())) + C.free(unsafe.Pointer(cname)) + + return nil + }) + } } if g.Wait() != nil { diff --git a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp index 00cfc968..799af5f3 100644 --- a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp +++ b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp @@ -207,7 +207,13 @@ struct ggml_backend_registry { for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { register_device(ggml_backend_reg_dev_get(reg, i), score); } + } + void register_device(ggml_backend_dev_t device, int score = -1) { 
+#ifndef NDEBUG + GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); +#endif + devices.push_back({device, score}); std::stable_sort(devices.begin(), devices.end(), [](const auto & a, const auto & b) { return a.second > b.second; @@ -215,21 +221,6 @@ struct ggml_backend_registry { ); } - void register_device(ggml_backend_dev_t device, int score = -1) { - switch (ggml_backend_dev_type(device)) { - case GGML_BACKEND_DEVICE_TYPE_CPU: - case GGML_BACKEND_DEVICE_TYPE_GPU: - score += 1 << 16; - case GGML_BACKEND_DEVICE_TYPE_ACCEL: - score += 1 << 20; - } - -#ifndef NDEBUG - GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); -#endif - devices.push_back({device, score}); - } - ggml_backend_reg_t load_backend(const std::filesystem::path & path, bool silent) { dl_handle_ptr handle { dl_load_library(path) }; if (!handle) { From 764e199d6703d80da4d245381efa4a3a412813b2 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 25 Feb 2025 12:57:49 -0800 Subject: [PATCH 011/157] kvcache: create cache ctx per layer each cache layer creates and maintains its own context instead of using a large context for all layers --- kvcache/causal.go | 52 +++++++++++++++++++++++++---------------- kvcache/encoder.go | 36 +++++++++++++++++----------- ml/backend.go | 2 +- ml/backend/ggml/ggml.go | 24 ++++++++++--------- 4 files changed, 68 insertions(+), 46 deletions(-) diff --git a/kvcache/causal.go b/kvcache/causal.go index b2e7b3ab..6a927cb8 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -55,8 +55,8 @@ type Causal struct { shiftFn shiftFn backend ml.Backend - cacheCtx ml.Context - keys, values []ml.Tensor + ctxs map[int]ml.Context + keys, values map[int]ml.Tensor } type cacheCell struct { @@ -70,11 +70,23 @@ type cellRange struct { } func NewCausalCache(shift shiftFn) *Causal { - return &Causal{windowSize: math.MaxInt32, shiftFn: shift} + return &Causal{ + windowSize: math.MaxInt32, + shiftFn: shift, + ctxs: make(map[int]ml.Context), + keys: make(map[int]ml.Tensor), + values: make(map[int]ml.Tensor), + } } func NewSWACache(windowSize int32, shift shiftFn) *Causal { - return &Causal{windowSize: windowSize, shiftFn: shift} + return &Causal{ + windowSize: windowSize, + shiftFn: shift, + ctxs: make(map[int]ml.Context), + keys: make(map[int]ml.Tensor), + values: make(map[int]ml.Tensor), + } } func (c *Causal) Init(backend ml.Backend, dtype ml.DType, capacity int32) { @@ -103,7 +115,6 @@ func (c *Causal) Init(backend ml.Backend, dtype ml.DType, capacity int32) { c.cells = make([]cacheCell, c.Capacity) c.cellRanges = make(map[int]cellRange) c.backend = backend - c.cacheCtx = backend.NewContext() } func (c *Causal) SetConfig(config ml.CacheConfig) { @@ -115,7 +126,9 @@ func (c *Causal) SetConfig(config ml.CacheConfig) { } func (c *Causal) Close() { - c.cacheCtx.Close() + for _, ctx := range c.ctxs { + ctx.Close() + } } func (c *Causal) StartForward(ctx ml.Context, positions []int32, seqs []int) error { @@ -239,13 +252,11 @@ func (c *Causal) buildMask(ctx ml.Context, positions []int32, seqs []int) (ml.Te } func (c *Causal) moveCells(ctx ml.Context, src, dst, len int) { - for i := range c.keys { - if c.keys[i] == nil { + for i, key := range c.keys { + if key == nil { continue } - key := c.keys[i] - kHeadDim := key.Dim(0) numKVHeads := key.Dim(1) rowSize := key.Stride(2) @@ -305,7 +316,7 @@ func (c *Causal) defrag() { layers++ } - maxMoves := ctx.MaxTensors() / (6 * 
layers) + maxMoves := ctx.MaxGraphNodes() / (6 * layers) moves := 0 var pendingSrc, pendingDst, pendingLen int @@ -377,11 +388,6 @@ func (c *Causal) defrag() { } func (c *Causal) SetLayer(layer int) { - if layer >= len(c.keys) { - c.keys = append(c.keys, make([]ml.Tensor, layer-len(c.keys)+1)...) - c.values = append(c.values, make([]ml.Tensor, layer-len(c.values)+1)...) - } - c.curLayer = layer } @@ -433,13 +439,19 @@ func (c *Causal) Put(ctx ml.Context, key, value ml.Tensor) { panic(fmt.Errorf("inconsistent batch sizes (layer: %v, batch size: %v layer batch size: %v)", c.curLayer, c.curBatchSize, batchSize)) } - if c.keys[c.curLayer] == nil || c.values[c.curLayer] == nil { - c.keys[c.curLayer] = c.cacheCtx.Zeros(c.DType, kHeadDim, numKVHeads, int(c.Capacity)) + if _, ok := c.ctxs[c.curLayer]; !ok { + c.ctxs[c.curLayer] = c.backend.NewContext() + } + if _, ok := c.keys[c.curLayer]; !ok { + c.keys[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, kHeadDim, numKVHeads, int(c.Capacity)) + } + + if _, ok := c.values[c.curLayer]; !ok { if c.config.PermutedV { - c.values[c.curLayer] = c.cacheCtx.Zeros(c.DType, int(c.Capacity), vHeadDim, numKVHeads) + c.values[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, int(c.Capacity), vHeadDim, numKVHeads) } else { - c.values[c.curLayer] = c.cacheCtx.Zeros(c.DType, vHeadDim, numKVHeads, int(c.Capacity)) + c.values[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, vHeadDim, numKVHeads, int(c.Capacity)) } } diff --git a/kvcache/encoder.go b/kvcache/encoder.go index 39b4cdfb..6a24e867 100644 --- a/kvcache/encoder.go +++ b/kvcache/encoder.go @@ -35,13 +35,17 @@ type EncoderCache struct { encoderPos int32 // ** cache data storage ** - - cacheCtx ml.Context - keys, values []ml.Tensor + backend ml.Backend + ctxs map[int]ml.Context + keys, values map[int]ml.Tensor } func NewEncoderCache() *EncoderCache { - return &EncoderCache{} + return &EncoderCache{ + ctxs: make(map[int]ml.Context), + keys: make(map[int]ml.Tensor), + values: make(map[int]ml.Tensor), + } } func (c *EncoderCache) Init(backend ml.Backend, dtype ml.DType, capacity int32) { @@ -57,7 +61,7 @@ func (c *EncoderCache) Init(backend ml.Backend, dtype ml.DType, capacity int32) panic(fmt.Errorf("encoder cache is unable to enforce requested CachePadding (%v)", c.config.CachePadding)) } - c.cacheCtx = backend.NewContext() + c.backend = backend } func (c *EncoderCache) SetConfig(config ml.CacheConfig) { @@ -69,7 +73,9 @@ func (c *EncoderCache) SetConfig(config ml.CacheConfig) { } func (c *EncoderCache) Close() { - c.cacheCtx.Close() + for _, ctx := range c.ctxs { + ctx.Close() + } } func (c *EncoderCache) StartForward(ctx ml.Context, positions []int32, seqs []int) error { @@ -80,11 +86,6 @@ func (c *EncoderCache) StartForward(ctx ml.Context, positions []int32, seqs []in } func (c *EncoderCache) SetLayer(layer int) { - if layer >= len(c.keys) { - c.keys = append(c.keys, make([]ml.Tensor, layer-len(c.keys)+1)...) - c.values = append(c.values, make([]ml.Tensor, layer-len(c.values)+1)...) - } - c.curLayer = layer } @@ -104,9 +105,16 @@ func (c *EncoderCache) Put(ctx ml.Context, key, value ml.Tensor) { value = value.Permute(ctx, 1, 2, 0, 3) } - if c.keys[c.curLayer] == nil || c.values[c.curLayer] == nil { - c.keys[c.curLayer] = c.cacheCtx.Empty(key.DType(), key.Shape()...) - c.values[c.curLayer] = c.cacheCtx.Empty(value.DType(), value.Shape()...) 
+ if _, ok := c.ctxs[c.curLayer]; !ok { + c.ctxs[c.curLayer] = c.backend.NewContext() + } + + if _, ok := c.keys[c.curLayer]; !ok { + c.keys[c.curLayer] = c.ctxs[c.curLayer].Empty(key.DType(), key.Shape()...) + } + + if _, ok := c.values[c.curLayer]; !ok { + c.values[c.curLayer] = c.ctxs[c.curLayer].Empty(value.DType(), value.Shape()...) } ctx.Forward( diff --git a/ml/backend.go b/ml/backend.go index 3ef8a1ac..1eeb635b 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -99,7 +99,7 @@ type Context interface { Forward(...Tensor) Context Compute(...Tensor) - MaxTensors() int + MaxGraphNodes() int Close() } diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index e909f53c..1a272256 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -339,14 +339,15 @@ func (b *Backend) Get(name string) ml.Tensor { } func (b *Backend) NewContext() ml.Context { - maxTensors := max(8192, len(b.meta.Tensors().Items())*5) + maxGraphNodes := max(8192, len(b.meta.Tensors().Items())*5) return &Context{ b: b, - maxTensors: maxTensors, ctx: C.ggml_init(C.struct_ggml_init_params{ - mem_size: C.size_t(maxTensors)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(maxTensors), false), + mem_size: C.size_t(maxGraphNodes)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(maxGraphNodes), false), no_alloc: true, }), + backend: C.ggml_backend_sched_get_backend(b.sched, 0), + maxGraphNodes: maxGraphNodes, } } @@ -363,13 +364,14 @@ type Context struct { ctx *C.struct_ggml_context graph *C.struct_ggml_cgraph + backend *C.struct_ggml_backend - maxTensors int + maxGraphNodes int } func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { if c.graph == nil { - c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxTensors), false) + c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false) } for _, tensor := range tensors { @@ -399,8 +401,8 @@ func (c *Context) Compute(tensors ...ml.Tensor) { } } -func (c *Context) MaxTensors() int { - return c.maxTensors +func (c *Context) MaxGraphNodes() int { + return c.maxGraphNodes } func shapeToGGML(shape []int) *C.int64_t { @@ -435,7 +437,7 @@ func newTensor(ctx Context, dtype ml.DType, shape []int) ml.Tensor { panic("unsupported dtype") } - b := C.ggml_backend_alloc_buffer(C.ggml_backend_sched_get_backend(ctx.b.sched, 0), C.ggml_nbytes(t)) + b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) C.ggml_set_input(t) return &Tensor{b: ctx.b, t: t} @@ -469,7 +471,7 @@ func fromSlice[S ~[]E, E float32 | int32](ctx Context, s S, shape []int, dtype u } t := C.ggml_new_tensor(ctx.ctx, dtype, C.int(len(shape)), shapeToGGML(shape)) - b := C.ggml_backend_alloc_buffer(C.ggml_backend_sched_get_backend(ctx.b.sched, 0), C.ggml_nbytes(t)) + b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) C.ggml_backend_tensor_set(t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t)) C.ggml_set_input(t) @@ -484,8 +486,8 @@ func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { return fromSlice(c, s, shape, C.GGML_TYPE_I32) } -func (c *Context) Close() { - if c != nil { +func (c Context) Close() { + if c.ctx != nil { C.ggml_free(c.ctx) } } From 7bae7fa5ce6a83cfecde12015a7c43dfa3e8bffc Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 25 Feb 2025 16:06:32 -0800 Subject: [PATCH 012/157] ml/backend/ggml: create tensor on specific backend some tensors should be created on 
specific backends to reduce number of copies and improve performance --- kvcache/causal.go | 6 +- kvcache/encoder.go | 2 +- ml/backend.go | 10 +++ ml/backend/ggml/ggml.go | 153 ++++++++++++++++++++++++----------- model/models/llama/model.go | 6 +- model/models/mllama/model.go | 12 +-- 6 files changed, 129 insertions(+), 60 deletions(-) diff --git a/kvcache/causal.go b/kvcache/causal.go index 6a927cb8..3d1c71db 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -237,13 +237,13 @@ func (c *Causal) buildMask(ctx ml.Context, positions []int32, seqs []int) (ml.Te mask[i] = float32(math.Inf(-1)) } - maskTensor, err := ctx.FromFloatSlice(mask, length, batchSize) + maskTensor, err := ctx.Input().FromFloatSlice(mask, length, batchSize) if err != nil { return nil, err } if c.config.MaskDType != ml.DTypeF32 { - out := ctx.Empty(c.config.MaskDType, maskTensor.Shape()...) + out := ctx.Input().Empty(c.config.MaskDType, maskTensor.Shape()...) ctx.Forward(maskTensor.Copy(ctx, out)) maskTensor = out } @@ -440,7 +440,7 @@ func (c *Causal) Put(ctx ml.Context, key, value ml.Tensor) { } if _, ok := c.ctxs[c.curLayer]; !ok { - c.ctxs[c.curLayer] = c.backend.NewContext() + c.ctxs[c.curLayer] = c.backend.NewContextSize(2).Layer(c.curLayer) } if _, ok := c.keys[c.curLayer]; !ok { diff --git a/kvcache/encoder.go b/kvcache/encoder.go index 6a24e867..867ee37a 100644 --- a/kvcache/encoder.go +++ b/kvcache/encoder.go @@ -106,7 +106,7 @@ func (c *EncoderCache) Put(ctx ml.Context, key, value ml.Tensor) { } if _, ok := c.ctxs[c.curLayer]; !ok { - c.ctxs[c.curLayer] = c.backend.NewContext() + c.ctxs[c.curLayer] = c.backend.NewContextSize(2).Layer(c.curLayer) } if _, ok := c.keys[c.curLayer]; !ok { diff --git a/ml/backend.go b/ml/backend.go index 1eeb635b..915c9ad6 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -24,6 +24,7 @@ type Backend interface { Config() Config Get(name string) Tensor NewContext() Context + NewContextSize(size int) Context } // BackendCacheConfig should be implemented by backends that need special output @@ -101,6 +102,15 @@ type Context interface { Compute(...Tensor) MaxGraphNodes() int Close() + + // Input returns a context appropriate for creating input tensors + Input() Context + + // Output returns a context appropriate for creating output tensors + Output() Context + + // Layer returns a context appropriate for creating intermediate tensors + Layer(int) Context } type Tensor interface { diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 1a272256..a55d42f1 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -41,16 +41,14 @@ func devices() iter.Seq[*C.struct_ggml_backend_device] { } type Backend struct { - meta *fs.GGML + meta *fs.GGML + sched *C.struct_ggml_backend_sched + tensors map[string]*C.struct_ggml_tensor + input *C.struct_ggml_backend + output *C.struct_ggml_backend + layers map[int]*C.struct_ggml_backend flashAttention bool - - sched *C.struct_ggml_backend_sched - - tensors map[string]*C.struct_ggml_tensor - ctxs []*C.struct_ggml_context - backends []*C.struct_ggml_backend - bufts []*C.struct_ggml_backend_buffer_type } func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { @@ -118,7 +116,6 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } input := dbt{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} - slog.Info("input layer", "device", C.GoString(C.ggml_backend_dev_name(input.d))) var blocks int for key, value := range meta.KV() { @@ -136,18 +133,14 @@ func New(r *os.File, params 
ml.BackendParams) (ml.Backend, error) { layers := make([]dbt, blocks) for i := range layers { layers[i] = gpuBufferTypes[slices.IndexFunc(splits, indexFunc(i))] - slog.Info("layer", "i", i, "device", C.GoString(C.ggml_backend_dev_name(layers[i].d))) } output := gpuBufferTypes[slices.IndexFunc(splits, indexFunc(blocks))] - slog.Info("output layer", "device", C.GoString(C.ggml_backend_dev_name(output.d))) maxTensors := len(meta.Tensors().Items()) maxTensors += 1 maxTensors += blocks * 2 - slog.Info("max tensors", "max_tensors", maxTensors) - type tensor struct { source *fs.Tensor target string @@ -242,7 +235,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { for bs := range maps.Values(bbs) { for _, b := range bs { - slog.Info("model", "buffer", C.GoString(C.ggml_backend_buffer_name(b)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(b)))) + slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(b)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(b)))) } } @@ -290,11 +283,13 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return nil, err } + deviceBackends := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend) var backends []*C.struct_ggml_backend var bufts []*C.struct_ggml_backend_buffer_type for _, d := range append(gpus, append(accels, cpus...)...) { b := C.ggml_backend_dev_init(d, nil) backends = append(backends, b) + deviceBackends[d] = b bt := C.ggml_backend_get_default_buffer_type(b) if d := C.ggml_backend_get_device(b); C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU && len(gpus) > 0 { @@ -305,13 +300,13 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { bufts = append(bufts, bt) - slog.Info("compute buffer", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) + slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) } return &Backend{ flashAttention: params.FlashAttention, - meta: meta, - tensors: tensors, + meta: meta, + tensors: tensors, sched: C.ggml_backend_sched_new( (*C.ggml_backend_t)(unsafe.Pointer(&backends[0])), (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&bufts[0])), @@ -319,6 +314,15 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { C.size_t(max(8192, len(meta.Tensors().Items())*5)), true, ), + input: deviceBackends[input.d], + output: deviceBackends[output.d], + layers: func() map[int]*C.struct_ggml_backend { + m := make(map[int]*C.struct_ggml_backend) + for i, layer := range layers { + m[i] = deviceBackends[layer.d] + } + return m + }(), }, nil } @@ -339,15 +343,21 @@ func (b *Backend) Get(name string) ml.Tensor { } func (b *Backend) NewContext() ml.Context { - maxGraphNodes := max(8192, len(b.meta.Tensors().Items())*5) + return b.NewContextSize(max(8192, len(b.meta.Tensors().Items())*5)) +} + +func (b *Backend) NewContextSize(n int) ml.Context { return &Context{ - b: b, + b: b, ctx: C.ggml_init(C.struct_ggml_init_params{ - mem_size: C.size_t(maxGraphNodes)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(maxGraphNodes), false), + mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false), no_alloc: true, }), backend: C.ggml_backend_sched_get_backend(b.sched, 0), - maxGraphNodes: maxGraphNodes, + maxGraphNodes: n, + input: b.input, + output: b.output, + layers: b.layers, } } @@ -364,11 +374,61 @@ type Context struct { ctx 
*C.struct_ggml_context graph *C.struct_ggml_cgraph + + // backend is the backend used for new tensors backend *C.struct_ggml_backend + // input is the backend used for inputs + input *C.struct_ggml_backend + + // output is the backend used for outputs + output *C.struct_ggml_backend + + // output is the backend used for repeating layers + layers map[int]*C.struct_ggml_backend + maxGraphNodes int } +func (c *Context) Input() ml.Context { + if c.input != nil { + return &Context{ + b: c.b, + ctx: c.ctx, + backend: c.input, + maxGraphNodes: c.maxGraphNodes, + } + } + + return c +} + +func (c *Context) Output() ml.Context { + if c.output != nil { + return &Context{ + b: c.b, + ctx: c.ctx, + backend: c.output, + maxGraphNodes: c.maxGraphNodes, + } + } + + return c +} + +func (c *Context) Layer(i int) ml.Context { + if backend, ok := c.layers[i]; ok { + return &Context{ + b: c.b, + ctx: c.ctx, + backend: backend, + maxGraphNodes: c.maxGraphNodes, + } + } + + return c +} + func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { if c.graph == nil { c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false) @@ -414,7 +474,7 @@ func shapeToGGML(shape []int) *C.int64_t { return &sh[0] } -func newTensor(ctx Context, dtype ml.DType, shape []int) ml.Tensor { +func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { if len(shape) < 1 || len(shape) > 4 { panic("unsupported number of dimensions") } @@ -428,62 +488,61 @@ func newTensor(ctx Context, dtype ml.DType, shape []int) ml.Tensor { var t *C.struct_ggml_tensor switch dtype { case ml.DTypeF32: - t = C.ggml_new_tensor(ctx.ctx, C.GGML_TYPE_F32, C.int(len(shape)), shapeToGGML(shape)) + t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_F32, C.int(len(shape)), shapeToGGML(shape)) case ml.DTypeF16: - t = C.ggml_new_tensor(ctx.ctx, C.GGML_TYPE_F16, C.int(len(shape)), shapeToGGML(shape)) + t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_F16, C.int(len(shape)), shapeToGGML(shape)) case ml.DTypeI32: - t = C.ggml_new_tensor(ctx.ctx, C.GGML_TYPE_I32, C.int(len(shape)), shapeToGGML(shape)) + t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_I32, C.int(len(shape)), shapeToGGML(shape)) default: panic("unsupported dtype") } - b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) + b := C.ggml_backend_alloc_buffer(c.backend, C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) - C.ggml_set_input(t) - return &Tensor{b: ctx.b, t: t} + return &Tensor{b: c.b, t: t} } func (c Context) Empty(dtype ml.DType, shape ...int) ml.Tensor { - return newTensor(c, dtype, shape) + return c.newTensor(dtype, shape) } func (c Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor { - t := newTensor(c, dtype, shape) + t := c.newTensor(dtype, shape) C.ggml_set_zero(t.(*Tensor).t) return t } -func fromSlice[S ~[]E, E float32 | int32](ctx Context, s S, shape []int, dtype uint32) (ml.Tensor, error) { +func checkShape[S ~[]E, E any](s S, shape ...int) error { n := len(s) - - if n == 0 { - var shape C.int64_t = 0 - t := C.ggml_new_tensor(ctx.ctx, dtype, 1, &shape) - return &Tensor{b: ctx.b, t: t}, nil - } - for _, v := range shape { n /= v } if n != 1 { - return nil, fmt.Errorf("invalid shape %v for %d elements", shape, len(s)) + return fmt.Errorf("invalid shape: %v", shape) } - t := C.ggml_new_tensor(ctx.ctx, dtype, C.int(len(shape)), shapeToGGML(shape)) - b := C.ggml_backend_alloc_buffer(ctx.backend, C.ggml_nbytes(t)) - C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) - C.ggml_backend_tensor_set(t, unsafe.Pointer(&s[0]), 0, 
C.ggml_nbytes(t)) - C.ggml_set_input(t) - return &Tensor{b: ctx.b, t: t}, nil + return nil } func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) { - return fromSlice(c, s, shape, C.GGML_TYPE_F32) + if err := checkShape(s, shape...); err != nil { + return nil, err + } + + t := c.newTensor(ml.DTypeF32, shape) + C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + return t, nil } func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { - return fromSlice(c, s, shape, C.GGML_TYPE_I32) + if err := checkShape(s, shape...); err != nil { + return nil, err + } + + t := c.newTensor(ml.DTypeI32, shape) + C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + return t, nil } func (c Context) Close() { diff --git a/model/models/llama/model.go b/model/models/llama/model.go index 73a23621..9ccfff61 100644 --- a/model/models/llama/model.go +++ b/model/models/llama/model.go @@ -138,17 +138,17 @@ func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, outputs ml.Ten } func (m *Model) Forward(ctx ml.Context, opts model.Options) (ml.Tensor, error) { - inputs, err := ctx.FromIntSlice(opts.Inputs, len(opts.Inputs)) + inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) if err != nil { return nil, err } - positions, err := ctx.FromIntSlice(opts.Positions, len(opts.Positions)) + positions, err := ctx.Input().FromIntSlice(opts.Positions, len(opts.Positions)) if err != nil { return nil, err } - outputs, err := ctx.FromIntSlice(opts.Outputs, len(opts.Outputs)) + outputs, err := ctx.Output().FromIntSlice(opts.Outputs, len(opts.Outputs)) if err != nil { return nil, err } diff --git a/model/models/mllama/model.go b/model/models/mllama/model.go index 945c7295..54c63296 100644 --- a/model/models/mllama/model.go +++ b/model/models/mllama/model.go @@ -72,7 +72,7 @@ func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, er return nil, err } - pixelValues, err := ctx.FromFloatSlice(f32s, + pixelValues, err := ctx.Input().FromFloatSlice(f32s, m.ImageProcessor.imageSize, m.ImageProcessor.imageSize, m.ImageProcessor.numChannels, @@ -82,7 +82,7 @@ func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, er return nil, err } - aspectRatio, err := ctx.FromIntSlice([]int32{int32(aspectRatioID)}, 1) + aspectRatio, err := ctx.Input().FromIntSlice([]int32{int32(aspectRatioID)}, 1) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, er positions[i] = int32(i) } - positionIDs, err := ctx.FromIntSlice(positions, len(positions)) + positionIDs, err := ctx.Input().FromIntSlice(positions, len(positions)) if err != nil { return nil, err } @@ -136,17 +136,17 @@ func (m *Model) Forward(ctx ml.Context, opts model.Options) (ml.Tensor, error) { crossAttentionStates = opts.Multimodal[0].Multimodal.(ml.Tensor) } - inputs, err := ctx.FromIntSlice(opts.Inputs, len(opts.Inputs)) + inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) if err != nil { return nil, err } - positions, err := ctx.FromIntSlice(opts.Positions, len(opts.Positions)) + positions, err := ctx.Input().FromIntSlice(opts.Positions, len(opts.Positions)) if err != nil { return nil, err } - outputs, err := ctx.FromIntSlice(opts.Outputs, len(opts.Outputs)) + outputs, err := ctx.Output().FromIntSlice(opts.Outputs, len(opts.Outputs)) if err != nil { return nil, err } From 
58b9ec1f6b9c85369349e971cb2e17a016ce35dd Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 26 Feb 2025 12:16:59 -0800 Subject: [PATCH 013/157] kvcache: update tests --- kvcache/causal_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/kvcache/causal_test.go b/kvcache/causal_test.go index 84d8de54..412f33e3 100644 --- a/kvcache/causal_test.go +++ b/kvcache/causal_test.go @@ -303,6 +303,10 @@ func (b *testBackend) NewContext() ml.Context { return &testContext{} } +func (b *testBackend) NewContextSize(int) ml.Context { + return &testContext{} +} + func (b *testBackend) SystemInfo() string { return "not implemented" } @@ -346,11 +350,15 @@ func (c *testContext) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { return out, nil } +func (c *testContext) Input() ml.Context { return c } +func (c *testContext) Output() ml.Context { return c } +func (c *testContext) Layer(int) ml.Context { return c } + func (c *testContext) Forward(...ml.Tensor) ml.Context { return c } func (c *testContext) Compute(...ml.Tensor) {} -func (c *testContext) MaxTensors() int { +func (c *testContext) MaxGraphNodes() int { return 10 } From bf920883d54eaeaff174e82e9cc77197bdc7f645 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 26 Feb 2025 13:53:56 -0800 Subject: [PATCH 014/157] ml/backend/ggml: set cpu n_threads --- ml/backend/ggml/ggml.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index a55d42f1..f6061945 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -301,6 +301,10 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { bufts = append(bufts, bt) slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) + + if C.ggml_backend_is_cpu(b) { + C.ggml_backend_cpu_set_n_threads(b, C.int(params.NumThreads)) + } } return &Backend{ From 26c2e0bd35feb7f958924269ccfba6331a1dadbc Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 26 Feb 2025 14:17:08 -0800 Subject: [PATCH 015/157] ml/backend/ggml: handle user specified cpu offloading --- ml/backend/ggml/ggml.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index f6061945..5c1e55b5 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -67,7 +67,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { "num_key_values", len(meta.KV()), ) - type dbt struct { + type deviceBufferType struct { d *C.struct_ggml_backend_device bts []*C.struct_ggml_backend_buffer_type } @@ -96,7 +96,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { var sum uint64 var cumsum []uint64 - var gpuBufferTypes []dbt + var gpuDeviceBufferTypes []deviceBufferType for _, d := range gpus { var free, total C.size_t C.ggml_backend_dev_memory(d, &free, &total) @@ -104,7 +104,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { cumsum = append(cumsum, sum) bt := C.ggml_backend_dev_buffer_type(d) - gpuBufferTypes = append(gpuBufferTypes, dbt{ + gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{ d: d, bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuBufferTypes...), }) @@ -115,7 +115,8 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { splits[i] = float64(cumsum[i]) / float64(sum) } - input := dbt{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} + 
cpuDeviceBufferTypes := deviceBufferType{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} + input := cpuDeviceBufferTypes var blocks int for key, value := range meta.KV() { @@ -124,18 +125,22 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } - indexFunc := func(i int) func(float64) bool { - return func(f float64) bool { - return float64(i)/float64(blocks+1) < f + assignLayer := func(i int) (temp deviceBufferType) { + if i >= params.NumGPULayers { + return cpuDeviceBufferTypes } + + return gpuDeviceBufferTypes[slices.IndexFunc(splits, func(f float64) bool { + return float64(i)/float64(blocks+1) < f + })] } - layers := make([]dbt, blocks) + layers := make([]deviceBufferType, blocks) for i := range layers { - layers[i] = gpuBufferTypes[slices.IndexFunc(splits, indexFunc(i))] + layers[i] = assignLayer(i) } - output := gpuBufferTypes[slices.IndexFunc(splits, indexFunc(blocks))] + output := assignLayer(blocks) maxTensors := len(meta.Tensors().Items()) maxTensors += 1 From b5312f30e80f33ad5131aca0c892a54517a1a633 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 26 Feb 2025 15:14:16 -0800 Subject: [PATCH 016/157] ml/backend/ggml: handle tensor split --- ml/backend/ggml/ggml.go | 45 +++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 5c1e55b5..a597abed 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -93,16 +93,8 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } - var sum uint64 - var cumsum []uint64 - var gpuDeviceBufferTypes []deviceBufferType for _, d := range gpus { - var free, total C.size_t - C.ggml_backend_dev_memory(d, &free, &total) - sum += uint64(free) - cumsum = append(cumsum, sum) - bt := C.ggml_backend_dev_buffer_type(d) gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{ d: d, @@ -110,9 +102,33 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { }) } - splits := make([]float64, len(cumsum)) + splits := make([]float32, len(gpus)) + if func() bool { + for _, s := range params.TensorSplit { + if s != 0 { + return true + } + } + + return false + }() { + splits = params.TensorSplit + } else { + for i := range splits { + var free, total C.size_t + C.ggml_backend_dev_memory(gpus[i], &free, &total) + splits[i] = float32(free) + } + } + + var sum float32 for i := range splits { - splits[i] = float64(cumsum[i]) / float64(sum) + sum += splits[i] + splits[i] = sum + } + + for i := range splits { + splits[i] /= sum } cpuDeviceBufferTypes := deviceBufferType{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} @@ -130,9 +146,12 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return cpuDeviceBufferTypes } - return gpuDeviceBufferTypes[slices.IndexFunc(splits, func(f float64) bool { - return float64(i)/float64(blocks+1) < f - })] + index := slices.IndexFunc(splits, func(f float32) bool { return float32(i)/float32(blocks+1) < f }) + if index < 0 || index >= len(gpuDeviceBufferTypes) { + return cpuDeviceBufferTypes + } + + return gpuDeviceBufferTypes[index] } layers := make([]deviceBufferType, blocks) From 2dc60d4620f216dce463e649812cc59f83ed0f06 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 27 Feb 2025 16:46:01 -0800 Subject: [PATCH 017/157] ml/backend/ggml: offload vision to cpu temporary until tensor loading can accurately account for vision models --- ml/backend/ggml/ggml.go | 16 ++++++---------- 1 
file changed, 6 insertions(+), 10 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index a597abed..6d902045 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -134,13 +134,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { cpuDeviceBufferTypes := deviceBufferType{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} input := cpuDeviceBufferTypes - var blocks int - for key, value := range meta.KV() { - if strings.HasSuffix(key, ".block_count") { - blocks += int(value.(uint32)) - } - } - + blocks := int(meta.KV().BlockCount()) assignLayer := func(i int) (temp deviceBufferType) { if i >= params.NumGPULayers { return cpuDeviceBufferTypes @@ -206,7 +200,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return nil } - hasPart := func(s string, parts ...string) bool { + contains := func(s string, parts ...string) bool { split := strings.Split(s, ".") for _, part := range parts { if slices.Contains(split, part) { @@ -219,10 +213,12 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { for _, t := range meta.Tensors().Items() { switch { - case hasPart(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"): + case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"): createTensor(tensor{source: t}, input.bts) - case hasPart(t.Name, "cls", "output", "output_norm"): + case contains(t.Name, "cls", "output", "output_norm"): createTensor(tensor{source: t}, output.bts) + case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."): + createTensor(tensor{source: t}, input.bts) default: if i := func() int { if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 { From daaf42e4a4df43d4ca1d3d39495a4bd20ada0187 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 28 Feb 2025 15:42:52 -0800 Subject: [PATCH 018/157] ml/backend/ggml: clean up --- ml/backend/ggml/ggml.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 6d902045..9cee8216 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -84,12 +84,12 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } - var cpuBufferTypes []*C.struct_ggml_backend_buffer_type + cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)} for _, d := range append(accels, append(gpus, cpus...)...) 
{ switch C.ggml_backend_dev_type(d) { case C.GGML_BACKEND_DEVICE_TYPE_CPU, C.GGML_BACKEND_DEVICE_TYPE_ACCEL: - cpuBufferTypes = append(cpuBufferTypes, C.ggml_backend_dev_buffer_type(d)) + cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, C.ggml_backend_dev_buffer_type(d)) } } @@ -98,7 +98,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { bt := C.ggml_backend_dev_buffer_type(d) gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{ d: d, - bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuBufferTypes...), + bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuDeviceBufferType.bts...), }) } @@ -131,18 +131,17 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { splits[i] /= sum } - cpuDeviceBufferTypes := deviceBufferType{C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU), cpuBufferTypes} - input := cpuDeviceBufferTypes + input := cpuDeviceBufferType blocks := int(meta.KV().BlockCount()) - assignLayer := func(i int) (temp deviceBufferType) { + assignLayer := func(i int) deviceBufferType { if i >= params.NumGPULayers { - return cpuDeviceBufferTypes + return cpuDeviceBufferType } index := slices.IndexFunc(splits, func(f float32) bool { return float32(i)/float32(blocks+1) < f }) if index < 0 || index >= len(gpuDeviceBufferTypes) { - return cpuDeviceBufferTypes + return cpuDeviceBufferType } return gpuDeviceBufferTypes[index] From 45df786f09fec84ff68795845a4b3395cc0def90 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 4 Mar 2025 13:06:56 -0800 Subject: [PATCH 019/157] comments --- ml/backend/ggml/ggml.go | 212 ++++++++++++++++++++++------------------ 1 file changed, 117 insertions(+), 95 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 9cee8216..c4adcd98 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -12,7 +12,6 @@ import ( "errors" "fmt" "io" - "iter" "log/slog" "maps" "os" @@ -29,26 +28,34 @@ import ( "golang.org/x/sync/errgroup" ) -func devices() iter.Seq[*C.struct_ggml_backend_device] { - return func(yield func(*C.struct_ggml_backend_device) bool) { - ggml.OnceLoad() - for i := range C.ggml_backend_dev_count() { - if !yield(C.ggml_backend_dev_get(i)) { - return - } - } +func devices() []*C.struct_ggml_backend_device { + ggml.OnceLoad() + ds := make([]*C.struct_ggml_backend_device, C.ggml_backend_dev_count()) + for i := range ds { + ds[i] = C.ggml_backend_dev_get(C.size_t(i)) } + + return ds } type Backend struct { meta *fs.GGML sched *C.struct_ggml_backend_sched tensors map[string]*C.struct_ggml_tensor - input *C.struct_ggml_backend - output *C.struct_ggml_backend - layers map[int]*C.struct_ggml_backend + + // input is the backend used for inputs + input *C.struct_ggml_backend + + // output is the backend used for outputs + output *C.struct_ggml_backend + + // layers is the backend used for repeating layers + layers map[int]*C.struct_ggml_backend flashAttention bool + + // maxGraphNodes is the maximum allowed number of graph nodes in this scheduler + maxGraphNodes int } func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { @@ -73,7 +80,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } var cpus, accels, gpus []*C.struct_ggml_backend_device - for d := range devices() { + for _, d := range devices() { switch C.ggml_backend_dev_type(d) { case C.GGML_BACKEND_DEVICE_TYPE_CPU: cpus = append(cpus, d) @@ -84,6 +91,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } + // create list of buffer 
types for the cpu cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)} for _, d := range append(accels, append(gpus, cpus...)...) { switch C.ggml_backend_dev_type(d) { @@ -93,6 +101,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } + // create list of buffer types for each gpu var gpuDeviceBufferTypes []deviceBufferType for _, d := range gpus { bt := C.ggml_backend_dev_buffer_type(d) @@ -102,44 +111,53 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { }) } - splits := make([]float32, len(gpus)) - if func() bool { - for _, s := range params.TensorSplit { - if s != 0 { - return true - } + useDefaultSplit := true + for _, s := range params.TensorSplit { + if s != 0 { + useDefaultSplit = false + break } + } - return false - }() { - splits = params.TensorSplit - } else { + // calculate splits + splits := make([]float32, len(gpus)) + if useDefaultSplit { + // default: split on free memory for i := range splits { var free, total C.size_t C.ggml_backend_dev_memory(gpus[i], &free, &total) splits[i] = float32(free) } + } else { + splits = params.TensorSplit } var sum float32 + // cumulative sum of all splits for i := range splits { sum += splits[i] splits[i] = sum } + // normalize splits for i := range splits { splits[i] /= sum } + // inputs always use cpu input := cpuDeviceBufferType blocks := int(meta.KV().BlockCount()) + + // define a range of gpu layers. anything outside of this range is assigned to the cpu + gpuRangeStart := max(0, blocks-params.NumGPULayers) + gpuRangeStop := min(gpuRangeStart+params.NumGPULayers, blocks+1) assignLayer := func(i int) deviceBufferType { - if i >= params.NumGPULayers { + if i < gpuRangeStart || i >= gpuRangeStop { return cpuDeviceBufferType } - index := slices.IndexFunc(splits, func(f float32) bool { return float32(i)/float32(blocks+1) < f }) + index := slices.IndexFunc(splits, func(f float32) bool { return float32(i-gpuRangeStart)/float32(gpuRangeStop-gpuRangeStart) < f }) if index < 0 || index >= len(gpuDeviceBufferTypes) { return cpuDeviceBufferType } @@ -147,15 +165,18 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return gpuDeviceBufferTypes[index] } + // repeating layers are assigned based on their index in reverse order, e.g. 
i / (block_count + 1) layers := make([]deviceBufferType, blocks) for i := range layers { layers[i] = assignLayer(i) } + // outputs are assigned iff allowed by splits and configured number of gpu layers output := assignLayer(blocks) maxTensors := len(meta.Tensors().Items()) maxTensors += 1 + // each layer has at most 2 extra tensors for rope operations maxTensors += blocks * 2 type tensor struct { @@ -163,8 +184,10 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { target string } + // some tensors are mapped to different names so keep a list targets := make(map[string][]string) + // contexts are shared by tensors of the same buffer type ctxs := make(map[*C.struct_ggml_backend_buffer_type]*C.struct_ggml_context) createTensor := func(t tensor, bts []*C.struct_ggml_backend_buffer_type) *C.struct_ggml_tensor { for _, bt := range bts { @@ -217,19 +240,21 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { case contains(t.Name, "cls", "output", "output_norm"): createTensor(tensor{source: t}, output.bts) case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."): + // TODO: assign vision tensors to the gpu if possible createTensor(tensor{source: t}, input.bts) default: - if i := func() int { - if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 { - if i, err := strconv.Atoi(fields[0]); err == nil { - return i - } + layerIndex := -1 + if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 { + if i, err := strconv.Atoi(fields[0]); err == nil { + layerIndex = i } + } - return -1 - }(); i >= 0 { - createTensor(tensor{source: t}, layers[i].bts) + if layerIndex >= 0 { + createTensor(tensor{source: t}, layers[layerIndex].bts) } else { + // this is a repeating tensor that doesn't explicitly associated with a layer so + // duplicate it for each layer for i, layer := range layers { createTensor(tensor{ source: t, @@ -240,8 +265,8 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } - bbs := make(map[*C.struct_ggml_context][]*C.struct_ggml_backend_buffer, len(ctxs)) - + // allocate buffers for each context + bbs := make(map[*C.struct_ggml_context]*C.struct_ggml_backend_buffer, len(ctxs)) for bt, c := range ctxs { if C.ggml_get_first_tensor(c) == nil { continue @@ -249,15 +274,14 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt) C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS) - bbs[c] = append(bbs[c], b) + bbs[c] = b } for bs := range maps.Values(bbs) { - for _, b := range bs { - slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(b)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(b)))) - } + slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs)))) } + // map tensor names to tensors for easy lookup later tensors := make(map[string]*C.struct_ggml_tensor) for _, c := range ctxs { for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) { @@ -265,6 +289,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } } + // concurrently read in tensor data. 
uses a section reader which is safe for concurrent reads sr := io.NewSectionReader(r, int64(meta.Tensors().Offset), n-int64(meta.Tensors().Offset)) var g errgroup.Group for _, t := range meta.Tensors().Items() { @@ -289,10 +314,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return errors.New("short read") } - cname := C.CString(t.Name) C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), 0, C.size_t(t.Size())) - C.free(unsafe.Pointer(cname)) - return nil }) } @@ -302,39 +324,45 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return nil, err } + // map devices to backends so tensors created post initialization can be assigned to the correct device deviceBackends := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend) - var backends []*C.struct_ggml_backend - var bufts []*C.struct_ggml_backend_buffer_type + + // create backends and buffer types used for the compute graph scheduler + var schedBackends []*C.struct_ggml_backend + var schedBufts []*C.struct_ggml_backend_buffer_type for _, d := range append(gpus, append(accels, cpus...)...) { b := C.ggml_backend_dev_init(d, nil) - backends = append(backends, b) + schedBackends = append(schedBackends, b) deviceBackends[d] = b bt := C.ggml_backend_get_default_buffer_type(b) + // use the first gpu host buffer type for gpu if possible if d := C.ggml_backend_get_device(b); C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU && len(gpus) > 0 { if hbt := C.ggml_backend_dev_host_buffer_type(d); hbt != nil { bt = hbt } } - bufts = append(bufts, bt) + schedBufts = append(schedBufts, bt) slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) if C.ggml_backend_is_cpu(b) { + // set number of threads for cpu backend C.ggml_backend_cpu_set_n_threads(b, C.int(params.NumThreads)) } } + maxGraphNodes := max(8192, len(meta.Tensors().Items())*5) return &Backend{ flashAttention: params.FlashAttention, meta: meta, tensors: tensors, sched: C.ggml_backend_sched_new( - (*C.ggml_backend_t)(unsafe.Pointer(&backends[0])), - (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&bufts[0])), - C.int(len(backends)), - C.size_t(max(8192, len(meta.Tensors().Items())*5)), + (*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])), + (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])), + C.int(len(schedBackends)), + C.size_t(maxGraphNodes), true, ), input: deviceBackends[input.d], @@ -346,6 +374,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { } return m }(), + maxGraphNodes: maxGraphNodes, }, nil } @@ -366,10 +395,11 @@ func (b *Backend) Get(name string) ml.Tensor { } func (b *Backend) NewContext() ml.Context { - return b.NewContextSize(max(8192, len(b.meta.Tensors().Items())*5)) + return b.NewContextSize(b.maxGraphNodes) } func (b *Backend) NewContextSize(n int) ml.Context { + n = min(n, b.maxGraphNodes) return &Context{ b: b, ctx: C.ggml_init(C.struct_ggml_init_params{ @@ -378,9 +408,6 @@ func (b *Backend) NewContextSize(n int) ml.Context { }), backend: C.ggml_backend_sched_get_backend(b.sched, 0), maxGraphNodes: n, - input: b.input, - output: b.output, - layers: b.layers, } } @@ -401,46 +428,38 @@ type Context struct { // backend is the backend used for new tensors backend *C.struct_ggml_backend - // input is the backend used for inputs - input *C.struct_ggml_backend - - // output is the backend used for outputs - output *C.struct_ggml_backend - - // output is the backend used for repeating layers - layers 
map[int]*C.struct_ggml_backend - + // maxGraphNodes is the maximum allowed number of graph nodes in this context maxGraphNodes int } -func (c *Context) Input() ml.Context { - if c.input != nil { +func (c Context) Input() ml.Context { + if c.b.input != nil { return &Context{ b: c.b, ctx: c.ctx, - backend: c.input, + backend: c.b.input, maxGraphNodes: c.maxGraphNodes, } } - return c + return &c } -func (c *Context) Output() ml.Context { - if c.output != nil { +func (c Context) Output() ml.Context { + if c.b.output != nil { return &Context{ b: c.b, ctx: c.ctx, - backend: c.output, + backend: c.b.output, maxGraphNodes: c.maxGraphNodes, } } - return c + return &c } -func (c *Context) Layer(i int) ml.Context { - if backend, ok := c.layers[i]; ok { +func (c Context) Layer(i int) ml.Context { + if backend, ok := c.b.layers[i]; ok { return &Context{ b: c.b, ctx: c.ctx, @@ -449,7 +468,7 @@ func (c *Context) Layer(i int) ml.Context { } } - return c + return &c } func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { @@ -464,10 +483,9 @@ func (c *Context) Forward(tensors ...ml.Tensor) ml.Context { return c } -func (c *Context) Compute(tensors ...ml.Tensor) { - C.ggml_backend_sched_reset(c.b.sched) - C.ggml_backend_sched_alloc_graph(c.b.sched, c.graph) +func (c Context) Compute(tensors ...ml.Tensor) { C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph) + C.ggml_backend_sched_reset(c.b.sched) needSync := true sync := func() { @@ -484,7 +502,7 @@ func (c *Context) Compute(tensors ...ml.Tensor) { } } -func (c *Context) MaxGraphNodes() int { +func (c Context) MaxGraphNodes() int { return c.maxGraphNodes } @@ -498,7 +516,22 @@ func shapeToGGML(shape []int) *C.int64_t { } func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { - if len(shape) < 1 || len(shape) > 4 { + var cdtype uint32 + switch dtype { + case ml.DTypeF32: + cdtype = C.GGML_TYPE_F32 + case ml.DTypeF16: + cdtype = C.GGML_TYPE_F16 + case ml.DTypeI32: + cdtype = C.GGML_TYPE_I32 + default: + panic("unsupported dtype") + } + + if len(shape) < 1 { + var shape C.int64_t = 0 + return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)} + } else if len(shape) > 4 { panic("unsupported number of dimensions") } @@ -508,18 +541,7 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { } } - var t *C.struct_ggml_tensor - switch dtype { - case ml.DTypeF32: - t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_F32, C.int(len(shape)), shapeToGGML(shape)) - case ml.DTypeF16: - t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_F16, C.int(len(shape)), shapeToGGML(shape)) - case ml.DTypeI32: - t = C.ggml_new_tensor(c.ctx, C.GGML_TYPE_I32, C.int(len(shape)), shapeToGGML(shape)) - default: - panic("unsupported dtype") - } - + t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape)) b := C.ggml_backend_alloc_buffer(c.backend, C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) return &Tensor{b: c.b, t: t} @@ -549,7 +571,7 @@ func checkShape[S ~[]E, E any](s S, shape ...int) error { } func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) { - if err := checkShape(s, shape...); err != nil { + if err := checkShape(s, shape...); err != nil && len(shape) > 0 { return nil, err } @@ -559,7 +581,7 @@ func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) { } func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { - if err := checkShape(s, shape...); err != nil { + if err := checkShape(s, shape...); err != nil && len(shape) 
> 0 { return nil, err } @@ -568,8 +590,8 @@ func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { return t, nil } -func (c Context) Close() { - if c.ctx != nil { +func (c *Context) Close() { + if c != nil { C.ggml_free(c.ctx) } } From b27e8f3f10ff6a95dee7e43215350fa0da2bb691 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 5 Mar 2025 14:48:27 -0800 Subject: [PATCH 020/157] ml/backend/ggml: use backend buffer type this ensures the tensor is created on the right buffer type for backends such as cpu --- ml/backend/ggml/ggml.go | 58 +++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 26 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index c4adcd98..af5dbf99 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -44,13 +44,13 @@ type Backend struct { tensors map[string]*C.struct_ggml_tensor // input is the backend used for inputs - input *C.struct_ggml_backend + input *C.struct_ggml_backend_buffer_type // output is the backend used for outputs - output *C.struct_ggml_backend + output *C.struct_ggml_backend_buffer_type // layers is the backend used for repeating layers - layers map[int]*C.struct_ggml_backend + layers map[int]*C.struct_ggml_backend_buffer_type flashAttention bool @@ -83,7 +83,10 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { for _, d := range devices() { switch C.ggml_backend_dev_type(d) { case C.GGML_BACKEND_DEVICE_TYPE_CPU: - cpus = append(cpus, d) + if len(cpus) == 0 { + // only the first cpu device should be used + cpus = append(cpus, d) + } case C.GGML_BACKEND_DEVICE_TYPE_ACCEL: accels = append(accels, d) case C.GGML_BACKEND_DEVICE_TYPE_GPU: @@ -324,25 +327,25 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { return nil, err } - // map devices to backends so tensors created post initialization can be assigned to the correct device - deviceBackends := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend) + // map devices to backend buffer types so new tensors can be assigned to the correct device + deviceBufferTypes := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend_buffer_type) // create backends and buffer types used for the compute graph scheduler var schedBackends []*C.struct_ggml_backend var schedBufts []*C.struct_ggml_backend_buffer_type for _, d := range append(gpus, append(accels, cpus...)...) 
{ b := C.ggml_backend_dev_init(d, nil) - schedBackends = append(schedBackends, b) - deviceBackends[d] = b - bt := C.ggml_backend_get_default_buffer_type(b) - // use the first gpu host buffer type for gpu if possible if d := C.ggml_backend_get_device(b); C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU && len(gpus) > 0 { - if hbt := C.ggml_backend_dev_host_buffer_type(d); hbt != nil { + // use the first gpu host buffer type for gpu if possible + if hbt := C.ggml_backend_dev_host_buffer_type(gpus[0]); hbt != nil { bt = hbt } } + deviceBufferTypes[d] = bt + + schedBackends = append(schedBackends, b) schedBufts = append(schedBufts, bt) slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) @@ -365,12 +368,12 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { C.size_t(maxGraphNodes), true, ), - input: deviceBackends[input.d], - output: deviceBackends[output.d], - layers: func() map[int]*C.struct_ggml_backend { - m := make(map[int]*C.struct_ggml_backend) + input: deviceBufferTypes[input.d], + output: deviceBufferTypes[output.d], + layers: func() map[int]*C.struct_ggml_backend_buffer_type { + m := make(map[int]*C.struct_ggml_backend_buffer_type) for i, layer := range layers { - m[i] = deviceBackends[layer.d] + m[i] = deviceBufferTypes[layer.d] } return m }(), @@ -401,13 +404,12 @@ func (b *Backend) NewContext() ml.Context { func (b *Backend) NewContextSize(n int) ml.Context { n = min(n, b.maxGraphNodes) return &Context{ - b: b, + b: b, + maxGraphNodes: n, ctx: C.ggml_init(C.struct_ggml_init_params{ mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false), no_alloc: true, }), - backend: C.ggml_backend_sched_get_backend(b.sched, 0), - maxGraphNodes: n, } } @@ -425,8 +427,8 @@ type Context struct { ctx *C.struct_ggml_context graph *C.struct_ggml_cgraph - // backend is the backend used for new tensors - backend *C.struct_ggml_backend + // buft is the buffer type used for new tensors + buft *C.struct_ggml_backend_buffer_type // maxGraphNodes is the maximum allowed number of graph nodes in this context maxGraphNodes int @@ -437,7 +439,7 @@ func (c Context) Input() ml.Context { return &Context{ b: c.b, ctx: c.ctx, - backend: c.b.input, + buft: c.b.input, maxGraphNodes: c.maxGraphNodes, } } @@ -450,7 +452,7 @@ func (c Context) Output() ml.Context { return &Context{ b: c.b, ctx: c.ctx, - backend: c.b.output, + buft: c.b.output, maxGraphNodes: c.maxGraphNodes, } } @@ -459,11 +461,11 @@ func (c Context) Output() ml.Context { } func (c Context) Layer(i int) ml.Context { - if backend, ok := c.b.layers[i]; ok { + if buft, ok := c.b.layers[i]; ok { return &Context{ b: c.b, ctx: c.ctx, - backend: backend, + buft: buft, maxGraphNodes: c.maxGraphNodes, } } @@ -516,6 +518,10 @@ func shapeToGGML(shape []int) *C.int64_t { } func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { + if c.buft == nil { + panic("set Input, Output, or Layer before creating tensors") + } + var cdtype uint32 switch dtype { case ml.DTypeF32: @@ -542,7 +548,7 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { } t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape)) - b := C.ggml_backend_alloc_buffer(c.backend, C.ggml_nbytes(t)) + b := C.ggml_backend_buft_alloc_buffer(c.buft, C.ggml_nbytes(t)) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) return &Tensor{b: c.b, t: t} } From 98272fbd58699366de72b5111d4f8fa79d9f71a4 Mon Sep 
17 00:00:00 2001 From: Jesse Gross Date: Fri, 7 Mar 2025 11:19:03 -0800 Subject: [PATCH 021/157] additional review comments --- ml/backend/ggml/ggml.go | 26 ++++++++++++++++++++------ model/models/mllama/model_text.go | 22 ++++++++++++---------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index af5dbf99..00873b4f 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -402,7 +402,10 @@ func (b *Backend) NewContext() ml.Context { } func (b *Backend) NewContextSize(n int) ml.Context { - n = min(n, b.maxGraphNodes) + if n > b.maxGraphNodes { + panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes)) + } + return &Context{ b: b, maxGraphNodes: n, @@ -534,7 +537,7 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { panic("unsupported dtype") } - if len(shape) < 1 { + if len(shape) < 1 || shape[0] == 0 { var shape C.int64_t = 0 return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)} } else if len(shape) > 4 { @@ -565,6 +568,11 @@ func (c Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor { func checkShape[S ~[]E, E any](s S, shape ...int) error { n := len(s) + + if n == 0 { + return nil + } + for _, v := range shape { n /= v } @@ -577,22 +585,28 @@ func checkShape[S ~[]E, E any](s S, shape ...int) error { } func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) { - if err := checkShape(s, shape...); err != nil && len(shape) > 0 { + if err := checkShape(s, shape...); err != nil { return nil, err } t := c.newTensor(ml.DTypeF32, shape) - C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + if len(s) > 0 { + C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + } + return t, nil } func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) { - if err := checkShape(s, shape...); err != nil && len(shape) > 0 { + if err := checkShape(s, shape...); err != nil { return nil, err } t := c.newTensor(ml.DTypeI32, shape) - C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + if len(s) > 0 { + C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t)) + } + return t, nil } diff --git a/model/models/mllama/model_text.go b/model/models/mllama/model_text.go index e294b4c7..373589f9 100644 --- a/model/models/mllama/model_text.go +++ b/model/models/mllama/model_text.go @@ -10,10 +10,11 @@ import ( ) type TextSelfAttention struct { - Query *nn.Linear `gguf:"attn_q"` - Key *nn.Linear `gguf:"attn_k"` - Value *nn.Linear `gguf:"attn_v"` - Output *nn.Linear `gguf:"attn_output"` + Query *nn.Linear `gguf:"attn_q"` + Key *nn.Linear `gguf:"attn_k"` + Value *nn.Linear `gguf:"attn_v"` + Output *nn.Linear `gguf:"attn_output"` + RopeFactors ml.Tensor `gguf:"rope_freqs.weight"` } func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions, _ ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor { @@ -22,11 +23,11 @@ func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions, _ m query := sa.Query.Forward(ctx, hiddenState) query = query.Reshape(ctx, headDim, opts.numHeads, batchSize) - query = query.RoPE(ctx, positions, opts.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + query = query.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) key := 
sa.Key.Forward(ctx, hiddenState) key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize) - key = key.RoPE(ctx, positions, opts.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + key = key.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) value := sa.Value.Forward(ctx, hiddenState) value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize) @@ -39,8 +40,11 @@ func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions, _ m } func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { - // This will only get called for layers in the causal cache, which are just the self attention layers - return key.RoPE(ctx, shift, m.RopeFactors, m.ropeDim, m.ropeBase, m.ropeScale), nil + if sa, ok := m.Transformer.Layers[layer].(*TextSelfAttentionDecoderLayer); ok { + return key.RoPE(ctx, shift, sa.SelfAttention.RopeFactors, m.ropeDim, m.ropeBase, m.ropeScale), nil + } + + return key, nil } type TextMLP struct { @@ -191,8 +195,6 @@ func (d *TextDecoder) Forward(ctx ml.Context, hiddenState, positionIDs, outputs, } type TextModelOptions struct { - RopeFactors ml.Tensor `gguf:"rope_freqs.weight"` - hiddenSize, numHeads, numKVHeads int eps, ropeBase, ropeScale float32 ropeDim uint32 From 0daaaef8c99d94853eb12e897b2c15e0743a2575 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 7 Mar 2025 15:14:22 -0800 Subject: [PATCH 022/157] ollamarunner: Quiet debug logging and panic on unimplemented features Debug logging of every token has previously caused test timeouts on slower machines. --- model/process_text.go | 4 ---- runner/ollamarunner/runner.go | 10 ++++++++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/model/process_text.go b/model/process_text.go index bfb0a5f2..0d75a0ed 100644 --- a/model/process_text.go +++ b/model/process_text.go @@ -177,7 +177,6 @@ func (bpe BytePairEncoding) Encode(s string, addSpecial bool) ([]int32, error) { for _, frag := range fragments { if len(frag.ids) > 0 { ids = append(ids, frag.ids...) 
- slog.Debug("encoded", "text", frag.value, "ids", frag.ids, "special", true) continue } @@ -201,7 +200,6 @@ func (bpe BytePairEncoding) Encode(s string, addSpecial bool) ([]int32, error) { // short circuit if the fragment is in the vocabulary if id := bpe.vocab.Encode(sb.String()); id >= 0 { ids = append(ids, id) - slog.Debug("encoded", "text", sb.String(), "ids", []int32{id}) continue } @@ -275,7 +273,6 @@ func (bpe BytePairEncoding) Encode(s string, addSpecial bool) ([]int32, error) { // TODO: handle the edge case where the rune isn't in the vocabulary if id := bpe.vocab.Encode(string(merge.runes)); id >= 0 { ids = append(ids, id) - slog.Debug("encoded", "text", string(merge.runes), "ids", []int32{id}) } } } @@ -329,6 +326,5 @@ func (bpe BytePairEncoding) Decode(ids []int32) (string, error) { } } - slog.Debug("decoded", "ids", ids, "text", sb.String()) return sb.String(), nil } diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index 81e06562..d9f47970 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -436,8 +436,10 @@ func (s *Server) processBatch() error { // if done processing the prompt, generate an embedding and return if seq.embeddingOnly { // TODO(jessegross): Embedding support - s.removeSequence(i, "") - continue + // s.removeSequence(i, "") + // continue + + panic("generation of embedding outputs not yet supported") } // sample a token @@ -597,6 +599,10 @@ func (s *Server) completion(w http.ResponseWriter, r *http.Request) { req.Seed, ) + if req.Grammar != "" { + panic("grammars are not yet supported") + } + seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{ numPredict: req.NumPredict, stop: req.Stop, From 6da8b6a87902c8cf875028329ade512c8d859059 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 7 Mar 2025 14:20:31 -0800 Subject: [PATCH 023/157] kvcache: Support non-causal attention Models can disable causality for all or part of their processing while continuing to store data in the KV cache. 
--- kvcache/causal.go | 40 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/kvcache/causal.go b/kvcache/causal.go index 3d1c71db..d519cf60 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -20,6 +20,7 @@ type shiftFn func(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, e type Causal struct { DType ml.DType Capacity int32 + causal bool windowSize int32 // config controls mostly backend-specific optimizations @@ -42,6 +43,12 @@ type Causal struct { // locations in the cache that are needed for this batch curCellRange cellRange + // curSequences is the sequences corresponding to this pass's entries in the cache + curSequences []int + + // curPositions is the positions corresponding to this pass's entries in the cache + curPositions []int32 + // ** cache metadata ** // for each possible location in the cache, stores the position and set of sequences @@ -71,6 +78,7 @@ type cellRange struct { func NewCausalCache(shift shiftFn) *Causal { return &Causal{ + causal: true, windowSize: math.MaxInt32, shiftFn: shift, ctxs: make(map[int]ml.Context), @@ -81,6 +89,7 @@ func NewCausalCache(shift shiftFn) *Causal { func NewSWACache(windowSize int32, shift shiftFn) *Causal { return &Causal{ + causal: true, windowSize: windowSize, shiftFn: shift, ctxs: make(map[int]ml.Context), @@ -133,6 +142,8 @@ func (c *Causal) Close() { func (c *Causal) StartForward(ctx ml.Context, positions []int32, seqs []int) error { c.curBatchSize = len(positions) + c.curSequences = seqs + c.curPositions = positions var err error c.curLoc, err = c.findStartLoc() @@ -171,7 +182,7 @@ func (c *Causal) StartForward(ctx ml.Context, positions []int32, seqs []int) err c.cellRanges[seq] = seqRange } - c.curMask, err = c.buildMask(ctx, positions, seqs) + c.curMask, err = c.buildMask(ctx) return err } @@ -212,7 +223,7 @@ func roundUp(length, pad int) int { // Builds a mask of history x batch indicating whether for each token in the batch the // token in the history should apply. This is based on both the sequence and causality (the // position of the history is not ahead of the token in the batch). -func (c *Causal) buildMask(ctx ml.Context, positions []int32, seqs []int) (ml.Tensor, error) { +func (c *Causal) buildMask(ctx ml.Context) (ml.Tensor, error) { // Align and pad the two dimensions as required by the backend batchSize := roundUp(c.curBatchSize, c.config.MaskBatchPadding) @@ -224,8 +235,9 @@ func (c *Causal) buildMask(ctx ml.Context, positions []int32, seqs []int) (ml.Te for i := range c.curBatchSize { for j := c.curCellRange.min; j <= c.curCellRange.max; j++ { - if !slices.Contains(c.cells[j].sequences, seqs[i]) || c.cells[j].pos > positions[i] || - c.cells[j].pos < positions[i]-c.windowSize { + if !slices.Contains(c.cells[j].sequences, c.curSequences[i]) || + (c.causal && c.cells[j].pos > c.curPositions[i]) || + c.cells[j].pos < c.curPositions[i]-c.windowSize { mask[i*length+(j-c.curCellRange.min)] = float32(math.Inf(-1)) } } @@ -391,6 +403,26 @@ func (c *Causal) SetLayer(layer int) { c.curLayer = layer } +// SetCausal enables or disables causal mask generation for subsequent calls to Get. +// This state carries over to future forward passes. The default value is true. +// +// ctx may be set to nil if this is called from outside of a forward pass, for +// example, when initializing the cache. 
+func (c *Causal) SetCausal(ctx ml.Context, causal bool) { + if c.causal != causal { + c.causal = causal + + if ctx != nil { + var err error + c.curMask, err = c.buildMask(ctx) + if err != nil { + // This error should never occur because we have previously built a mask with the same shape + panic(fmt.Errorf("SetCausal: %w", err)) + } + } + } +} + func (c *Causal) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { key := c.keys[c.curLayer] value := c.values[c.curLayer] From 25f9b152f919b0262531873c935c301201d546aa Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 7 Mar 2025 17:20:54 -0800 Subject: [PATCH 024/157] ggml-backend: Ensure allocation meet backend requirements Backends can impose additional alignment requirements on buffer sizes. We should ensure that we meet these or allocations can fail. --- ml/backend/ggml/ggml.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 00873b4f..bf17c882 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -520,6 +520,10 @@ func shapeToGGML(shape []int) *C.int64_t { return &sh[0] } +func pad(length, pad C.size_t) C.size_t { + return ((length + pad - 1) / pad) * pad +} + func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { if c.buft == nil { panic("set Input, Output, or Layer before creating tensors") @@ -551,7 +555,8 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { } t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape)) - b := C.ggml_backend_buft_alloc_buffer(c.buft, C.ggml_nbytes(t)) + size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft)) + b := C.ggml_backend_buft_alloc_buffer(c.buft, size) C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b)) return &Tensor{b: c.b, t: t} } From f52b2615ef3d82d4d95ce237233436f7cb818782 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 7 Mar 2025 18:16:52 -0800 Subject: [PATCH 025/157] kvcache: Set context for shift offsets --- kvcache/causal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvcache/causal.go b/kvcache/causal.go index d519cf60..9a79fa57 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -545,7 +545,7 @@ func (c *Causal) shift(seq int, beginIndex, offset int32) error { } } - kShift, err := ctx.FromIntSlice(offsets, len(offsets)) + kShift, err := ctx.Input().FromIntSlice(offsets, len(offsets)) if err != nil { return err } From 4100ed7bdd417ae6d25bf64467fb9df33f3f6525 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 21 Feb 2025 20:54:14 -0800 Subject: [PATCH 026/157] ml: Add support for quantized KV cache Similar to the llama engine, quantizing the KV cache requires flash attention to be enabled through the Ollama server. --- ml/backend.go | 4 +++- ml/backend/ggml/ggml.go | 8 ++++++++ runner/ollamarunner/cache.go | 4 ++-- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/ml/backend.go b/ml/backend.go index 915c9ad6..3abacbf1 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -215,7 +215,7 @@ func Dump(ctx Context, t Tensor, opts ...DumpOptions) string { return dump[[]float32](ctx, t, opts[0].Items, func(f float32) string { return strconv.FormatFloat(float64(f), 'f', opts[0].Precision, 32) }) - case DTypeF16: + case DTypeF16, DTypeQ80, DTypeQ40: f32 := ctx.Empty(DTypeF32, t.Shape()...) 
f32 = t.Copy(ctx, f32) return dump[[]float32](ctx, f32, opts[0].Items, func(f float32) string { @@ -283,5 +283,7 @@ const ( DTypeOther DType = iota DTypeF32 DTypeF16 + DTypeQ80 + DTypeQ40 DTypeI32 ) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index bf17c882..74512f33 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -535,6 +535,10 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor { cdtype = C.GGML_TYPE_F32 case ml.DTypeF16: cdtype = C.GGML_TYPE_F16 + case ml.DTypeQ80: + cdtype = C.GGML_TYPE_Q8_0 + case ml.DTypeQ40: + cdtype = C.GGML_TYPE_Q4_0 case ml.DTypeI32: cdtype = C.GGML_TYPE_I32 default: @@ -680,6 +684,10 @@ func (t *Tensor) DType() ml.DType { return ml.DTypeF32 case C.GGML_TYPE_F16: return ml.DTypeF16 + case C.GGML_TYPE_Q8_0: + return ml.DTypeQ80 + case C.GGML_TYPE_Q4_0: + return ml.DTypeQ40 case C.GGML_TYPE_I32: return ml.DTypeI32 default: diff --git a/runner/ollamarunner/cache.go b/runner/ollamarunner/cache.go index 2fd060a1..3244c0b8 100644 --- a/runner/ollamarunner/cache.go +++ b/runner/ollamarunner/cache.go @@ -58,9 +58,9 @@ func NewInputCache(model model.Model, kvCacheType string, kvSize int32, numSlots func kvCacheTypeFromStr(s string) ml.DType { switch s { case "q8_0": - panic("kv cache quantization not yet implemented") + return ml.DTypeQ80 case "q4_0": - panic("kv cache quantization not yet implemented") + return ml.DTypeQ40 default: return ml.DTypeF16 } From 4614fafae0ee58af5b9d04ec4b8c2eb3846274da Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Sat, 8 Mar 2025 18:48:32 -0800 Subject: [PATCH 027/157] ollamarunner: Don't panic for unimplemented features at runtime. It's ok to fail on startup but we shouldn't panic during runtime based on user input. Downgrade the panic to a warning. --- runner/ollamarunner/runner.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index d9f47970..a51b1459 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -436,10 +436,9 @@ func (s *Server) processBatch() error { // if done processing the prompt, generate an embedding and return if seq.embeddingOnly { // TODO(jessegross): Embedding support - // s.removeSequence(i, "") - // continue - - panic("generation of embedding outputs not yet supported") + slog.Warn("generation of embedding outputs not yet supported") + s.removeSequence(i, "") + continue } // sample a token From a1cda80bcb0b47d493be9dc061a2dfa8a0ddd61c Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Sat, 8 Mar 2025 15:45:31 -0800 Subject: [PATCH 028/157] model: Update encoder cache to use multimodal input processing handler The encoder cache needs to know the position of images in the input stream so that it knows when to delete them. Previously images didn't have a position, so we implied one by breaking batches before an image and then assuming the image was in the first position. However, multimodal objects are now given explicit positions in the input stream, so we can use that instead. Breaking batches was also a way to simulate a cross attention mask for mllama. However, given that it only supports a single sequence and a single image, this mask doesn't serve any real purpose. Removing the batch break does not appear to affect the quality of the output. Most of this is simply moving the input data structures to a new package to avoid import cycles. 
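For illustration only (not part of this patch): a rough sketch of the batch shape under the new input package, with an image carried as a MultimodalIndex whose position is recovered from the parallel Positions slice rather than implied by a batch break. The package name, helper names, the single hard-coded sequence, and the non-empty token slice are assumptions made for this example.

    package inputsketch

    import "github.com/ollama/ollama/model/input"

    // buildBatch assembles a single-sequence batch in the shape a forward
    // pass now receives. It assumes tokens is non-empty.
    func buildBatch(tokens []int32, imageEmbedding any) input.Options {
        var opts input.Options
        for i, t := range tokens {
            opts.Inputs = append(opts.Inputs, t)
            opts.Positions = append(opts.Positions, int32(i))
            opts.Sequences = append(opts.Sequences, 0) // one sequence, id 0
        }

        // Attach the image to the last token added so far; consumers can
        // recover its position without the batch being split around it.
        opts.Multimodal = append(opts.Multimodal, input.MultimodalIndex{
            Index:      len(opts.Inputs) - 1,
            Multimodal: imageEmbedding,
        })

        return opts
    }

    // imagePosition mirrors the encoder cache change in this patch: take the
    // most recent multimodal entry and map its index back to a position.
    func imagePosition(opts input.Options) (int32, bool) {
        if len(opts.Multimodal) == 0 {
            return 0, false
        }

        last := opts.Multimodal[len(opts.Multimodal)-1]
        return opts.Positions[last.Index], true
    }

The lookup in imagePosition is the same one the encoder cache performs below, reading the image's position from opts.Positions instead of assuming the image is always first in the batch.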
--- kvcache/cache.go | 3 +- kvcache/causal.go | 13 ++--- kvcache/causal_test.go | 3 +- kvcache/encoder.go | 9 ++-- kvcache/wrapper.go | 9 ++-- model/input/input.go | 37 ++++++++++++++ model/model.go | 83 +++++++++---------------------- model/model_test.go | 3 +- model/models/llama/model.go | 3 +- model/models/mllama/model.go | 13 ++--- runner/ollamarunner/cache.go | 13 ++--- runner/ollamarunner/cache_test.go | 72 +++++++++++++-------------- runner/ollamarunner/runner.go | 56 ++++++++------------- 13 files changed, 157 insertions(+), 160 deletions(-) create mode 100644 model/input/input.go diff --git a/kvcache/cache.go b/kvcache/cache.go index 2541f7c1..d3548905 100644 --- a/kvcache/cache.go +++ b/kvcache/cache.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/input" ) var ( @@ -51,7 +52,7 @@ type Cache interface { // StartForward is called before the start of the model's forward pass. // For each token in the coming batch, there must be a corresponding // entry in positions and seqs. - StartForward(ctx ml.Context, positions []int32, seqs []int) error + StartForward(ctx ml.Context, opts input.Options) error // CopyPrefix copies tokens in the range [0, len) from srcSeq to dstSeq CopyPrefix(srcSeq, dstSeq int, len int32) diff --git a/kvcache/causal.go b/kvcache/causal.go index 9a79fa57..34d5337c 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -8,6 +8,7 @@ import ( "slices" "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/input" ) type shiftFn func(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) @@ -140,10 +141,10 @@ func (c *Causal) Close() { } } -func (c *Causal) StartForward(ctx ml.Context, positions []int32, seqs []int) error { - c.curBatchSize = len(positions) - c.curSequences = seqs - c.curPositions = positions +func (c *Causal) StartForward(ctx ml.Context, opts input.Options) error { + c.curBatchSize = len(opts.Positions) + c.curSequences = opts.Sequences + c.curPositions = opts.Positions var err error c.curLoc, err = c.findStartLoc() @@ -156,8 +157,8 @@ func (c *Causal) StartForward(ctx ml.Context, positions []int32, seqs []int) err } c.curCellRange = newRange() - for i, pos := range positions { - seq := seqs[i] + for i, pos := range opts.Positions { + seq := opts.Sequences[i] c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}} diff --git a/kvcache/causal_test.go b/kvcache/causal_test.go index 412f33e3..22d8efb4 100644 --- a/kvcache/causal_test.go +++ b/kvcache/causal_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/input" ) type testCase struct { @@ -269,7 +270,7 @@ func testCache(t *testing.T, backend ml.Backend, cache Cache, tests []testCase) context := backend.NewContext() defer context.Close() - err := cache.StartForward(context, test.pos, test.seqs) + err := cache.StartForward(context, input.Options{Positions: test.pos, Sequences: test.seqs}) if err != nil { panic(err) } diff --git a/kvcache/encoder.go b/kvcache/encoder.go index 867ee37a..6a9df2ab 100644 --- a/kvcache/encoder.go +++ b/kvcache/encoder.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/input" ) // Encoder cache stores K and V tensors that are position independent @@ -78,9 +79,11 @@ func (c *EncoderCache) Close() { } } -func (c *EncoderCache) StartForward(ctx ml.Context, positions []int32, seqs []int) error { - // The image is always in the first position - c.curPos = positions[0] +func (c *EncoderCache) 
StartForward(ctx ml.Context, opts input.Options) error { + // We work with the most recent image + if len(opts.Multimodal) > 0 { + c.curPos = opts.Positions[opts.Multimodal[len(opts.Multimodal)-1].Index] + } return nil } diff --git a/kvcache/wrapper.go b/kvcache/wrapper.go index 76956a88..aaccd166 100644 --- a/kvcache/wrapper.go +++ b/kvcache/wrapper.go @@ -4,6 +4,7 @@ import ( "math" "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/input" ) // Wrapper cache is a container for multiple types of caches, @@ -40,14 +41,14 @@ func (c *WrapperCache) Close() { } } -func (c *WrapperCache) StartForward(ctx ml.Context, positions []int32, seqs []int) error { +func (c *WrapperCache) StartForward(ctx ml.Context, opts input.Options) error { for i, cache := range c.caches { - err := cache.StartForward(ctx, positions, seqs) + err := cache.StartForward(ctx, opts) if err != nil { // unwind on error - Remove with endIndex set to math.MaxInt32 does not fail for j := i - 1; j >= 0; j-- { - for k := range positions { - _ = c.caches[j].Remove(seqs[k], positions[k], math.MaxInt32) + for k := range opts.Positions { + _ = c.caches[j].Remove(opts.Sequences[k], opts.Positions[k], math.MaxInt32) } } return err diff --git a/model/input/input.go b/model/input/input.go new file mode 100644 index 00000000..0cb3f3f4 --- /dev/null +++ b/model/input/input.go @@ -0,0 +1,37 @@ +package input + +// Input represents one token in the input stream +type Input struct { + // Token is a single element of text. + Token int32 + + // Multimodal is opaque data representing a non-text + // element such as an image (or part of one if the image + // can be processed in pieces). It may be either together + // with Token or on its own. + Multimodal any + + // MultimodalHash is a unique representation of the data + // stored in Multimodal, used for caching and comparing + // equality. + MultimodalHash uint64 +} + +// MultimodalIndex is a multimodal element (such as an image) +// together with an index into the slice of Inputs with the +// corresponding token. Note that the index is not the same +// as the position - to find that use the index with the +// Positions slice. +type MultimodalIndex struct { + Index int + Multimodal any +} + +// Options contains the inputs for a model forward pass +type Options struct { + Inputs []int32 + Multimodal []MultimodalIndex + Positions []int32 + Sequences []int + Outputs []int32 +} diff --git a/model/model.go b/model/model.go index 75b7f639..89b6c803 100644 --- a/model/model.go +++ b/model/model.go @@ -19,66 +19,12 @@ import ( "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" _ "github.com/ollama/ollama/ml/backend" + "github.com/ollama/ollama/model/input" ) -// Input represents one token in the input stream -type Input struct { - // Token is a single element of text. - Token int32 - - // Multimodal is opaque data representing a non-text - // element such as an image (or part of one if the image - // can be processed in pieces). It may be either together - // with Token or on its own. - Multimodal any - - // MultimodalHash is a unique representation of the data - // stored in Multimodal, used for caching and comparing - // equality. - MultimodalHash uint64 -} - -// MultimodalIndex is a multimodal element (such as an image) -// together with an index into the slice of Inputs with the -// corresponding token. Note that the index is not the same -// as the position - to find that use the index with the -// Positions slice. 
-type MultimodalIndex struct { - Index int - Multimodal any -} - -// Options contains the inputs for a model forward pass -type Options struct { - Inputs []int32 - Multimodal []MultimodalIndex - Positions []int32 - Sequences []int - Outputs []int32 -} - -type config struct { - Cache kvcache.Cache -} - -// Base implements the common fields and methods for all models -type Base struct { - b ml.Backend - config -} - -// Backend returns the underlying backend that will run the model -func (m *Base) Backend() ml.Backend { - return m.b -} - -func (m *Base) Config() config { - return m.config -} - // Model implements a specific model architecture, defining the forward pass and any model-specific configuration type Model interface { - Forward(ml.Context, Options) (ml.Tensor, error) + Forward(ml.Context, input.Options) (ml.Tensor, error) Backend() ml.Backend Config() config @@ -112,7 +58,26 @@ type MultimodalProcessor interface { // This function is also responsible for updating MultimodalHash for any Multimodal // that is modified to ensure that there is a unique hash value that accurately // represents the contents. - PostTokenize(ml.Context, []Input) ([]Input, error) + PostTokenize(ml.Context, []input.Input) ([]input.Input, error) +} + +// Base implements the common fields and methods for all models +type Base struct { + b ml.Backend + config +} + +type config struct { + Cache kvcache.Cache +} + +// Backend returns the underlying backend that will run the model +func (m *Base) Backend() ml.Backend { + return m.b +} + +func (m *Base) Config() config { + return m.config } var models = make(map[string]func(ml.Config) (Model, error)) @@ -313,7 +278,7 @@ func canNil(t reflect.Type) bool { t.Kind() == reflect.Slice } -func Forward(ctx ml.Context, m Model, opts Options) (ml.Tensor, error) { +func Forward(ctx ml.Context, m Model, opts input.Options) (ml.Tensor, error) { if len(opts.Positions) != len(opts.Sequences) { return nil, fmt.Errorf("length of positions (%v) must match length of seqs (%v)", len(opts.Positions), len(opts.Sequences)) } @@ -324,7 +289,7 @@ func Forward(ctx ml.Context, m Model, opts Options) (ml.Tensor, error) { cache := m.Config().Cache if cache != nil { - err := cache.StartForward(ctx, opts.Positions, opts.Sequences) + err := cache.StartForward(ctx, opts) if err != nil { return nil, err } diff --git a/model/model_test.go b/model/model_test.go index 8761817e..354dd1d8 100644 --- a/model/model_test.go +++ b/model/model_test.go @@ -11,6 +11,7 @@ import ( "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/backend/ggml" "github.com/ollama/ollama/ml/nn" + "github.com/ollama/ollama/model/input" ) func TestParseTags(t *testing.T) { @@ -162,7 +163,7 @@ func TestGetTextProcessor(t *testing.T) { type notTextProcessorModel struct{} -func (notTextProcessorModel) Forward(ml.Context, Options) (ml.Tensor, error) { +func (notTextProcessorModel) Forward(ml.Context, input.Options) (ml.Tensor, error) { panic("unimplemented") } diff --git a/model/models/llama/model.go b/model/models/llama/model.go index 9ccfff61..1f27f522 100644 --- a/model/models/llama/model.go +++ b/model/models/llama/model.go @@ -9,6 +9,7 @@ import ( "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" ) type Options struct { @@ -137,7 +138,7 @@ func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, outputs ml.Ten return hiddenState.Add(ctx, residual) } -func (m *Model) Forward(ctx ml.Context, opts model.Options) (ml.Tensor, error) 
{ +func (m *Model) Forward(ctx ml.Context, opts input.Options) (ml.Tensor, error) { inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) if err != nil { return nil, err diff --git a/model/models/mllama/model.go b/model/models/mllama/model.go index 54c63296..31ba15df 100644 --- a/model/models/mllama/model.go +++ b/model/models/mllama/model.go @@ -12,6 +12,7 @@ import ( "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" ) type Model struct { @@ -101,8 +102,8 @@ func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, er return m.Projector.Forward(ctx, crossAttentionStates), nil } -func (m *Model) PostTokenize(ctx ml.Context, inputs []model.Input) ([]model.Input, error) { - var images []model.Input +func (m *Model) PostTokenize(ctx ml.Context, inputs []input.Input) ([]input.Input, error) { + var images []input.Input fnvHash := fnv.New64a() for i := range inputs { @@ -125,15 +126,15 @@ func (m *Model) PostTokenize(ctx ml.Context, inputs []model.Input) ([]model.Inpu } } - inputs = slices.DeleteFunc(inputs, func(input model.Input) bool { return input.Token == -1 }) + inputs = slices.DeleteFunc(inputs, func(input input.Input) bool { return input.Token == -1 }) return inputs, nil } -func (m *Model) Forward(ctx ml.Context, opts model.Options) (ml.Tensor, error) { +func (m *Model) Forward(ctx ml.Context, opts input.Options) (ml.Tensor, error) { var crossAttentionStates ml.Tensor - if opts.Multimodal != nil { - crossAttentionStates = opts.Multimodal[0].Multimodal.(ml.Tensor) + if len(opts.Multimodal) > 0 { + crossAttentionStates = opts.Multimodal[len(opts.Multimodal)-1].Multimodal.(ml.Tensor) } inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) diff --git a/runner/ollamarunner/cache.go b/runner/ollamarunner/cache.go index 3244c0b8..a411fddb 100644 --- a/runner/ollamarunner/cache.go +++ b/runner/ollamarunner/cache.go @@ -10,6 +10,7 @@ import ( "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" ) type InputCache struct { @@ -79,7 +80,7 @@ type InputCacheSlot struct { Id int // Inputs that are stored in the KV cache - Inputs []model.Input + Inputs []input.Input // is this cache actively being processed as part of a sequence? 
InUse bool @@ -88,7 +89,7 @@ type InputCacheSlot struct { lastUsed time.Time } -func (c *InputCache) LoadCacheSlot(prompt []model.Input, cachePrompt bool) (*InputCacheSlot, []model.Input, error) { +func (c *InputCache) LoadCacheSlot(prompt []input.Input, cachePrompt bool) (*InputCacheSlot, []input.Input, error) { var slot *InputCacheSlot var numPast int32 var err error @@ -139,7 +140,7 @@ func (c *InputCache) LoadCacheSlot(prompt []model.Input, cachePrompt bool) (*Inp return slot, prompt, nil } -func (c *InputCache) findLongestCacheSlot(prompt []model.Input) (*InputCacheSlot, int32, error) { +func (c *InputCache) findLongestCacheSlot(prompt []input.Input) (*InputCacheSlot, int32, error) { longest := int32(-1) var longestSlot *InputCacheSlot @@ -162,7 +163,7 @@ func (c *InputCache) findLongestCacheSlot(prompt []model.Input) (*InputCacheSlot return longestSlot, longest, nil } -func (c *InputCache) findBestCacheSlot(prompt []model.Input) (*InputCacheSlot, int32, error) { +func (c *InputCache) findBestCacheSlot(prompt []input.Input) (*InputCacheSlot, int32, error) { oldest := time.Now() var oldestSlot *InputCacheSlot @@ -198,7 +199,7 @@ func (c *InputCache) findBestCacheSlot(prompt []model.Input) (*InputCacheSlot, i if longest > 0 && longestSlot != oldestSlot { slog.Debug("forking cache slot", "src", longestSlot.Id, "dst", oldestSlot.Id, "inputs", longest, "total", len(longestSlot.Inputs)) - oldestSlot.Inputs = make([]model.Input, longest) + oldestSlot.Inputs = make([]input.Input, longest) copy(oldestSlot.Inputs, longestSlot.Inputs[:longest]) if c.cache != nil { c.cache.CopyPrefix(longestSlot.Id, oldestSlot.Id, longest) @@ -208,7 +209,7 @@ func (c *InputCache) findBestCacheSlot(prompt []model.Input) (*InputCacheSlot, i return oldestSlot, longest, nil } -func countCommonPrefix(a []model.Input, b []model.Input) int32 { +func countCommonPrefix(a []input.Input, b []input.Input) int32 { var count int32 for i := range a { diff --git a/runner/ollamarunner/cache_test.go b/runner/ollamarunner/cache_test.go index 9ce03b73..0a1b73f5 100644 --- a/runner/ollamarunner/cache_test.go +++ b/runner/ollamarunner/cache_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" ) func TestCountCommon(t *testing.T) { @@ -15,50 +15,50 @@ func TestCountCommon(t *testing.T) { tests := []struct { name string - t1 []model.Input - t2 []model.Input + t1 []input.Input + t2 []input.Input expected int32 }{ { name: "Equal", - t1: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, - t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + t1: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + t2: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 3, }, { name: "Prefix", - t1: []model.Input{{Token: 1}}, - t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + t1: []input.Input{{Token: 1}}, + t2: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 1, }, { name: "Image Prefix", - t1: []model.Input{{Multimodal: imgA, MultimodalHash: 1}}, - t2: []model.Input{{Multimodal: imgA, MultimodalHash: 1}, {Multimodal: imgB, MultimodalHash: 2}, {Multimodal: imgC, MultimodalHash: 3}}, + t1: []input.Input{{Multimodal: imgA, MultimodalHash: 1}}, + t2: []input.Input{{Multimodal: imgA, MultimodalHash: 1}, {Multimodal: imgB, MultimodalHash: 2}, {Multimodal: imgC, MultimodalHash: 3}}, expected: 1, }, { name: "Mixed", - t1: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, - t2: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}, 
{Token: 5}}, + t1: []input.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, + t2: []input.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}, {Token: 5}}, expected: 2, }, { name: "Mixed, Same Length", - t1: []model.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, - t2: []model.Input{{Token: 1}, {Multimodal: imgB, MultimodalHash: 2}}, + t1: []input.Input{{Token: 1}, {Multimodal: imgA, MultimodalHash: 1}}, + t2: []input.Input{{Token: 1}, {Multimodal: imgB, MultimodalHash: 2}}, expected: 1, }, { name: "Empty", - t1: []model.Input{}, - t2: []model.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + t1: []input.Input{}, + t2: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 0, }, { name: "Both Empty", - t1: []model.Input{}, - t2: []model.Input{}, + t1: []input.Input{}, + t2: []input.Input{}, expected: 0, }, } @@ -82,7 +82,7 @@ func TestFindCacheSlot(t *testing.T) { tests := []struct { name string cache InputCache - prompt []model.Input + prompt []input.Input longest expected best expected }{ @@ -91,18 +91,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{}, + Inputs: []input.Input{}, InUse: false, lastUsed: time.Time{}, }, { Id: 1, - Inputs: []model.Input{}, + Inputs: []input.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, - prompt: []model.Input{{Token: 1}}, + prompt: []input.Input{{Token: 1}}, longest: expected{result: 0, len: 0}, best: expected{result: 0, len: 0}, }, @@ -111,18 +111,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{{Token: 1}}, + Inputs: []input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []model.Input{{Token: 1}, {Token: 2}}, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: []model.Input{{Token: 1}, {Token: 2}}, + prompt: []input.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 2}, best: expected{result: 1, len: 2}, }, @@ -131,18 +131,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{{Token: 1}, {Token: 2}}, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []model.Input{}, + Inputs: []input.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, - prompt: []model.Input{{Token: 2}}, + prompt: []input.Input{{Token: 2}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, @@ -152,19 +152,19 @@ func TestFindCacheSlot(t *testing.T) { slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{{Token: 1}, {Token: 2}}, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []model.Input{}, + Inputs: []input.Input{}, InUse: false, lastUsed: time.Time{}, }, }, }, - prompt: []model.Input{{Token: 1}}, + prompt: []input.Input{{Token: 1}}, longest: expected{result: 0, len: 1}, best: expected{result: 1, len: 1}, }, @@ -173,18 +173,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{{Token: 1}}, + Inputs: []input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []model.Input{{Token: 1}, {Token: 2}}, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: 
[]model.Input{{Token: 2}, {Token: 3}}, + prompt: []input.Input{{Token: 2}, {Token: 3}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, @@ -193,18 +193,18 @@ func TestFindCacheSlot(t *testing.T) { cache: InputCache{slots: []InputCacheSlot{ { Id: 0, - Inputs: []model.Input{{Token: 1}, {Token: 2}}, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, InUse: true, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, - Inputs: []model.Input{{Token: 1}}, + Inputs: []input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, - prompt: []model.Input{{Token: 1}, {Token: 2}}, + prompt: []input.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 1}, best: expected{result: 1, len: 2}, }, diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index a51b1459..c8383a5d 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -26,6 +26,7 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" "github.com/ollama/ollama/runner/common" "github.com/ollama/ollama/sample" @@ -41,10 +42,10 @@ type Sequence struct { iBatch int // prompt inputs left to evaluate - inputs []model.Input + inputs []input.Input // inputs that have been added to a batch but not yet submitted to Forward - pendingInputs []model.Input + pendingInputs []input.Input // tokens that have been generated but not returned yet (e.g. for stop sequences) pendingResponses []string @@ -144,8 +145,8 @@ func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequen // inputs processes the prompt and images into a list of inputs // by splitting the prompt on [img-] tags, tokenizing text and // decoding images -func (s *Server) inputs(ctx ml.Context, prompt string, images []ImageData) ([]model.Input, error) { - var inputs []model.Input +func (s *Server) inputs(ctx ml.Context, prompt string, images []ImageData) ([]input.Input, error) { + var inputs []input.Input var parts []string var matches [][]string @@ -168,7 +169,7 @@ func (s *Server) inputs(ctx ml.Context, prompt string, images []ImageData) ([]mo } for _, t := range tokens { - inputs = append(inputs, model.Input{Token: t}) + inputs = append(inputs, input.Input{Token: t}) } // image - decode and store @@ -196,7 +197,7 @@ func (s *Server) inputs(ctx ml.Context, prompt string, images []ImageData) ([]mo _, _ = s.multimodalHash.Write(images[imageIndex].Data) imageHash := s.multimodalHash.Sum64() - inputs = append(inputs, model.Input{Multimodal: imageEmbeddings, MultimodalHash: imageHash}) + inputs = append(inputs, input.Input{Multimodal: imageEmbeddings, MultimodalHash: imageHash}) postTokenize = true } } @@ -250,9 +251,6 @@ type Server struct { // KV cache cache *InputCache - // next sequence for prompt processing to avoid starvation - nextSeq int - // multimodalHash generates hashes for comparing equality // of non-text data multimodalHash maphash.Hash @@ -329,29 +327,25 @@ func (s *Server) processBatch() error { } defer s.mu.Unlock() - var options model.Options - - seqIdx := s.nextSeq - 1 - for range s.seqs { - seqIdx = (seqIdx + 1) % len(s.seqs) - seq := s.seqs[seqIdx] + var options input.Options + for i, seq := range s.seqs { if seq == nil { continue } // if past the num predict limit if seq.numPredict > 0 && seq.numPredicted >= seq.numPredict { - s.removeSequence(seqIdx, "limit") + s.removeSequence(i, "limit") continue } if !s.cache.enabled { seq.inputs = append(seq.cache.Inputs, 
seq.inputs...) - seq.cache.Inputs = []model.Input{} + seq.cache.Inputs = []input.Input{} } - for i, input := range seq.inputs { + for j, inp := range seq.inputs { if int32(len(seq.cache.Inputs)+len(seq.pendingInputs)+1) > s.cache.numCtx { if len(seq.pendingInputs) == 0 { err := s.cache.ShiftCacheSlot(seq.cache, seq.numKeep) @@ -363,33 +357,23 @@ func (s *Server) processBatch() error { } } - if i >= s.batchSize { + if j >= s.batchSize { break } - // TODO(jessegross): This is a workaround for generating an attention mask and also providing a hint - // to the encoder cache. - // - // Break the batch when switching from text to images so that images are always at the beginning. - if input.Multimodal != nil && !(len(seq.pendingInputs) == 0 || - (len(options.Multimodal) > 0 && options.Multimodal[len(options.Multimodal)-1].Index == len(options.Inputs)-1)) { - s.nextSeq = seqIdx - break - } - - options.Inputs = append(options.Inputs, input.Token) - if input.Multimodal != nil { - options.Multimodal = append(options.Multimodal, model.MultimodalIndex{Index: len(options.Inputs) - 1, Multimodal: input.Multimodal}) + options.Inputs = append(options.Inputs, inp.Token) + if inp.Multimodal != nil { + options.Multimodal = append(options.Multimodal, input.MultimodalIndex{Index: len(options.Inputs) - 1, Multimodal: inp.Multimodal}) } options.Positions = append(options.Positions, int32(len(seq.cache.Inputs)+len(seq.pendingInputs))) options.Sequences = append(options.Sequences, seq.cache.Id) seq.iBatch = len(options.Outputs) - if i+1 == len(seq.inputs) { + if j+1 == len(seq.inputs) { options.Outputs = append(options.Outputs, int32(len(options.Inputs)-1)) } - seq.pendingInputs = append(seq.pendingInputs, input) + seq.pendingInputs = append(seq.pendingInputs, inp) } seq.inputs = seq.inputs[len(seq.pendingInputs):] @@ -417,7 +401,7 @@ func (s *Server) processBatch() error { // After calling Forward, pending inputs are now in the cache if len(seq.pendingInputs) > 0 { seq.cache.Inputs = append(seq.cache.Inputs, seq.pendingInputs...) 
- seq.pendingInputs = []model.Input{} + seq.pendingInputs = []input.Input{} } // don't sample prompt processing @@ -464,7 +448,7 @@ func (s *Server) processBatch() error { return err } - seq.inputs = []model.Input{{Token: token}} + seq.inputs = []input.Input{{Token: token}} seq.pendingResponses = append(seq.pendingResponses, piece) sequence := strings.Join(seq.pendingResponses, "") From e093db92c4731b0cada767c7d5877c20d5f61dcf Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 10 Mar 2025 16:17:39 +0100 Subject: [PATCH 029/157] sample: temporarily use grammars for constrained generation in new engine (#9586) --- llama/llama.go | 68 +++++++++++ llama/sampling_ext.cpp | 22 ++++ llama/sampling_ext.h | 3 + llm/server.go | 39 +++--- runner/ollamarunner/runner.go | 23 +++- sample/samplers.go | 193 +++++++++++++++++++++--------- sample/samplers_benchmark_test.go | 28 ++--- sample/samplers_test.go | 94 +-------------- sample/transforms.go | 16 +-- sample/transforms_test.go | 28 ++--- 10 files changed, 301 insertions(+), 213 deletions(-) diff --git a/llama/llama.go b/llama/llama.go index bb5028bd..a026bee2 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -245,6 +245,20 @@ func LoadModelFromFile(modelPath string, params ModelParams) (*Model, error) { return &m, nil } +func LoadVocabFromFile(path string) (*Vocab, error) { + mp := C.CString(path) + defer C.free(unsafe.Pointer(mp)) + v := Vocab{c: C.llama_load_vocab_from_file(mp)} + if v.c == nil { + return nil, fmt.Errorf("unable to load vocab: %s", path) + } + return &v, nil +} + +func FreeVocab(vocab *Vocab) { + C.llama_free_vocab(vocab.c) +} + func FreeModel(model *Model) { C.llama_model_free(model.c) } @@ -293,6 +307,10 @@ func (m *Model) ApplyLoraFromFile(context *Context, loraPath string, scale float return nil } +type Vocab struct { + c *C.struct_llama_vocab +} + func (m *Model) Vocab() *C.struct_llama_vocab { return C.llama_model_get_vocab(m.c) } @@ -669,3 +687,53 @@ func SchemaToGrammar(schema []byte) []byte { } return buf[:n] } + +type Sampler struct { + c *C.struct_llama_sampler +} + +func NewGrammarSampler(vocab *Vocab, grammar string) *Sampler { + cGrammar := C.CString(grammar) + cRoot := C.CString("root") + defer C.free(unsafe.Pointer(cGrammar)) + defer C.free(unsafe.Pointer(cRoot)) + + sampler := &Sampler{c: C.llama_sampler_init_grammar(vocab.c, cGrammar, cRoot)} + + return sampler +} + +func (s *Sampler) Accept(token int32) { + C.llama_sampler_accept(s.c, C.llama_token(token)) +} + +type TokenData struct { + Id int32 + Logit float32 +} + +func (s *Sampler) Apply(tokens []TokenData) { + tds := make([]C.struct_llama_token_data, len(tokens)) + for i, token := range tokens { + tds[i] = C.struct_llama_token_data{ + id: C.int32_t(token.Id), + logit: C.float(token.Logit), + p: C.float(0.0), + } + } + tda := &C.llama_token_data_array{ + data: (*C.struct_llama_token_data)(unsafe.Pointer(&tds[0])), + size: C.size_t(len(tokens)), + selected: C.int64_t(-1), + sorted: C.bool(false), + } + + var pinner runtime.Pinner + pinner.Pin(&tds[0]) + defer pinner.Unpin() + + C.llama_sampler_apply(s.c, tda) + for i := range tokens { + tokens[i].Logit = float32(tds[i].logit) + } +} diff --git a/llama/sampling_ext.cpp b/llama/sampling_ext.cpp index 0f137dc8..b816cedd 100644 --- a/llama/sampling_ext.cpp +++ b/llama/sampling_ext.cpp @@ -2,6 +2,9 @@ #include "sampling.h" #include "sampling_ext.h" #include "json-schema-to-grammar.h" +#include "llama.h" +#include "llama-model.h" +#include "llama-model-loader.h" struct common_sampler 
*common_sampler_cinit(const struct llama_model *model, struct common_sampler_cparams *params) { try { @@ -64,3 +67,22 @@ int schema_to_grammar(const char *json_schema, char *grammar, size_t max_len) return 0; } } + +struct llama_vocab * llama_load_vocab_from_file(const char * fname) { + llama_vocab * vocab = new llama_vocab(); + try { + const auto kv = LLM_KV(LLM_ARCH_UNKNOWN); + std::vector splits = {}; + llama_model_loader ml(std::string(fname), splits, false, false, nullptr); + vocab->load(ml, kv); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); + return nullptr; + } + + return vocab; +} + +void llama_free_vocab(struct llama_vocab * vocab) { + delete vocab; +} diff --git a/llama/sampling_ext.h b/llama/sampling_ext.h index 39f499f1..9be7c100 100644 --- a/llama/sampling_ext.h +++ b/llama/sampling_ext.h @@ -35,6 +35,9 @@ extern "C" int schema_to_grammar(const char *json_schema, char *grammar, size_t max_len); + struct llama_vocab * llama_load_vocab_from_file(const char * fname); + void llama_free_vocab(struct llama_vocab * vocab); + #ifdef __cplusplus } #endif diff --git a/llm/server.go b/llm/server.go index 9553ba8f..a53306fb 100644 --- a/llm/server.go +++ b/llm/server.go @@ -729,29 +729,24 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu } if len(req.Format) > 0 { - format := string(req.Format) - if format != `null` && format != `""` { - if s.textProcessor != nil { - // New engine handles this on the backend - request["format"] = req.Format - } else { - // old engine - switch format { - case `"json"`: - request["grammar"] = grammarJSON - default: - if req.Format[0] != '{' { - return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format) - } - - // User provided a JSON schema - g := llama.SchemaToGrammar(req.Format) - if g == nil { - return fmt.Errorf("invalid JSON schema in format") - } - request["grammar"] = string(g) - } + switch string(req.Format) { + case `null`, `""`: + // Field was set, but "missing" a value. We accept + // these as "not set". 
+ break + case `"json"`: + request["grammar"] = grammarJSON + default: + if req.Format[0] != '{' { + return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format) } + + // User provided a JSON schema + g := llama.SchemaToGrammar(req.Format) + if g == nil { + return fmt.Errorf("invalid JSON schema in format") + } + request["grammar"] = string(g) } } diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index c8383a5d..c1475cbb 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -254,6 +254,12 @@ type Server struct { // multimodalHash generates hashes for comparing equality // of non-text data multimodalHash maphash.Hash + + // vocab is a llama.cpp vocab required for gammar-based + // constrained generation (json mode, structured outputs) + // TODO: this is temporary until Ollama sampling supports + // constrained generation + vocab *sample.Vocab } func (s *Server) allNil() bool { @@ -574,18 +580,25 @@ func (s *Server) completion(w http.ResponseWriter, r *http.Request) { return } + var grammar *sample.Grammar + var err error + if req.Grammar != "" { + grammar, err = sample.NewGrammar(s.vocab, req.Grammar) + if err != nil { + http.Error(w, "failed to load model vocabulary required for format", http.StatusInternalServerError) + return + } + } + sampler := sample.NewSampler( req.Temperature, req.TopK, req.TopP, req.MinP, req.Seed, + grammar, ) - if req.Grammar != "" { - panic("grammars are not yet supported") - } - seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{ numPredict: req.NumPredict, stop: req.Stop, @@ -797,6 +810,8 @@ func (s *Server) loadModel( panic(err) } + s.vocab = sample.NewVocab(mpath) + // TODO(jessegross): LoRA loading if lpath.String() != "" { panic("loras are not yet implemented") diff --git a/sample/samplers.go b/sample/samplers.go index a5a0507c..a9d90692 100644 --- a/sample/samplers.go +++ b/sample/samplers.go @@ -2,43 +2,88 @@ package sample import ( "errors" + "math" "math/rand/v2" "slices" + "sync" + + "github.com/ollama/ollama/llama" ) -// Sampler is not thread-safe. 
Each goroutine should have its own instance -type Sampler interface { - Sample([]float32) (int32, error) -} - -// logit represents information about a single token during sampling -type logit struct { +// token represents information about a single token during sampling +type token struct { id int32 // The token's unique identifier value float32 // The raw logit or probability from the model } -type weighted struct { +type Sampler struct { rng *rand.Rand - tokens []logit topK int topP float32 minP float32 temperature float32 + grammar *Grammar } -func (s *weighted) Sample(logits []float32) (int32, error) { - if len(s.tokens) < len(logits) { - s.tokens = make([]logit, len(logits)) - } - - tokens := s.tokens[:len(logits)] - - for i, v := range logits { +func (s *Sampler) Sample(logits []float32) (int32, error) { + tokens := make([]token, len(logits)) + for i := range logits { tokens[i].id = int32(i) - tokens[i].value = v + tokens[i].value = logits[i] + } + + t, err := s.sample(tokens) + if err != nil { + return -1, err + } + + if s.grammar != nil { + // optimization: first check if the max logit is accepted by the grammar + // if the max logit is rejected, apply the grammar to all logits (slower) + top := []token{t} + s.grammar.Apply(top) + if !math.IsInf(float64(top[0].value), -1) { + s.grammar.Accept(top[0].id) + return top[0].id, nil + } + + // since .sample has side effects of modifying the tokens + // we need to reset them before applying the grammar and + // sampling again + for i := range logits { + tokens[i].id = int32(i) + tokens[i].value = logits[i] + } + s.grammar.Apply(tokens) + t, err = s.sample(tokens) + if err != nil { + return -1, err + } + s.grammar.Accept(t.id) + } + + return t.id, nil +} + +// greedy returns the highest probability token from the tokens +func greedy(tokens []token) token { + max := tokens[0] + for i := 1; i < len(tokens); i++ { + if tokens[i].value > max.value { + max = tokens[i] + } + } + + return max +} + +// sample returns the highest probability token from the tokens +// given sampler parameters. It also has side effects of modifying the tokens +func (s *Sampler) sample(tokens []token) (token, error) { + if s.temperature == 0 { + return greedy(tokens), nil } - // Tokens are sorted by logits in TopK or SortTokens if s.topK > 0 { tokens = topK(tokens, s.topK) } else { @@ -47,12 +92,14 @@ func (s *weighted) Sample(logits []float32) (int32, error) { tokens = temperature(tokens, s.temperature) tokens = softmax(tokens) - tokens = topP(tokens, s.topP) tokens = minP(tokens, s.minP) + // TODO: this should fall back to greedy sampling + // or topP, topK values etc should be such that + // there are always tokens to sample from if len(tokens) == 0 { - return -1, errors.New("no valid logits found for weighted sampling") + return token{}, errors.New("no tokens to sample from") } var r float32 @@ -70,48 +117,18 @@ func (s *weighted) Sample(logits []float32) (int32, error) { } r *= tokens[len(tokens)-1].value - idx, _ := slices.BinarySearchFunc(tokens, r, func(token logit, target float32) int { - // Compare cumulative probabilities + idx, _ := slices.BinarySearchFunc(tokens, r, func(token token, target float32) int { if token.value < target { return -1 } - // First token that exceeds target return 1 }) - if idx >= len(tokens) { - idx = len(tokens) - 1 - } - - return tokens[idx].id, nil -} - -type greedy struct{} - -// Greedy sample returns the index of the maximum value in logits. 
-func (s greedy) Sample(logits []float32) (int32, error) { - if len(logits) == 0 { - return -1, errors.New("no logits provided for greedy sampling") - } - - maxIdx := 0 - maxVal := logits[0] - for i := 1; i < len(logits); i++ { - if logits[i] > maxVal { - maxVal = logits[i] - maxIdx = i - } - } - - return int32(maxIdx), nil + return tokens[idx], nil } // TODO(parthsareen): update sampler interface to use json unmarshal https://github.com/ollama/ollama/issues/9278 -func NewSampler(temperature float32, topK int, topP float32, minP float32, seed int) Sampler { - if temperature == 0 { - return &greedy{} - } - +func NewSampler(temperature float32, topK int, topP float32, minP float32, seed int, grammar *Grammar) Sampler { var rng *rand.Rand if seed != -1 { // PCG requires two parameters: sequence and stream @@ -120,7 +137,9 @@ func NewSampler(temperature float32, topK int, topP float32, minP float32, seed // Use golden ratio hash to generate statistically independent seeds rng = rand.New(rand.NewPCG(sequence, sequence^0x9E3779B9)) } - temperature = max(temperature, 1) + if temperature < 0.0 { + temperature = 0.0 + } if topP < 0.0 { topP = 0.0 @@ -136,11 +155,73 @@ func NewSampler(temperature float32, topK int, topP float32, minP float32, seed minP = 1.0 } - return &weighted{ + return Sampler{ rng: rng, topK: topK, topP: topP, minP: minP, temperature: temperature, + grammar: grammar, } } + +type Grammar struct { + vocab *Vocab + grammar string + sampler *llama.Sampler +} + +func NewGrammar(vocab *Vocab, grammar string) (*Grammar, error) { + v, err := vocab.Load() + if err != nil { + return nil, err + } + + return &Grammar{ + vocab: vocab, + grammar: grammar, + sampler: llama.NewGrammarSampler(v, grammar), + }, nil +} + +func (g *Grammar) Apply(tokens []token) { + tds := make([]llama.TokenData, len(tokens)) + for i, token := range tokens { + tds[i].Id = token.id + tds[i].Logit = token.value + } + + g.sampler.Apply(tds) + + for i := range tokens { + tokens[i].value = tds[i].Logit + } +} + +func (g *Grammar) Accept(token int32) { + g.sampler.Accept(token) +} + +type Vocab struct { + once sync.Once + vocab *llama.Vocab + err error + path string +} + +func NewVocab(path string) *Vocab { + return &Vocab{path: path} +} + +// Load returns the lazily-loaded vocabulary +func (v *Vocab) Load() (*llama.Vocab, error) { + v.once.Do(func() { + vocab, err := llama.LoadVocabFromFile(v.path) + if err != nil { + v.err = err + return + } + v.vocab = vocab + }) + return v.vocab, v.err +} diff --git a/sample/samplers_benchmark_test.go b/sample/samplers_benchmark_test.go index 41c0b487..cd138014 100644 --- a/sample/samplers_benchmark_test.go +++ b/sample/samplers_benchmark_test.go @@ -16,13 +16,10 @@ func BenchmarkWeightedSampler(b *testing.B) { logits[i] = float32(rand.Float64()*10 - 5) } - sampler := NewSampler(0.8, 0, 0, 0, 42) + sampler := NewSampler(0.8, 0, 0, 0, 42, nil) b.ResetTimer() for b.Loop() { - _, err := sampler.Sample(logits) - if err != nil { - b.Fatalf("Sampling failed: %v", err) - } + sampler.Sample(logits) } }) } @@ -52,30 +49,24 @@ func BenchmarkWeightedSampler(b *testing.B) { for _, tc := range configs { b.Run("Config"+tc.name, func(b *testing.B) { - sampler := NewSampler(tc.temperature, tc.topK, tc.topP, tc.minP, tc.seed) + sampler := NewSampler(tc.temperature, tc.topK, tc.topP, tc.minP, tc.seed, nil) sampler.Sample(logits) b.ResetTimer() for b.Loop() { - _, err := sampler.Sample(logits) - if err != nil { - b.Fatalf("Sampling failed: %v", err) - } + sampler.Sample(logits) } }) } // Test with 
combined transforms separately - topK influences performance greatly b.Run("TransformCombined", func(b *testing.B) { - sampler := NewSampler(0.8, 50, 0.9, 0.05, 42) + sampler := NewSampler(0.8, 50, 0.9, 0.05, 42, nil) b.ResetTimer() for b.Loop() { - _, err := sampler.Sample(logits) - if err != nil { - b.Fatalf("Sampling failed: %v", err) - } + sampler.Sample(logits) } }) } @@ -90,14 +81,11 @@ func BenchmarkGreedySampler(b *testing.B) { logits[i] = float32(rand.Float64()*10 - 5) } - sampler := NewSampler(0, -1, 0, 0, -1) + sampler := NewSampler(0, -1, 0, 0, -1, nil) b.ResetTimer() for b.Loop() { - _, err := sampler.Sample(logits) - if err != nil { - b.Fatalf("Sampling failed: %v", err) - } + sampler.Sample(logits) } }) } diff --git a/sample/samplers_test.go b/sample/samplers_test.go index dbbee17b..38b9b352 100644 --- a/sample/samplers_test.go +++ b/sample/samplers_test.go @@ -7,7 +7,7 @@ import ( func TestWeighted(t *testing.T) { logits := []float32{-10, 3, -10, -10} - sampler := NewSampler(0, 0, 0, 0, 0) + sampler := NewSampler(0, 0, 0, 0, 0, nil) got, err := sampler.Sample(logits) if err != nil { t.Error(err) @@ -19,7 +19,7 @@ func TestWeighted(t *testing.T) { } logits = []float32{-100, -10, 0, 10} - sampler = NewSampler(0, 0, 0, 0, 0) + sampler = NewSampler(0, 0, 0, 0, 0, nil) got, err = sampler.Sample(logits) if err != nil { t.Error(err) @@ -31,94 +31,10 @@ func TestWeighted(t *testing.T) { } } -func TestNewSampler(t *testing.T) { - tests := []struct { - name string - temperature float32 - topK int - topP float32 - minP float32 - seed int - wantGreedy bool // Instead of wantErr, check if we get greedy sampler - }{ - { - name: "temperature", - temperature: 0.5, - wantGreedy: false, - }, - { - name: "zero temperature - greedy", - temperature: 0, - wantGreedy: true, - }, - { - name: "top k", - temperature: 0.1, - topK: 10, - wantGreedy: false, - }, - { - name: "top p", - temperature: 0.1, - topP: 0.9, - wantGreedy: false, - }, - { - name: "min p", - temperature: 0.1, - minP: 0.2, - wantGreedy: false, - }, - { - name: "seed - weighted", - temperature: 0.1, - seed: 42, - wantGreedy: false, - }, - { - name: "default values", - temperature: 0.8, - topK: 40, - topP: 0.9, - minP: 0.0, - seed: 0, - wantGreedy: false, - }, - { - name: "all zeroes - greedy", - temperature: 0.0, - topK: 0, - topP: 0.0, - minP: 0.0, - seed: 0, - wantGreedy: true, - }, - { - name: "all transforms", - temperature: 0.8, - topK: 50, - topP: 0.95, - minP: 0.1, - seed: 42, - wantGreedy: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sampler := NewSampler(tt.temperature, tt.topK, tt.topP, tt.minP, tt.seed) - _, isGreedy := sampler.(*greedy) - if isGreedy != tt.wantGreedy { - t.Errorf("NewSampler() got greedy = %v, want %v", isGreedy, tt.wantGreedy) - } - }) - } -} - func BenchmarkSample(b *testing.B) { - weighted := NewSampler(0.5, 10, 0.9, 0.2, -1) samplers := map[string]Sampler{ - "Greedy": NewSampler(0, 0, 0, 0, 0), // Use NewSampler with temp=0 for greedy - "Weighted": weighted, + "Greedy": NewSampler(0, 0, 0, 0, 0, nil), // Use NewSampler with temp=0 for greedy + "Weighted": NewSampler(0.5, 10, 0.9, 0.2, -1, nil), } // Generate random logits for benchmarking @@ -132,7 +48,7 @@ func BenchmarkSample(b *testing.B) { b.ResetTimer() for b.Loop() { if _, err := s.Sample(logits); err != nil { - b.Error(err) + b.Fatalf("error sampling: %v", err) } } }) diff --git a/sample/transforms.go b/sample/transforms.go index f1f4f3b1..49625297 100644 --- a/sample/transforms.go +++ 
b/sample/transforms.go @@ -5,7 +5,7 @@ import ( "slices" ) -func softmax(ts []logit) []logit { +func softmax(ts []token) []token { var sum float32 for i, v := range ts { ts[i].value = float32(math.Exp(float64(v.value))) @@ -19,7 +19,7 @@ func softmax(ts []logit) []logit { return ts } -func temperature(ti []logit, t float32) []logit { +func temperature(ti []token, t float32) []token { if t == 1 { return ti } @@ -51,7 +51,7 @@ func temperature(ti []logit, t float32) []logit { // 1. Finds the smallest value between the node and its children // 2. If the node is not the smallest, swaps it with its smallest child // 3. Continues this process down the affected path until the min-heap property is restored -func siftDown(data []logit, start, end int) { +func siftDown(data []token, start, end int) { root := start for { child := 2*root + 1 @@ -73,7 +73,7 @@ func siftDown(data []logit, start, end int) { } // topK limits the number of tokens considered to the k highest logits -func topK(ts []logit, k int) []logit { +func topK(ts []token, k int) []token { if k >= len(ts) { return ts } @@ -99,7 +99,7 @@ func topK(ts []logit, k int) []logit { } // topP limits tokens to those with cumulative probability p -func topP(ts []logit, p float32) []logit { +func topP(ts []token, p float32) []token { if p == 1.0 { return ts } @@ -118,7 +118,7 @@ func topP(ts []logit, p float32) []logit { } // minP limits tokens to those with cumulative probability p -func minP(ts []logit, p float32) []logit { +func minP(ts []token, p float32) []token { if p == 1.0 { return ts } @@ -146,7 +146,7 @@ func minP(ts []logit, p float32) []logit { // TODO(parthsareen): possibly replace with simpler implementation https://github.com/ollama/ollama/issues/9584 // Conting sort implementation to sort tokens by logits -func sortLogits(tokens []logit) { +func sortLogits(tokens []token) { if len(tokens) <= 1 { return } @@ -187,7 +187,7 @@ func sortLogits(tokens []logit) { } // Second pass: place elements in correct position - output := make([]logit, len(tokens)) + output := make([]token, len(tokens)) // Track current positions countsCopy := counts diff --git a/sample/transforms_test.go b/sample/transforms_test.go index 950d79b3..1065231d 100644 --- a/sample/transforms_test.go +++ b/sample/transforms_test.go @@ -7,10 +7,10 @@ import ( ) // Helper to convert float64 slice to logit slice -func toLogits(values []float64) []logit { - tokens := make([]logit, len(values)) +func toTokens(values []float64) []token { + tokens := make([]token, len(values)) for i, v := range values { - tokens[i] = logit{ + tokens[i] = token{ id: int32(i), value: float32(v), } @@ -19,7 +19,7 @@ func toLogits(values []float64) []logit { } // Helper to compare logit slices -func compareLogits(t *testing.T, name string, want []float64, got []logit) { +func compareLogits(t *testing.T, name string, want []float64, got []token) { t.Helper() if len(want) != len(got) { t.Errorf("%s: length mismatch: want %d, got %d", name, len(want), len(got)) @@ -36,13 +36,13 @@ func TestTemperature(t *testing.T) { input := []float64{2, -1, 4, -3, 1, -2, 0} want := []float64{-4, -10, 0, -14, -6, -12, -8} // (logit - max logit) / temp - got := temperature(toLogits(input), 0.5) + got := temperature(toTokens(input), 0.5) compareLogits(t, "Temperature", want, got) } func TestSoftmax(t *testing.T) { input := []float64{-3, -2, -1, 0, 1, 2, 4} - got := softmax(toLogits(input)) + got := softmax(toTokens(input)) // Check probabilities sum to 1 var sum float32 @@ -65,7 +65,7 @@ func TestTopK(t *testing.T) { 
input := []float64{-3, -2, -1, 0, 1, 2, 4} // Test k=3 - got := topK(toLogits(input), 3) + got := topK(toTokens(input), 3) if len(got) != 3 { t.Errorf("topK(3): wrong length: want 3, got %d", len(got)) } @@ -74,13 +74,13 @@ func TestTopK(t *testing.T) { compareLogits(t, "topK(3)", want, got) // Test k > len - got = topK(toLogits(input), 10) + got = topK(toTokens(input), 10) compareLogits(t, "topK(10)", input, got) } func TestTopP(t *testing.T) { input := []float64{-3, -2, -1, 0, 1, 2, 4} - tokens := toLogits(input) + tokens := toTokens(input) // First apply temperature and softmax to get probabilities tokens = temperature(tokens, 1) @@ -99,7 +99,7 @@ func TestTopP(t *testing.T) { func TestMinP(t *testing.T) { input := []float64{-3, -2, -1, 0, 1, 2, 4, 3} - tokens := toLogits(input) + tokens := toTokens(input) // First apply temperature and softmax tokens = temperature(tokens, 1) @@ -116,7 +116,7 @@ func TestMinP(t *testing.T) { func TestSortLogits(t *testing.T) { input := []float64{3, 1, 4, 2, -1, 0, -2} - tokens := toLogits(input) + tokens := toTokens(input) sortLogits(tokens) @@ -133,15 +133,15 @@ func TestSortLogits(t *testing.T) { func BenchmarkTransforms(b *testing.B) { // Generate random logits - tokens := make([]logit, 1<<16) + tokens := make([]token, 1<<16) for i := range tokens { - tokens[i] = logit{ + tokens[i] = token{ id: int32(i), value: rand.Float32(), } } - tokensCopy := make([]logit, len(tokens)) + tokensCopy := make([]token, len(tokens)) b.Run("Temperature", func(b *testing.B) { b.ResetTimer() From 96ec8afd091a763762a75b57622759a1d278fa2c Mon Sep 17 00:00:00 2001 From: Sam Date: Tue, 11 Mar 2025 03:52:02 +1100 Subject: [PATCH 030/157] docs(tool): add mcp-llm (#9537) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1162f891..89ff82e2 100644 --- a/README.md +++ b/README.md @@ -561,6 +561,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [TextLLaMA](https://github.com/adarshM84/TextLLaMA) A Chrome Extension that helps you write emails, correct grammar, and translate into any language - [Simple-Discord-AI](https://github.com/zyphixor/simple-discord-ai) - [LLM Telegram Bot](https://github.com/innightwolfsleep/llm_telegram_bot) (telegram bot, primary for RP. Oobabooga-like buttons, [A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) API integration e.t.c) +- [mcp-llm](https://github.com/sammcj/mcp-llm) (MCP Server to allow LLMs to call other LLMs) ### Supported backends From 757668c42f92acf249677c9b3726bbfb2c1f5593 Mon Sep 17 00:00:00 2001 From: Xiaowei Zhu <33129495+zhu-xiaowei@users.noreply.github.com> Date: Mon, 10 Mar 2025 18:01:09 +0000 Subject: [PATCH 031/157] docs: add SwiftChat (#9540) --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 89ff82e2..96b25045 100644 --- a/README.md +++ b/README.md @@ -276,6 +276,7 @@ See the [API documentation](./docs/api.md) for all endpoints. ### Web & Desktop - [Open WebUI](https://github.com/open-webui/open-webui) +- [SwiftChat (macOS with ReactNative)](https://github.com/aws-samples/swift-chat) - [Enchanted (macOS native)](https://github.com/AugustDev/enchanted) - [Hollama](https://github.com/fmaclen/hollama) - [Lollms-Webui](https://github.com/ParisNeo/lollms-webui) @@ -433,6 +434,7 @@ See the [API documentation](./docs/api.md) for all endpoints. 
### Apple Vision Pro +- [SwiftChat](https://github.com/aws-samples/swift-chat) (Cross-platform AI chat app supporting Apple Vision Pro via "Designed for iPad") - [Enchanted](https://github.com/AugustDev/enchanted) ### Database @@ -510,6 +512,7 @@ See the [API documentation](./docs/api.md) for all endpoints. ### Mobile +- [SwiftChat](https://github.com/aws-samples/swift-chat) (Lightning-fast Cross-platform AI chat app with native UI for Android, iOS and iPad) - [Enchanted](https://github.com/AugustDev/enchanted) - [Maid](https://github.com/Mobile-Artificial-Intelligence/maid) - [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama) From d8a5d96b981bf6e1c5a61fde18acaeed0fb89f7c Mon Sep 17 00:00:00 2001 From: frob Date: Mon, 10 Mar 2025 19:02:54 +0100 Subject: [PATCH 032/157] docs: Add OLLAMA_CONTEXT_LENGTH to FAQ. (#9545) --- docs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/faq.md b/docs/faq.md index 04e8433d..4aaccc2e 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -20,7 +20,7 @@ Please refer to the [GPU docs](./gpu.md). ## How can I specify the context window size? -By default, Ollama uses a context window size of 2048 tokens. +By default, Ollama uses a context window size of 2048 tokens. This can be overridden with the `OLLAMA_CONTEXT_LENGTH` environment variable. For example, to set the default context length to 8K, use: `OLLAMA_CONTEXT_LENGTH=8192 ollama serve`. To change this when using `ollama run`, use `/set parameter`: From 7e34f4fbfa192b3a2334d8fc28e24d69b83064d9 Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Mon, 10 Mar 2025 14:43:53 -0700 Subject: [PATCH 033/157] sample: add numerical stability to temperature/softmax transform (#9631) --- sample/samplers.go | 3 ++- sample/transforms.go | 40 ++++++++++++++++----------------------- sample/transforms_test.go | 27 ++++++++++---------------- 3 files changed, 28 insertions(+), 42 deletions(-) diff --git a/sample/samplers.go b/sample/samplers.go index a9d90692..aea99b3f 100644 --- a/sample/samplers.go +++ b/sample/samplers.go @@ -90,8 +90,9 @@ func (s *Sampler) sample(tokens []token) (token, error) { sortLogits(tokens) } + // token logit values are updated to probabilities tokens = temperature(tokens, s.temperature) - tokens = softmax(tokens) + tokens = topP(tokens, s.topP) tokens = minP(tokens, s.minP) diff --git a/sample/transforms.go b/sample/transforms.go index 49625297..ab62455f 100644 --- a/sample/transforms.go +++ b/sample/transforms.go @@ -5,13 +5,25 @@ import ( "slices" ) -func softmax(ts []token) []token { +// temperature applies scaling and softmax to the logits +func temperature(ts []token, temp float32) []token { + // Find max logit for numerical stability + maxLogit := float32(math.Inf(-1)) + for _, t := range ts { + if t.value > maxLogit { + maxLogit = t.value + } + } + + // Apply temperature and compute exp(x - max) + temp = max(temp, 1e-7) var sum float32 for i, v := range ts { - ts[i].value = float32(math.Exp(float64(v.value))) + ts[i].value = float32(math.Exp(float64((v.value - maxLogit) / temp))) sum += ts[i].value } + // Normalize for i := range ts { ts[i].value /= sum } @@ -19,27 +31,6 @@ func softmax(ts []token) []token { return ts } -func temperature(ti []token, t float32) []token { - if t == 1 { - return ti - } - - temp := max(t, 1e-7) - maxLogit := float32(math.Inf(-1)) - for _, token := range ti { - if token.value > maxLogit { - maxLogit = token.value - } - } - - // subtracting max logit to avoid under/overflow - for i := 
range ti {
-		ti[i].value = (ti[i].value - maxLogit) / temp
-	}
-
-	return ti
-}
-
 // siftDown maintains a min-heap property by recursively moving larger elements down the heap.
 //
 // The heap is represented as an array where for any node at index i:
@@ -145,7 +136,8 @@ func minP(ts []token, p float32) []token {
 }
 
 // TODO(parthsareen): possibly replace with simpler implementation https://github.com/ollama/ollama/issues/9584
-// Conting sort implementation to sort tokens by logits
+// sortLogits sorts tokens by logits using counting sort
+// counting sort is faster than built-in sort for this use case
 func sortLogits(tokens []token) {
 	if len(tokens) <= 1 {
 		return
diff --git a/sample/transforms_test.go b/sample/transforms_test.go
index 1065231d..81e8849b 100644
--- a/sample/transforms_test.go
+++ b/sample/transforms_test.go
@@ -32,17 +32,9 @@ func compareLogits(t *testing.T, name string, want []float64, got []token) {
 	}
 }
 
-func TestTemperature(t *testing.T) {
-	input := []float64{2, -1, 4, -3, 1, -2, 0}
-	want := []float64{-4, -10, 0, -14, -6, -12, -8} // (logit - max logit) / temp
-
+func TestTemperatureAndSoftmax(t *testing.T) {
+	input := []float64{1, 4, -2, 0}
 	got := temperature(toTokens(input), 0.5)
-	compareLogits(t, "Temperature", want, got)
-}
-
-func TestSoftmax(t *testing.T) {
-	input := []float64{-3, -2, -1, 0, 1, 2, 4}
-	got := softmax(toTokens(input))
 
 	// Check probabilities sum to 1
 	var sum float32
@@ -53,11 +45,14 @@ func TestSoftmax(t *testing.T) {
 		t.Errorf("probabilities don't sum to 1: got %f", sum)
 	}
 
-	// Check relative ordering is preserved
-	for i := 1; i < len(got); i++ {
-		if got[i].value < got[i-1].value {
-			t.Errorf("probability ordering not preserved at index %d", i)
-		}
+	got = temperature(toTokens(input), 1)
+	// Check probabilities sum to 1
+	sum = 0.0
+	for _, token := range got {
+		sum += token.value
+	}
+	if math.Abs(float64(sum)-1.0) > 1e-6 {
+		t.Errorf("probabilities don't sum to 1: got %f", sum)
 	}
 }
 
@@ -84,7 +79,6 @@ func TestTopP(t *testing.T) {
 
 	// First apply temperature and softmax to get probabilities
 	tokens = temperature(tokens, 1)
-	tokens = softmax(tokens)
 	sortLogits(tokens)
 
 	// Then apply topP
@@ -103,7 +97,6 @@ func TestMinP(t *testing.T) {
 
 	// First apply temperature and softmax
 	tokens = temperature(tokens, 1)
-	tokens = softmax(tokens)
 
 	// Then apply minP
 	got := minP(tokens, 0.2)

From 8585b7b151c404cc8044eeafe3734e676a0e572a Mon Sep 17 00:00:00 2001
From: Vincent Koc
Date: Tue, 11 Mar 2025 10:15:10 +1100
Subject: [PATCH 034/157] docs: add opik to observability integrations (#9626)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 96b25045..b4df5e2a 100644
--- a/README.md
+++ b/README.md
@@ -571,6 +571,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
 
 ### Observability
+- [Opik](https://www.comet.com/docs/opik/cookbook/ollama) is an open-source platform to debug, evaluate, and monitor your LLM applications, RAG systems, and agentic workflows with comprehensive tracing, automated evaluations, and production-ready dashboards. Opik supports native integration with Ollama.
 - [Lunary](https://lunary.ai/docs/integrations/ollama) is the leading open-source LLM observability platform. It provides a variety of enterprise-grade features such as real-time analytics, prompt templates management, PII masking, and comprehensive agent tracing.
- [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama Applications & GPUs using traces and metrics. - [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production. From 9926eae01516fa3ab65d60df7dd0b51dceccdebf Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 7 Mar 2025 18:04:16 -0800 Subject: [PATCH 035/157] fix: pad tensor item if ge zero this produces a nicer output since both positive and negative values produces the same width --- ml/backend.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ml/backend.go b/ml/backend.go index 3abacbf1..641175f0 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "os" + "slices" "strconv" "strings" ) @@ -241,16 +242,17 @@ func dump[S ~[]E, E number](ctx Context, t Tensor, items int, fn func(E) string) } shape := t.Shape() + slices.Reverse(shape) var sb strings.Builder var f func([]int, int) f = func(dims []int, stride int) { prefix := strings.Repeat(" ", len(shape)-len(dims)+1) - fmt.Fprint(&sb, "[") - defer func() { fmt.Fprint(&sb, "]") }() + sb.WriteString("[") + defer func() { sb.WriteString("]") }() for i := 0; i < dims[0]; i++ { if i >= items && i < dims[0]-items { - fmt.Fprint(&sb, "..., ") + sb.WriteString("..., ") // skip to next printable element skip := dims[0] - 2*items if len(dims) > 1 { @@ -265,9 +267,14 @@ func dump[S ~[]E, E number](ctx Context, t Tensor, items int, fn func(E) string) fmt.Fprint(&sb, ",", strings.Repeat("\n", len(dims)-1), prefix) } } else { - fmt.Fprint(&sb, fn(s[stride+i])) + text := fn(s[stride+i]) + if len(text) > 0 && text[0] != '-' { + sb.WriteString(" ") + } + + sb.WriteString(text) if i < dims[0]-1 { - fmt.Fprint(&sb, ", ") + sb.WriteString(", ") } } } From 4dcf80167aca16c90bd2d01e0b91473e595ae936 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 11 Mar 2025 08:34:20 -0700 Subject: [PATCH 036/157] Build release for windows with local script (#9636) --- scripts/build_windows.ps1 | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1 index 62930d7f..60485df8 100644 --- a/scripts/build_windows.ps1 +++ b/scripts/build_windows.ps1 @@ -80,13 +80,14 @@ function checkEnv() { function buildOllama() { + mkdir -Force -path "${script:DIST_DIR}\" if ($script:ARCH -ne "arm64") { Remove-Item -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}" New-Item "${script:SRC_DIR}\dist\windows-${script:ARCH}\lib\ollama\" -ItemType Directory -ea 0 & cmake --fresh --preset CPU --install-prefix $script:DIST_DIR if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} - & cmake --build --preset CPU --parallel $script:JOBS + & cmake --build --preset CPU --config Release --parallel $script:JOBS if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} & cmake --install build --component CPU --strip if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} @@ -101,7 +102,7 @@ function buildOllama() { # to avoid 2022 (or newer) from being used as the default & cmake --fresh --preset "CUDA 11" -G "Visual Studio 16 2019" --install-prefix $script:DIST_DIR if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} - & cmake --build --preset "CUDA 11" --parallel $script:JOBS + & cmake --build --preset "CUDA 11" --config Release --parallel $script:JOBS if ($LASTEXITCODE -ne 0) { 
exit($LASTEXITCODE)} & cmake --install build --component "CUDA" --strip if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} @@ -112,7 +113,7 @@ function buildOllama() { write-host "Building CUDA v12 backend libraries" & cmake --fresh --preset "CUDA 12" --install-prefix $script:DIST_DIR if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} - & cmake --build --preset "CUDA 12" --parallel $script:JOBS + & cmake --build --preset "CUDA 12" --config Release --parallel $script:JOBS if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} & cmake --install build --component "CUDA" --strip if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} @@ -131,7 +132,7 @@ function buildOllama() { $env:HIPCXX="" $env:HIP_PLATFORM="" $env:CMAKE_PREFIX_PATH="" - & cmake --build --preset "ROCm" --parallel $script:JOBS + & cmake --build --preset "ROCm" --config Release --parallel $script:JOBS if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} & cmake --install build --component "HIP" --strip if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} From 5f74d1fd47ec396ba40f60aee0a1a585ad0fcb4f Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Fri, 7 Feb 2025 15:58:15 -0800 Subject: [PATCH 037/157] gemma2 impl --- convert/convert.go | 12 ++ convert/convert_gemma3.go | 81 ++++++++++ convert/tokenizer_spm.go | 70 ++++++-- fs/ggml/ggml.go | 11 +- kvcache/causal_test.go | 2 +- ml/backend.go | 3 +- ml/backend/ggml/ggml.go | 11 +- model/models/gemma2/model.go | 206 ++++++++++++++++++++++++ model/models/gemma3/model.go | 74 +++++++++ model/models/gemma3/model_text.go | 193 ++++++++++++++++++++++ model/models/gemma3/process_image.go | 57 +++++++ model/models/llama/model.go | 7 +- model/models/mllama/model_text.go | 8 +- model/models/models.go | 2 + model/process_text.go | 13 +- model/process_text_spm.go | 221 ++++++++++++++++++++++++++ model/process_text_spm_test.go | 110 +++++++++++++ model/testdata/gemma2/tokenizer.model | Bin 0 -> 4241003 bytes 18 files changed, 1057 insertions(+), 24 deletions(-) create mode 100644 convert/convert_gemma3.go create mode 100644 model/models/gemma2/model.go create mode 100644 model/models/gemma3/model.go create mode 100644 model/models/gemma3/model_text.go create mode 100644 model/models/gemma3/process_image.go create mode 100644 model/process_text_spm.go create mode 100644 model/process_text_spm_test.go create mode 100644 model/testdata/gemma2/tokenizer.model diff --git a/convert/convert.go b/convert/convert.go index 015303e7..eb441715 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -15,6 +15,11 @@ import ( type ModelParameters struct { Architectures []string `json:"architectures"` VocabSize uint32 `json:"vocab_size"` + TextModel TextParameters `json:"text_config"` +} + +type TextParameters struct { + VocabSize uint32 `json:"vocab_size"` } type AdapterParameters struct { @@ -185,6 +190,8 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error { conv = &gemmaModel{} case "Gemma2ForCausalLM": conv = &gemma2Model{} + case "Gemma3ForConditionalGeneration": + conv = &gemma3Model{} case "Phi3ForCausalLM": conv = &phi3Model{} case "Qwen2ForCausalLM": @@ -213,6 +220,11 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error { } vocabSize := int(p.VocabSize) + if vocabSize == 0 { + tVocabSize := int(p.TextModel.VocabSize) + vocabSize = tVocabSize + } + switch { case vocabSize > len(t.Vocabulary.Tokens): slog.Warn("vocabulary is smaller than expected, padding with dummy tokens", "expect", vocabSize, "actual", len(t.Vocabulary.Tokens)) diff --git a/convert/convert_gemma3.go b/convert/convert_gemma3.go new file mode 
100644 index 00000000..c2be5570 --- /dev/null +++ b/convert/convert_gemma3.go @@ -0,0 +1,81 @@ +package convert + +import "github.com/ollama/ollama/fs/ggml" + +type gemma3Model struct { + gemmaModel + TextModel gemma3TextModel `json:"text_config"` + VisionModel gemma3VisionModel `json:"vision_config"` +} + +type gemma3TextModel struct { + MaxPositionEmbeddings uint32 `json:"max_position_embeddings"` + HiddenSize uint32 `json:"hidden_size"` + HiddenLayers uint32 `json:"num_hidden_layers"` + IntermediateSize uint32 `json:"intermediate_size"` + NumAttentionHeads uint32 `json:"num_attention_heads"` + NumKeyValueHeads uint32 `json:"num_key_value_heads"` + RMSNormEPS float32 `json:"rms_norm_eps"` + HeadDim uint32 `json:"head_dim"` + SlidingWindow uint32 `json:"sliding_window"` + AttentionLogitSoftcap float32 `json:"attn_logit_softcapping"` + FinalLogitSoftcap float32 `json:"final_logit_softcapping"` + RopeLocalTheta float32 `json:"rope_local_base_freq"` + RopeGlobalTheta float32 `json:"rope_global_base_freq"` +} + +type gemma3VisionModel struct { + ImageSize uint32 `json:"image_size"` + NumChannels uint32 `json:"num_channels"` + HiddenLayers uint32 `json:"num_hidden_layers"` +} + +func (p *gemma3Model) KV(t *Tokenizer) ggml.KV { + kv := p.ModelParameters.KV(t) + kv["general.architecture"] = "gemma3" + kv["gemma3.context_length"] = p.TextModel.MaxPositionEmbeddings + kv["gemma3.embedding_length"] = p.TextModel.HiddenSize + kv["gemma3.block_count"] = p.TextModel.HiddenLayers + kv["gemma3.text.feed_forward_length"] = p.TextModel.IntermediateSize + kv["gemma3.attention.head_count"] = p.TextModel.NumAttentionHeads + kv["gemma3.attention.head_count_kv"] = p.TextModel.NumKeyValueHeads + kv["gemma3.text.attention.layer_norm_rms_epsilon"] = p.TextModel.RMSNormEPS + kv["gemma3.attention.key_length"] = p.TextModel.HeadDim + kv["gemma3.attention.value_length"] = p.TextModel.HeadDim + kv["gemma3.text.attention.sliding_window"] = p.TextModel.SlidingWindow + kv["gemma3.text.final_logit_softcapping"] = p.TextModel.FinalLogitSoftcap + kv["gemma3.text.rope.local.freq_base"] = p.TextModel.RopeLocalTheta + kv["gemma3.text.rope.global.freq_base"] = p.TextModel.RopeGlobalTheta + kv["tokenizer.ggml.bos_token_id"] = uint32(2) + kv["tokenizer.ggml.eot_token_id"] = uint32(1) + kv["gemma3.vision.image_size"] = p.VisionModel.ImageSize + kv["gemma3.vision.num_channels"] = p.VisionModel.NumChannels + kv["gemma3.vision.block_count"] = p.VisionModel.HiddenLayers + return kv +} + +func (p *gemma3Model) Replacements() []string { + return []string{ + "lm_head", "output", + "model.embed_tokens", "token_embd", + "model.norm", "output_norm", + "vision_model.vision_model", "v", + "language_model.", "", + "model.layers", "blk", + "encoder.layers", "blk", + "vision_tower.vision_model.embeddings", "v", + "input_layernorm", "attn_norm", + "self_attn.q_proj", "attn_q", + "self_attn.q_norm", "attn_q_norm", + "self_attn.k_proj", "attn_k", + "self_attn.k_norm", "attn_k_norm", + "self_attn.v_proj", "attn_v", + "self_attn.o_proj", "attn_output", + "mlp.gate_proj", "ffn_gate", + "mlp.down_proj", "ffn_down", + "mlp.up_proj", "ffn_up", + "post_attention_layernorm", "post_attention_norm", + "pre_feedforward_layernorm", "ffn_norm", + "post_feedforward_layernorm", "post_ffw_norm", + } +} diff --git a/convert/tokenizer_spm.go b/convert/tokenizer_spm.go index 5e506087..d8a012c0 100644 --- a/convert/tokenizer_spm.go +++ b/convert/tokenizer_spm.go @@ -6,7 +6,9 @@ import ( "errors" "fmt" "io/fs" + "log/slog" "os" + "reflect" "slices" 
"google.golang.org/protobuf/proto" @@ -15,6 +17,8 @@ import ( ) func parseSentencePiece(fsys fs.FS) (*Vocabulary, error) { + slog.Debug("using spm vocabulary") + ast, err := parseAdditionalSpecialTokens(fsys) if err != nil { return nil, err @@ -43,8 +47,11 @@ func parseSentencePiece(fsys fs.FS) (*Vocabulary, error) { v.Types = append(v.Types, int32(t)) default: tt := int32(sentencepiece.ModelProto_SentencePiece_NORMAL) - if slices.Contains(ast, piece.GetPiece()) { - tt = int32(sentencepiece.ModelProto_SentencePiece_CONTROL) + for _, t := range ast { + if t.Content == piece.GetPiece() { + tt = int32(sentencepiece.ModelProto_SentencePiece_CONTROL) + break + } } v.Types = append(v.Types, tt) @@ -78,10 +85,16 @@ func parseSentencePiece(fsys fs.FS) (*Vocabulary, error) { return cmp.Compare(i.id, j.id) }) - n := len(v.Tokens) - for i, t := range ts { - if t.id != i+n { - return nil, fmt.Errorf("invalid token id: %d", t.id) + for _, t := range ts { + if t.id < len(v.Tokens) { + if v.Tokens[t.id] == t.content { + slog.Warn("tokenizer", "duplicate token", t.content, "id", t.id) + continue + } + return nil, fmt.Errorf("token mismatch: %s != %s at pos [%d]", t.content, v.Tokens[t.id], t.id) + } + if t.id != len(v.Tokens) { + return nil, fmt.Errorf("invalid token id: [%d] as pos [%d]", t.id, len(v.Tokens)) } v.Tokens = append(v.Tokens, t.content) @@ -92,7 +105,15 @@ func parseSentencePiece(fsys fs.FS) (*Vocabulary, error) { return &v, nil } -func parseAdditionalSpecialTokens(fsys fs.FS) ([]string, error) { +type specialToken struct { + Content string `json:"content"` + Lstrip bool `json:"lstrip"` + Normalized bool `json:"normalized"` + Rstrip bool `json:"rstrip"` + SingleWord bool `json:"single_word"` +} + +func parseAdditionalSpecialTokens(fsys fs.FS) ([]specialToken, error) { f, err := fsys.Open("special_tokens_map.json") if errors.Is(err, os.ErrNotExist) { return nil, nil @@ -102,12 +123,43 @@ func parseAdditionalSpecialTokens(fsys fs.FS) ([]string, error) { defer f.Close() var m struct { - AdditionalSpecialTokens []string `json:"additional_special_tokens"` + AdditionalSpecialTokens any `json:"additional_special_tokens"` } if err := json.NewDecoder(f).Decode(&m); err != nil { return nil, err } - return m.AdditionalSpecialTokens, nil + var ast []specialToken + + switch st := m.AdditionalSpecialTokens.(type) { + case []string: + for _, s := range st { + ast = append(ast, specialToken{Content: s}) + } + case []any: + for _, s := range st { + // marshal and unmarshal the object to get the special token + tMap := s.(map[string]any) + data, err := json.Marshal(tMap) + if err != nil { + return nil, err + } + + var token specialToken + err = json.Unmarshal(data, &token) + if err != nil { + return nil, err + } + + ast = append(ast, token) + } + + default: + slog.Warn("special token", "unknown token", reflect.TypeOf(st)) + } + + slog.Debug("spm tokenizer", "additional tokens", ast) + + return ast, nil } diff --git a/fs/ggml/ggml.go b/fs/ggml/ggml.go index 8662c3b0..fe98a71b 100644 --- a/fs/ggml/ggml.go +++ b/fs/ggml/ggml.go @@ -124,6 +124,15 @@ func (kv KV) Uints(key string, defaultValue ...[]uint32) []uint32 { return s } +func (kv KV) Floats(key string, defaultValue ...[]float32) []float32 { + r := keyValue(kv, key, &array{}) + s := make([]float32, r.size) + for i := range r.size { + s[i] = float32(r.values[i].(float32)) + } + return s +} + func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T { if !strings.HasPrefix(key, "tokenizer.") && 
!strings.HasPrefix(key, "general.") { key = kv.Architecture() + "." + key @@ -476,7 +485,7 @@ func (f GGML) GraphSize(context, batch uint64, kvCacheType string) (kv, partialO // vocab graph 4*batch*(embedding+vocab)+embedding*vocab*105/128, ) - case "gemma", "gemma2": + case "gemma", "gemma2", "gemma3": fullOffload = max( 4*batch*(embedding+vocab), 4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads), diff --git a/kvcache/causal_test.go b/kvcache/causal_test.go index 22d8efb4..0c9e000e 100644 --- a/kvcache/causal_test.go +++ b/kvcache/causal_test.go @@ -445,7 +445,7 @@ func (t *testTensor) Conv2D(ctx ml.Context, weight ml.Tensor, s0, s1, p0, p1, d0 panic("not implemented") } -func (t *testTensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, dim uint32, base, scale float32) ml.Tensor { +func (t *testTensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, dim, ropeType uint32, base, scale float32) ml.Tensor { panic("not implemented") } diff --git a/ml/backend.go b/ml/backend.go index 641175f0..27c2d14d 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -19,6 +19,7 @@ type Config interface { Strings(string, ...[]string) []string Uints(string, ...[]uint32) []uint32 + Floats(string, ...[]float32) []float32 } type Backend interface { @@ -135,7 +136,7 @@ type Tensor interface { Scale(ctx Context, s float64) Tensor Conv2D(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor - RoPE(ctx Context, positionIDs, ropeFactors Tensor, dim uint32, base, scale float32) Tensor + RoPE(ctx Context, positionIDs, ropeFactors Tensor, dim, ropeType uint32, base, scale float32) Tensor Tanh(ctx Context) Tensor GELU(ctx Context) Tensor diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 74512f33..8843ae7c 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -893,10 +893,13 @@ func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor { } const ( - ropeTypeNorm C.int = iota + ropeTypeNorm C.int = 0 + ropeTypeNeox C.int = 2 + ropeTypeMrope C.int = 8 + ropeTypeVision C.int = 24 ) -func (t *Tensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, ropeDim uint32, ropeBase, ropeScale float32) ml.Tensor { +func (t *Tensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, ropeDim, ropeType uint32, ropeBase, ropeScale float32) ml.Tensor { if ropeFactors == nil { ropeFactors = &Tensor{b: t.b} } @@ -911,8 +914,8 @@ func (t *Tensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, ropeDi t: C.ggml_rope_ext( ctx.(*Context).ctx, dequant, positionIDs.(*Tensor).t, ropeFactors.(*Tensor).t, C.int(ropeDim), - 131072, // YaRN n_ctx_train - ropeTypeNorm, // ROPE_TYPE_NORM + C.int(ropeType), + 131072, // YaRN n_ctx_train C.float(ropeBase), C.float(ropeScale), 0., // YaRN ext_factor diff --git a/model/models/gemma2/model.go b/model/models/gemma2/model.go new file mode 100644 index 00000000..2ad9c568 --- /dev/null +++ b/model/models/gemma2/model.go @@ -0,0 +1,206 @@ +package gemma2 + +import ( + "math" + + "github.com/ollama/ollama/kvcache" + "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/ml/nn" + "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" +) + +type Options struct { + hiddenSize, numHeads, numKVHeads int + attnKeyLen, attnValLen int + eps, ropeBase, ropeScale float32 + attnLogitSoftcap float32 + finalLogitSoftcap float32 + largeModelScaling bool +} + +type Model struct { + model.Base + model.SentencePieceModel + + TokenEmbedding *nn.Embedding `gguf:"token_embd"` + Layers []Layer 
`gguf:"blk"` + OutputNorm *nn.RMSNorm `gguf:"output_norm"` + Output *nn.Linear `gguf:"output,alt:token_embd"` // just set to token_embd? + + *Options +} + +const ( + gemma27BLayerCount = 46 +) + +func New(c ml.Config) (model.Model, error) { + m := Model{ + SentencePieceModel: model.NewSentencePieceModel( + c.String("tokenizer.ggml.pretokenizer", `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`), + &model.Vocabulary{ + Values: c.Strings("tokenizer.ggml.tokens"), + Scores: c.Floats("tokenizer.ggml.scores"), + Types: c.Uints("tokenizer.ggml.token_type"), + BOS: int32(c.Uint("tokenizer.ggml.bos_token_id")), + EOS: int32(c.Uint("tokenizer.ggml.eos_token_id")), + }, + ), + Layers: make([]Layer, c.Uint("block_count")), + Options: &Options{ + hiddenSize: int(c.Uint("embedding_length")), + numHeads: int(c.Uint("attention.head_count")), + numKVHeads: int(c.Uint("attention.head_count_kv")), + attnKeyLen: int(c.Uint("attention.key_length")), + attnValLen: int(c.Uint("attention.value_length")), + eps: c.Float("attention.layer_norm_rms_epsilon"), + ropeBase: c.Float("rope.freq_base", 10000.0), + ropeScale: c.Float("rope.freq_scale", 1.0), + attnLogitSoftcap: c.Float("attn_logit_softcapping"), + finalLogitSoftcap: c.Float("final_logit_softcapping"), + }, + } + + slidingWindowLen := int32(c.Uint("attention.sliding_window")) + m.Cache = kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift)) + + return &m, nil +} + +type SelfAttention struct { + Query *nn.Linear `gguf:"attn_q"` + Key *nn.Linear `gguf:"attn_k"` + Value *nn.Linear `gguf:"attn_v"` + Output *nn.Linear `gguf:"attn_output"` +} + +func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { + batchSize := hiddenState.Dim(1) + ropeType := uint32(2) + + q := sa.Query.Forward(ctx, hiddenState) + q = q.Reshape(ctx, opts.attnKeyLen, opts.numHeads, batchSize) + q = q.RoPE(ctx, positionIDs, nil, uint32(opts.attnKeyLen), ropeType, opts.ropeBase, opts.ropeScale) + + if opts.largeModelScaling { + q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.hiddenSize / opts.numHeads))) + } else { + q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.attnKeyLen))) + } + + k := sa.Key.Forward(ctx, hiddenState) + k = k.Reshape(ctx, opts.attnKeyLen, opts.numKVHeads, batchSize) + k = k.RoPE(ctx, positionIDs, nil, uint32(opts.attnKeyLen), ropeType, opts.ropeBase, opts.ropeScale) + + v := sa.Value.Forward(ctx, hiddenState) + v = v.Reshape(ctx, opts.attnValLen, opts.numKVHeads, batchSize) + + cache.Put(ctx, k, v) + k, v, mask := cache.Get(ctx) + + q = q.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) + k = k.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) + v = v.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) + + kq := k.Mulmat(ctx, q) + + // logit softcap + kq = kq.Scale(ctx, 1.0/float64(opts.attnLogitSoftcap)) + kq = kq.Tanh(ctx) + kq = kq.Scale(ctx, float64(opts.attnLogitSoftcap)) + + kq = kq.Add(ctx, mask) + kq = kq.Softmax(ctx) + + kqv := v.Mulmat(ctx, kq) + kqv = kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) + kqv = kqv.Reshape(ctx, opts.attnValLen*opts.numHeads, batchSize) + + return sa.Output.Forward(ctx, kqv) +} + +func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { + return key.RoPE(ctx, shift, nil, uint32(m.Options.attnKeyLen), uint32(2), m.Options.ropeBase, m.Options.ropeScale), nil +} + +type MLP struct { + Up *nn.Linear `gguf:"ffn_up"` + Down *nn.Linear 
`gguf:"ffn_down"` + Gate *nn.Linear `gguf:"ffn_gate"` +} + +func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *Options) ml.Tensor { + hiddenState = mlp.Gate.Forward(ctx, hiddenState).GELU(ctx).Mul(ctx, mlp.Up.Forward(ctx, hiddenState)) + return mlp.Down.Forward(ctx, hiddenState) +} + +type Layer struct { + AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` + SelfAttention *SelfAttention + PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` + MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` + MLP *MLP + PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"` +} + +func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { + residual := hiddenState + + hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positionIDs, cache, opts) + hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = hiddenState.Add(ctx, residual) + residual = hiddenState + + hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = l.MLP.Forward(ctx, hiddenState, opts) + hiddenState = l.PostMLPNorm.Forward(ctx, hiddenState, opts.eps) + return hiddenState.Add(ctx, residual) +} + +func (m *Model) Forward(ctx ml.Context, opts input.Options) (ml.Tensor, error) { + inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) + if err != nil { + return nil, err + } + + positions, err := ctx.Input().FromIntSlice(opts.Positions, len(opts.Positions)) + if err != nil { + return nil, err + } + + hiddenState := m.TokenEmbedding.Forward(ctx, inputs) + hiddenState = hiddenState.Scale(ctx, math.Sqrt(float64(m.Options.hiddenSize))) + + if len(m.Layers) == gemma27BLayerCount { + m.Options.largeModelScaling = true + } + + for i, layer := range m.Layers { + cacheType := i % 2 + m.Cache.SetLayer(i) + wc := m.Cache.(*kvcache.WrapperCache) + wc.SetLayerType(cacheType) + hiddenState = layer.Forward(ctx, hiddenState, positions, m.Cache, m.Options) + } + + hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps) + hiddenState = m.Output.Forward(ctx, hiddenState) + + // final logit softcap + hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.Options.finalLogitSoftcap)) + hiddenState = hiddenState.Tanh(ctx) + hiddenState = hiddenState.Scale(ctx, float64(m.Options.finalLogitSoftcap)) + + outputs, err := ctx.Output().FromIntSlice(opts.Outputs, len(opts.Outputs)) + if err != nil { + return nil, err + } + + return hiddenState.Rows(ctx, outputs), nil +} + +func init() { + model.Register("gemma2", New) +} diff --git a/model/models/gemma3/model.go b/model/models/gemma3/model.go new file mode 100644 index 00000000..0f4944a4 --- /dev/null +++ b/model/models/gemma3/model.go @@ -0,0 +1,74 @@ +package gemma3 + +import ( + "fmt" + + "github.com/ollama/ollama/kvcache" + "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model" + "github.com/ollama/ollama/model/input" +) + +type Model struct { + model.Base + model.SentencePieceModel + + //*VisionModel `gguf:"v,vision"` + *TextModel + + //Projector *nn.Linear `gguf:"mm.0"` + + ImageProcessor +} + +func New(c ml.Config) (model.Model, error) { + // Verify unified config + if c.Uint("vision.block_count") == 0 { + return nil, fmt.Errorf("non-unified vision model not supported") + } + m := Model{ + SentencePieceModel: model.NewSentencePieceModel( + c.String("tokenizer.ggml.pretokenizer", `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`), + 
&model.Vocabulary{ + Values: c.Strings("tokenizer.ggml.tokens"), + Scores: c.Floats("tokenizer.ggml.scores"), + Types: c.Uints("tokenizer.ggml.token_type"), + BOS: int32(c.Uint("tokenizer.ggml.bos_token_id")), + AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), + EOS: int32(c.Uint("tokenizer.ggml.eos_token_id")), + AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), + }, + ), + ImageProcessor: newImageProcessor(c), + //VisionModel: newVisionModel(c), + TextModel: newTextModel(c), + } + + slidingWindowLen := int32(c.Uint("text.attention.sliding_window")) + m.Cache = kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift)) + + return &m, nil +} + +func (m *Model) Forward(ctx ml.Context, opts input.Options) (ml.Tensor, error) { + inputs, err := ctx.Input().FromIntSlice(opts.Inputs, len(opts.Inputs)) + if err != nil { + return nil, err + } + + positions, err := ctx.Input().FromIntSlice(opts.Positions, len(opts.Positions)) + if err != nil { + return nil, err + } + + outputs, err := ctx.Output().FromIntSlice(opts.Outputs, len(opts.Outputs)) + if err != nil { + return nil, err + } + + return m.TextModel.Forward(ctx, inputs, positions, outputs, m.Cache), nil +} + +func init() { + model.Register("gemma3", New) +} diff --git a/model/models/gemma3/model_text.go b/model/models/gemma3/model_text.go new file mode 100644 index 00000000..051e06c5 --- /dev/null +++ b/model/models/gemma3/model_text.go @@ -0,0 +1,193 @@ +package gemma3 + +import ( + "math" + + "github.com/ollama/ollama/kvcache" + "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/ml/nn" + "github.com/ollama/ollama/model" +) + +type TextOptions struct { + hiddenSize, numHeads, numKVHeads int + attnKeyLen, attnValLen int + eps, ropeScale float32 + ropeLocalBase, ropeGlobalBase float32 + finalLogitSoftcap float32 + largeModelScaling bool +} + +type TextModel struct { + model.Base + model.SentencePieceModel + + TokenEmbedding *nn.Embedding `gguf:"token_embd"` + Layers []TextLayer `gguf:"blk"` + OutputNorm *nn.RMSNorm `gguf:"output_norm"` + Output *nn.Linear `gguf:"output,alt:token_embd"` + + *TextOptions +} + +const ( + gemma27BLayerCount = 46 +) + +const ( + cacheTypeSWA = iota + cacheTypeCausal +) + +func newTextModel(c ml.Config) *TextModel { + m := TextModel{ + SentencePieceModel: model.NewSentencePieceModel( + c.String("tokenizer.ggml.pretokenizer", `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`), + &model.Vocabulary{ + Values: c.Strings("tokenizer.ggml.tokens"), + Scores: c.Floats("tokenizer.ggml.scores"), + Types: c.Uints("tokenizer.ggml.token_type"), + BOS: int32(c.Uint("tokenizer.ggml.bos_token_id")), + EOS: int32(c.Uint("tokenizer.ggml.eos_token_id")), + }, + ), + Layers: make([]TextLayer, c.Uint("block_count")), + TextOptions: &TextOptions{ + hiddenSize: int(c.Uint("embedding_length")), + numHeads: int(c.Uint("attention.head_count")), + numKVHeads: int(c.Uint("attention.head_count_kv")), + attnKeyLen: int(c.Uint("attention.key_length")), + attnValLen: int(c.Uint("attention.value_length")), + eps: c.Float("text.attention.layer_norm_rms_epsilon"), + ropeLocalBase: c.Float("text.rope.local.freq_base", 10000.0), + ropeGlobalBase: c.Float("text.rope.global.freq_base", 1000000.0), + ropeScale: c.Float("text.rope.freq_scale", 1.0), + finalLogitSoftcap: c.Float("text.final_logit_softcapping"), + }, + } + + slidingWindowLen := int32(c.Uint("text.attention.sliding_window")) + m.Cache = 
kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift)) + + return &m +} + +type TextSelfAttention struct { + Query *nn.Linear `gguf:"attn_q"` + QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"` + Key *nn.Linear `gguf:"attn_k"` + KeyNorm *nn.RMSNorm `gguf:"attn_k_norm"` + Value *nn.Linear `gguf:"attn_v"` + Output *nn.Linear `gguf:"attn_output"` +} + +func (sa *TextSelfAttention) Forward(ctx ml.Context, layer int, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor { + batchSize := hiddenState.Dim(1) + ropeType := uint32(2) + + ropeBase := opts.ropeLocalBase + if (layer+1)%6 == 0 { + ropeBase = opts.ropeGlobalBase + } + + q := sa.Query.Forward(ctx, hiddenState) + q = q.Reshape(ctx, opts.attnKeyLen, opts.numHeads, batchSize) + q = sa.QueryNorm.Forward(ctx, q, opts.eps) + q = q.RoPE(ctx, positionIDs, nil, uint32(opts.attnKeyLen), ropeType, ropeBase, opts.ropeScale) + + if opts.largeModelScaling { + q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.hiddenSize/opts.numHeads))) + } else { + q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.attnKeyLen))) + } + + k := sa.Key.Forward(ctx, hiddenState) + k = k.Reshape(ctx, opts.attnKeyLen, opts.numKVHeads, batchSize) + k = sa.KeyNorm.Forward(ctx, k, opts.eps) + k = k.RoPE(ctx, positionIDs, nil, uint32(opts.attnKeyLen), ropeType, ropeBase, opts.ropeScale) + + v := sa.Value.Forward(ctx, hiddenState) + v = v.Reshape(ctx, opts.attnValLen, opts.numKVHeads, batchSize) + + scaleFactor := 1.0 + kqv := nn.Attention(ctx, q, k, v, scaleFactor, cache) + kqv = kqv.Reshape(ctx, opts.attnValLen*opts.numHeads, batchSize) + + return sa.Output.Forward(ctx, kqv) +} + +func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { + ropeBase := m.TextOptions.ropeLocalBase + if (layer+1)%6 == 0 { + ropeBase = m.TextOptions.ropeGlobalBase + } + + return key.RoPE(ctx, shift, nil, uint32(m.TextOptions.attnKeyLen), uint32(2), ropeBase, m.TextOptions.ropeScale), nil +} + +type TextMLP struct { + Up *nn.Linear `gguf:"ffn_up"` + Down *nn.Linear `gguf:"ffn_down"` + Gate *nn.Linear `gguf:"ffn_gate"` +} + +func (mlp *TextMLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *TextOptions) ml.Tensor { + hiddenState = mlp.Gate.Forward(ctx, hiddenState).GELU(ctx).Mul(ctx, mlp.Up.Forward(ctx, hiddenState)) + return mlp.Down.Forward(ctx, hiddenState) +} + +type TextLayer struct { + AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` + SelfAttention *TextSelfAttention + PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` + MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` + MLP *TextMLP + PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"` +} + +func (l *TextLayer) Forward(ctx ml.Context, layer int, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor { + residual := hiddenState + + hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = l.SelfAttention.Forward(ctx, layer, hiddenState, positionIDs, cache, opts) + hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = hiddenState.Add(ctx, residual) + residual = hiddenState + + hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps) + hiddenState = l.MLP.Forward(ctx, hiddenState, opts) + hiddenState = l.PostMLPNorm.Forward(ctx, hiddenState, opts.eps) + return hiddenState.Add(ctx, residual) +} + +func (m *TextModel) Forward(ctx ml.Context, inputs, positions, outputs ml.Tensor, cache kvcache.Cache) ml.Tensor { + hiddenState := m.TokenEmbedding.Forward(ctx, inputs) + 
hiddenState = hiddenState.Scale(ctx, math.Sqrt(float64(m.TextOptions.hiddenSize))) + + if len(m.Layers) == gemma27BLayerCount { + m.TextOptions.largeModelScaling = true + } + + for i, layer := range m.Layers { + // gemma alternates between the sliding window (local) and causal (global) + // kv cache every 6 layers + cacheType := cacheTypeSWA + if (i+1)%6 == 0 { + cacheType = cacheTypeCausal + } + cache.SetLayer(i) + wc := cache.(*kvcache.WrapperCache) + wc.SetLayerType(cacheType) + hiddenState = layer.Forward(ctx, i, hiddenState, positions, cache, m.TextOptions) + } + + hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps) + hiddenState = m.Output.Forward(ctx, hiddenState) + + // final logit softcap + hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.TextOptions.finalLogitSoftcap)) + hiddenState = hiddenState.Tanh(ctx) + hiddenState = hiddenState.Scale(ctx, float64(m.TextOptions.finalLogitSoftcap)) + + return hiddenState.Rows(ctx, outputs) +} diff --git a/model/models/gemma3/process_image.go b/model/models/gemma3/process_image.go new file mode 100644 index 00000000..5cf963e8 --- /dev/null +++ b/model/models/gemma3/process_image.go @@ -0,0 +1,57 @@ +package gemma3 + +import ( + "image" + + "github.com/ollama/ollama/ml" + "github.com/ollama/ollama/model/imageproc" +) + +type ImageProcessor struct { + imageSize, numChannels int +} + +func newImageProcessor(c ml.Config) ImageProcessor { + return ImageProcessor{ + imageSize: int(c.Uint("vision.image_size")), + numChannels: int(c.Uint("vision.num_channels")), + } +} + +func (p *ImageProcessor) pack(img image.Image, mean, std [3]float32) []float32 { + var pixelVals []float32 + + bounds := img.Bounds() + var rVals, gVals, bVals []float32 + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + c := img.At(x, y) + r, g, b, _ := c.RGBA() + rVal := float32(r>>8) / 255.0 + gVal := float32(g>>8) / 255.0 + bVal := float32(b>>8) / 255.0 + + rVal = (rVal - mean[0]) / std[0] + gVal = (gVal - mean[1]) / std[1] + bVal = (bVal - mean[2]) / std[2] + + rVals = append(rVals, rVal) + gVals = append(gVals, gVal) + bVals = append(bVals, bVal) + } + } + pixelVals = append(pixelVals, rVals...) + pixelVals = append(pixelVals, gVals...) + pixelVals = append(pixelVals, bVals...) 
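+	// pixelVals is laid out channel-planar: all red values first,
+	// then all green values, then all blue values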
+ + return pixelVals +} + +func (p ImageProcessor) ProcessImage(img image.Image) ([]float32, error) { + outputSize := image.Point{p.imageSize, p.imageSize} + newImage := imageproc.Composite(img) + newImage = imageproc.Resize(newImage, outputSize, imageproc.ResizeBilinear) + + data := p.pack(newImage, imageproc.ImageNetStandardMean, imageproc.ImageNetStandardSTD) + return data, nil +} diff --git a/model/models/llama/model.go b/model/models/llama/model.go index 1f27f522..19a2ab8c 100644 --- a/model/models/llama/model.go +++ b/model/models/llama/model.go @@ -76,14 +76,15 @@ type SelfAttention struct { func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { batchSize := hiddenState.Dim(1) headDim := opts.hiddenSize / opts.numHeads + ropeType := uint32(0) q := sa.Query.Forward(ctx, hiddenState) q = q.Reshape(ctx, headDim, opts.numHeads, batchSize) - q = q.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + q = q.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, ropeType, opts.ropeBase, opts.ropeScale) k := sa.Key.Forward(ctx, hiddenState) k = k.Reshape(ctx, headDim, opts.numKVHeads, batchSize) - k = k.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + k = k.RoPE(ctx, positionIDs, sa.RopeFactors, opts.ropeDim, ropeType, opts.ropeBase, opts.ropeScale) v := sa.Value.Forward(ctx, hiddenState) v = v.Reshape(ctx, headDim, opts.numKVHeads, batchSize) @@ -96,7 +97,7 @@ func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Ten } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { - return key.RoPE(ctx, shift, m.Layers[layer].SelfAttention.RopeFactors, m.ropeDim, m.ropeBase, m.ropeScale), nil + return key.RoPE(ctx, shift, m.Layers[layer].SelfAttention.RopeFactors, uint32(0), m.ropeDim, m.ropeBase, m.ropeScale), nil } type MLP struct { diff --git a/model/models/mllama/model_text.go b/model/models/mllama/model_text.go index 373589f9..40c9a970 100644 --- a/model/models/mllama/model_text.go +++ b/model/models/mllama/model_text.go @@ -20,14 +20,15 @@ type TextSelfAttention struct { func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions, _ ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor { batchSize := hiddenState.Dim(1) headDim := opts.hiddenSize / opts.numHeads + ropeType := uint32(0) query := sa.Query.Forward(ctx, hiddenState) query = query.Reshape(ctx, headDim, opts.numHeads, batchSize) - query = query.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + query = query.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, ropeType, opts.ropeBase, opts.ropeScale) key := sa.Key.Forward(ctx, hiddenState) key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize) - key = key.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, opts.ropeBase, opts.ropeScale) + key = key.RoPE(ctx, positions, sa.RopeFactors, opts.ropeDim, ropeType, opts.ropeBase, opts.ropeScale) value := sa.Value.Forward(ctx, hiddenState) value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize) @@ -40,8 +41,9 @@ func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions, _ m } func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { + // This will only get called for layers in the cache, which are just the self attention layers if sa, ok := m.Transformer.Layers[layer].(*TextSelfAttentionDecoderLayer); ok { 
- return key.RoPE(ctx, shift, sa.SelfAttention.RopeFactors, m.ropeDim, m.ropeBase, m.ropeScale), nil + return key.RoPE(ctx, shift, sa.SelfAttention.RopeFactors, m.ropeDim, uint32(0), m.ropeBase, m.ropeScale), nil } return key, nil diff --git a/model/models/models.go b/model/models/models.go index d0b68b32..ce1d2ce0 100644 --- a/model/models/models.go +++ b/model/models/models.go @@ -1,6 +1,8 @@ package models import ( + _ "github.com/ollama/ollama/model/models/gemma2" + _ "github.com/ollama/ollama/model/models/gemma3" _ "github.com/ollama/ollama/model/models/llama" _ "github.com/ollama/ollama/model/models/mllama" ) diff --git a/model/process_text.go b/model/process_text.go index 0d75a0ed..cd1deb65 100644 --- a/model/process_text.go +++ b/model/process_text.go @@ -18,6 +18,15 @@ const ( SpecialEOS ) +const ( + TOKEN_TYPE_NORMAL = iota + 1 + TOKEN_TYPE_UNKNOWN + TOKEN_TYPE_CONTROL + TOKEN_TYPE_USER_DEFINED + TOKEN_TYPE_UNUSED + TOKEN_TYPE_BYTE +) + type TextProcessor interface { Encode(s string, addSpecial bool) ([]int32, error) Decode([]int32) (string, error) @@ -27,7 +36,7 @@ type TextProcessor interface { type Vocabulary struct { Values []string Types []uint32 - Scores []uint32 + Scores []float32 Merges []string BOS, EOS int32 @@ -76,7 +85,7 @@ func (v *Vocabulary) Decode(id int32) string { func (v *Vocabulary) SpecialVocabulary() []string { v.specialOnce.Do(func() { for i := range v.Values { - if v.Types[i] == 3 { + if v.Types[i] == TOKEN_TYPE_CONTROL { v.special = append(v.special, v.Values[i]) } } diff --git a/model/process_text_spm.go b/model/process_text_spm.go new file mode 100644 index 00000000..c0bc973f --- /dev/null +++ b/model/process_text_spm.go @@ -0,0 +1,221 @@ +package model + +import ( + "iter" + "log/slog" + "strings" + + "github.com/dlclark/regexp2" + queue "github.com/emirpasic/gods/v2/queues/priorityqueue" +) + +const spmWhitespaceSep = "▁" + +func replaceWhitespaceBySeperator(s string) string { + return strings.ReplaceAll(s, " ", spmWhitespaceSep) +} + +type SentencePieceModel struct { + maxTokenLen int + pre *regexp2.Regexp + vocab *Vocabulary +} + +func NewSentencePieceModel(pre string, vocab *Vocabulary) SentencePieceModel { + slog.Debug("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5]) + + counter := map[int]int{} + var maxTokenLen int + for cnt := range vocab.Types { + switch vocab.Types[cnt] { + case TOKEN_TYPE_NORMAL, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_UNUSED: + maxTokenLen = max(maxTokenLen, len(vocab.Values[cnt])) + fallthrough + default: + counter[int(vocab.Types[cnt])] += 1 + } + } + + slog.Debug("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL], + "user defined", counter[TOKEN_TYPE_USER_DEFINED], "unused", counter[TOKEN_TYPE_UNUSED], "byte", counter[TOKEN_TYPE_BYTE], + "max token len", maxTokenLen) + + return SentencePieceModel{ + maxTokenLen: maxTokenLen, + pre: regexp2.MustCompile(pre, regexp2.Unicode|regexp2.RE2), + vocab: vocab, + } +} + +func (spm SentencePieceModel) Is(id int32, special Special) bool { + return spm.vocab.Is(id, special) +} + +func (spm *SentencePieceModel) split(s string) iter.Seq[string] { + return func(yield func(string) bool) { + for m, _ := spm.pre.FindStringMatch(s); m != nil; m, _ = spm.pre.FindNextMatch(m) { + if !yield(m.String()) { + break + } + } + } +} + +func (spm SentencePieceModel) Encode(s string) ([]int32, error) { + fragments := []fragment{{value: s}} + for _, 
special := range spm.vocab.SpecialVocabulary() { + // TODO: process special tokens concurrently + id := spm.vocab.Encode(special) + for i := 0; i < len(fragments); i++ { + frag := fragments[i] + if len(frag.ids) > 0 { + continue + } + + var middle []fragment + switch i := strings.Index(frag.value, special); { + case i < 0: + middle = append(middle, frag) + case i > 0: + middle = append(middle, fragment{value: frag.value[:i]}) + fallthrough + default: + middle = append(middle, fragment{value: special, ids: []int32{id}}) + if rest := frag.value[i+len(special):]; rest != "" { + middle = append(middle, fragment{value: rest}) + } + } + + fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...) + } + } + slog.Debug("fragments", "frags", fragments) + + var ids []int32 + for _, frag := range fragments { + if len(frag.ids) > 0 { + ids = append(ids, frag.ids...) + continue + } + + for split := range spm.split(frag.value) { + split = replaceWhitespaceBySeperator(split) + + var sb strings.Builder + sb.Write([]byte(split)) + if id := spm.vocab.Encode(sb.String()); id >= 0 { + ids = append(ids, id) + continue + } + + runes := []rune(sb.String()) + pq := queue.NewWith(func(a, b any) int { + priA := a.(*candidate) + priB := b.(*candidate) + if priA.score > priB.score || (priA.score == priB.score && priA.a < priB.a) { + return -1 + } + return 1 + }) + + merges := make([]merge, len(runes)) + for r := range runes { + merges[r] = merge{ + p: r - 1, + n: r + 1, + runes: []rune{runes[r]}, + } + } + + slog.Debug("tokenizer", "merges", merges) + + pairwise := func(a, b int) *candidate { + if a < 0 || b >= len(runes) { + return nil + } + + left, right := string(merges[a].runes), string(merges[b].runes) + if id := spm.vocab.Encode(left + right); id >= 0 { + return &candidate{ + a: a, + b: b, + score: spm.vocab.Scores[id], + } + } + return nil + } + + for i := range len(runes) - 1 { + if pair := pairwise(i, i+1); pair != nil { + pq.Enqueue(pair) + } + } + + pqv := pq.Values() + for _, v := range pqv { + e := v.(*candidate) + slog.Debug("candidate", "candidate", e) + } + + for !pq.Empty() { + v, _ := pq.Dequeue() + pair := v.(*candidate) + left, right := merges[pair.a], merges[pair.b] + + slog.Debug("pair", "left", left, "right", right) + if len(left.runes) == 0 || len(right.runes) == 0 { + continue + } + + merges[pair.a].runes = append(left.runes, right.runes...) 
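+				// the left merge has absorbed the right one above; clear the
+				// right node and splice the linked list around it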
+ merges[pair.b].runes = nil + merges[pair.a].n = right.n + if right.n < len(merges) { + merges[right.n].p = pair.a + } + + if pair := pairwise(merges[pair.a].p, pair.a); pair != nil { + pq.Enqueue(pair) + } + + if pair := pairwise(pair.a, merges[pair.a].n); pair != nil { + pq.Enqueue(pair) + } + } + + slog.Debug("merges", "merges", merges) + + for _, merge := range merges { + if len(merge.runes) > 0 { + if id := spm.vocab.Encode(string(merge.runes)); id >= 0 { + ids = append(ids, id) + } else { + slog.Debug("missing token", "token", string(merge.runes)) + } + } + } + } + } + slog.Debug("encoded", "ids", ids) + + return ids, nil +} + +type candidate struct { + a, b int + score float32 +} + +func (spm SentencePieceModel) Decode(ids []int32) (string, error) { + var sb strings.Builder + for _, id := range ids { + data := spm.vocab.Decode(id) + data = strings.ReplaceAll(data, spmWhitespaceSep, " ") + if _, err := sb.WriteString(data); err != nil { + return "", err + } + } + + slog.Debug("decoded", "ids", ids, "text", sb.String()) + return sb.String(), nil +} diff --git a/model/process_text_spm_test.go b/model/process_text_spm_test.go new file mode 100644 index 00000000..72bd629c --- /dev/null +++ b/model/process_text_spm_test.go @@ -0,0 +1,110 @@ +package model + +import ( + "log/slog" + "os" + "path/filepath" + "slices" + "testing" + + "google.golang.org/protobuf/proto" + + "github.com/ollama/ollama/convert/sentencepiece" +) + +func loadSentencePieceVocab(t *testing.T) SentencePieceModel { + t.Helper() + + bts, err := os.ReadFile(filepath.Join("testdata", "gemma2", "tokenizer.model")) + if err != nil { + t.Fatal(err) + } + + var spm sentencepiece.ModelProto + if err := proto.Unmarshal(bts, &spm); err != nil { + t.Fatal(err) + } + + preTokenizer := `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+` + + var v Vocabulary + + for _, piece := range spm.GetPieces() { + v.Values = append(v.Values, piece.GetPiece()) + v.Scores = append(v.Scores, piece.GetScore()) + switch t := piece.GetType(); t { + case sentencepiece.ModelProto_SentencePiece_UNKNOWN, + sentencepiece.ModelProto_SentencePiece_CONTROL, + sentencepiece.ModelProto_SentencePiece_UNUSED, + sentencepiece.ModelProto_SentencePiece_BYTE: + v.Types = append(v.Types, uint32(t)) + default: + tt := uint32(sentencepiece.ModelProto_SentencePiece_NORMAL) + // todo parse the special tokens file + // - this will roundtrip correctly but the and + // tokens aren't processed + v.Types = append(v.Types, tt) + } + } + + return NewSentencePieceModel(preTokenizer, &v) +} + +func TestSentencePieceEncode(t *testing.T) { + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})) + slog.SetDefault(logger) + + tokenizer := loadSentencePieceVocab(t) + + t.Run("basic roundtrip", func(t *testing.T) { + t.Parallel() + + cases := []string{ + "hello", + "hello ", + "hello ", + " hello", + " hello ", + " hello ", + "hello world", + "请考试我的软件!12345", + "你好", + "Hello 你好 world!", + } + + for _, want := range cases { + ids, err := tokenizer.Encode(want) + if err != nil { + t.Fatal(err) + } + + if got, err := tokenizer.Decode(ids); err != nil { + t.Fatal(err) + } else if got != want { + t.Errorf("got %q, want %q [%#v]", got, want, ids) + } + } + }) + + t.Run("special tokens", func(t *testing.T) { + type candidate struct { + token string + ids []int32 + } + + cases := []candidate{ + {"", []int32{2}}, + {"", []int32{1}}, + } + + for _, want := range cases { + ids, err := 
tokenizer.Encode(want.token) + if err != nil { + t.Fatal(err) + } + if !slices.Equal(ids, want.ids) { + t.Errorf("got %#v, want %#v", ids, want.ids) + } + } + }) +} diff --git a/model/testdata/gemma2/tokenizer.model b/model/testdata/gemma2/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..14a242262f212c6e4e55913220f22c7d768e0524 GIT binary patch literal 4241003 zcmcG%37BNpS>AuK4VbnK4cOR#!DU&tBrj5Hmu%U-Jv}|r*cwfFdSrRURlUq~X}Y`0 zz07EYm0SsLU<1v*grxxEjjg?6jF*eY5JCvl5CReqQn3gjgpdv)gb+ghzxR9ZJ>9xA z{u7pd=9%Yv&-N|n+_QXVsfjO~_}p6t-NhF@DdX=ckDd@->Tf&(&o}Va+Lze)xMgc?Yh!6~sw!xI*wqJw zU2{O#wFiVIOD85OJ+9tUWMAN(Ci?>SRM{7}r^~*; zJ!SRY9Hh}T2WfQ8 zK^k3ikVe-Wq|vnpX>{#D8eMykM%Nys(X|I@bnQVJU3-v5*B+$NwFhZ*?Lit{caTQc z9i-892WfQOK^k3mkVe-Xq|tQ;X>{E|8eMmgM%Nvr(e(#ubp1gZU4M{9*B_+O^#^Hm z{XrUCe~?DkAEeRs2WfQuK^om~kVZEgq|prrX>`Lu8r^V^MmHR!(G3S_bi+X!-Efdb zHyotVjR$FT<3SqTc#uXn9;DHY2WfQUK^on7kVZEiq|uEBX>{X38r^h|MmHU#(M<X>{{J8r^)5 zMmHa%(an1rJ!RsHZrRxEu5Z4sf9iFcTkC6SnEkSeC)~2Mw)oe=Cni2;qMlMbZsJiB z2|Rw{(Gz><2@{V2rATHtf(bK&37i6$!8jm=eDTC%tB@4L6s8VJVT`DIf3;vwoOs+` zU`qO=iO25?O~GioKRAUynR(eG{_4^(+9SgE)cSH}VvWB#(^pJiamNGYB^@sz(IMM`qX z#Fzg~Nm80;Onk-Pm?kBUyGOJ78ZJ%{7JX3#Rq$58f9;Git=_i=-v|^~inkRO5m1`;uV& zzbVPSRG90tq}rG8*%MFy>~E8NUyf@hp8dDXu`kmN6VLnG zXWEzV=838Qr+oXe-ZF9R|7+HLxnDeS!`~tIz5)(S-28VdU|$)t6Sw@G%h*@UZ4)p4 z`xLXUq?b+{`g@kNudq8NX8-<$?JMtP6Sw_?l((@ef$)zJgyh z@v?u|g7=mE+KH3@;AQVCzB6&xKSuHUI4n%O>K}>2J|?FoUi**7WFH^7u{OK&D@$n> zDyP*#cd*&(@6Ajj)$&sEpV|GjIp1GApYlbTQ~myC3SmNe%jWtc;qoJ4?T~v6Q?}>BrYKJL2w5ET2w@!b2x;W;yK0{}#(VM@?+GO7m-ki@0r%Dr0ryr90r%Do z0ryr60r%Dl%rb77U+FKLdHq&@bInUcD_d=B zuJ_kYKca4yRug2Mam)N8Q120F>k()pAxW^YwU^U)kmBW3UFa|FnLusts)j6~#y(+W z%&Ydw47evwz&&{a?g=D@SM8Um3fL1#t@x@($fP7w_e|9b&Cd&0FadSbNC9$! z$hAF^3poWe?U7u^DMiyB$z_~UH0_aG#VJM89?3Z$cBd3gdn6}!O3}1Oa&D&-O?zD5w8!;LdtBeN z$MsEnT;H@ua-Y|1lj}SMH0_aG<|#$f9?4anQZ((6T;wT5(;mq+o>DaJaYNG{H#F^W zL(?8NH0^Oi(;mtBUb9V(_Y}~yM{>BQ6is_1M|(=qv`2EVrxZ^iKgzdn9LnO3}1Oa`LAXO?xEgeoE1_$IZ?8$IVT9 z+}yOs%}smU+_cBdO?%wjw8wOFemdQpk4-o2G2NVxO*ic^-JFk2H|;UqoR3X6?J?c7 z$8^&k(@lF!H|;Uqw8wPQ9@9;G9BSI*P}3fVn)W!QJq|VPaj0pJLrr@eYTDya z(;kPK_BhnE$DyV@4mIs@sA-QwO?%8V?J?7|$4t{6GfjKUH0?3dw8u=-9y3jQ%rxyW z)3nD-(;hQTd(1TLG1IiiOw%4SO?w<}+T(E39*3LuINY?y;if$fH|=q_X^+EAdmL`s z<8ad+hnw~|+_cBxracZf?QytikHbxS%r@;Y+qB1Q(;l-;d(1ZNG267qY||dIO?%8X z?J?W5$86IcvrT)Md-nK(iAVL;v}snb!}}Oqhns-nlk5vWYH3~J zWfk59Y;yf!wK; zq#fjD53*xYB@aw4Ll!}9sN}Z>xwZ(o9b|U}^1JJj`+#ADuc(Z72e~M6r%oa^ABpe{ zg_m{s0^k>Gpz=qNs3)!u2Dv73>`Ueg>l;m~ zur~*JT1?&=Kd`sWKcL&*3 z>hBG5LALw8I<=DgfK#gh41*k~z#k5BUgSH1T$8Hr401u_yB(@*?+voAy8ZDWx5VTp z9V%6KOe*yKL3U-K4+goR5=>Z{Dn_(GDI8oo6$x5;-}c%e{^q6P575S#aWWiB`SKOU#?N;@hdfoc0Z~M>Bh(Y zYf;Eje1gB85~|_+MvdY#f3QZ;XZ=*}>5by^zI12c~8e9(eqcFtl^_~B59ex(D8{t>H0P;`j@MC~Ktw9^`XEitv`16qD zY;@3nQAZpi;xD79Et&nVOsdAS3+iinQuT8#)Mi%gJ?cVLAE%;6esrW;5r@ZIsCpDh z`?(iXPu)$mzt5{94g)?vGW6B!JT}s7hDcvfGYRWV)H%MEWRI((&gn~fe2t=MKOu_X zh!injXp!{+eNm8`l6TT6)EFNxsg7+}6M=x&S|PYk*j9p{rG`NA2Je^~^t z$H?>KTDTKLJSEa}rT>=)`NB&ezaq%fYEfTVr@kHV)F`IECSkrR=+zhlzq&T+La6_= zI?oZ(wW6%6u7Z4eC7iy)OM+eqp`THwpC#hb8c6PEMk=@ZXVnq65ph{W^w(9?<))+m zy&|~vWzT0v@KTJF&xzp6uORrjbuo7Uo>zmr09Qt$p1P5%>hNa+rXqYm%PtNBG6zN1D_jmK*fF2oSmt|Mcn@v<7NsB1nEg7!AmvghiUm}Q)dn9b1V6T@2Yb|C-I8N!NEpiURmW<3wu>9&iRX5&Equ|Amn~+(CZ=e*VL)P zO0TO0xRmlbb=1;5pl*#~YBpb^=u8(v)K#^Z#X2J9JWEmDiaUT)!FCWG)ag3es+z!Z zD zFR9mq?A;5wQRk1@>t>B&ma`RnxRXM^;>we3pNmK#t zm~-k89&n6WW>L}WGWXZjpT*{K53LHY777}u-e{73uYhFu*^0*VrU>rQs0&DqO7YRW zxn{S)cvXNXW?4huTWWD%PB?%Z{g@EFHRu@k3TUr=UP_LyuZz-@tw?B5_4y4JiaIMG ztzgU!-&U7!CsBYHE~WX~Dg-yOG=y|o%!nDVcZ+1!Q 
z)-c<0n26?Zl0TPzI7;L{Sf-hkYxPY`;G12?> zh|mXEK$3nnzSHjrdPVJ_fJQ0$K=%mWS=Z>P_`nM=*{+IywmE@((8fG#Z7DnAklxnM>NG?Hzozj#LkZ|dVOpb6g6 z*Yu+i%$XB{p($r;-aW)SBW6h>TLEnjJ>_|q^JofQKvR{y)vEh@ca*%O#twbB>SH0>s!=05PJ@7ZLX}QFu@NePMbj>f)op{|fs40@8&=)A`xD zutiOO3n1FeYV!NBDC(@HS_L#bx@!yio<1IF&gyeAxuUM*6Onv9Eo>|X_72$ygGgu7g`++PVYSU{=@Jzf6Qy4KgCUVxatr*R+~MLK4xucRmd5nCbg z#X5p%bpe3KS$T>iPf}1qG4201k!KJU4DHcz zKE~(Yh-~vq9uq)B#_U>O-fz}rp41lxLb%txj)({ALblYq6$!pR)`<_**|yZF8EvlH z|L|LNoZM0c2k3eKcx8gE-=1l(<`xr{Y+hi=T<;k zv`BgZtyCZn|QP`vB(~ z#BljvgV&PgCIxgA71QnA`g^&OH~?y08I$k-X0_C;Oh()JlSnb6^!q zFL41)J7kv1ho1du6!f)EARLl#BFTfz#|E-H+z9#9x zQaGH6*{3DU`HkoyzWgwI4r%RLb8g3sGGITYI z$e~Gm-xVZ=wAkUzDuI=B8rwd%j@;BXLk{$i#Ck$A7O)fRizrjWjR=_HweWSTxnCP=Kg8a4CSpcmBCb7&WKN?(d& z7S-95&}3Ghoqa)M=&LGnXoCAPGo(LfYQINQE}ialY_Ckz1qYjiq6L5?*9o$iT+*8A zaS=L)v^k_y|Ip~;umK?Yn#TUp^h!)AA767gMXSm|WZ7DY?=eNqqg4r_fQ9HUo)F2F z*7POh0OU~i0+KZA>cUHCTXCU}A+j%wR9cDVkfiFV11wB0X%1gH$#E3QzNnUJK;9fg zhWD2>${E`?~5k+vK5f_AmYmIVkoH9smLQIp|77W?7=G(y!qfJ9Qb zfQ0L5bw{Oxx70Gf#L}yFb7mCvtm)QSd#657?;TeD;NOY49I&!KYyD1R%KFVHUN zoc0<@M-5^!@x(|Lqj3(6+-u#KRRPeB?_9M|g8YSS_Tn3OQe9%K0dpXF4fO4pJf)Ao zs_GPuLW~i1N1f!)Dt^W!DdCy9)Dts+E`zJ>h4au?`h%9%faN zU+@uTPl<$^OLF?;1}x3h7yvNp(NC0+80W$VFn;#s&JZ194j`hfDOdsR`qi?r1XY!> zM*GXYqB5^OH`Cpit3k^WvKgdvEO$wjeP!h6YNeP%lS-XkQ3cCQQJQ9MMe?Uc@{RZg zpe>2Eyb2JR&&gmWFTafRUlr*Wv;xUZFRdd*0i8MkHQ_P^bmu*T`f979SxRm?j8~ZM zYjiJ7pN}oKr-i{TJ|J`F2J1`meiR0qJqOg1YXrv_{!&!A2ShvuiEal9o}q!CI^>3jnsc1P?_CiP8(P8-85` z_h>#jB*9zJdZEQ{O(K_$I2~W(^%1cZbLSjjMb)Vk328r}G{Q|9llL2<>{V@i<)$~I z6Ne7Hq91d_Laf7X3}eW<5H5lojQBlGv`pT2DNG>A{yhHB{ia$P4T(9(Mq=D16RBHwSs2X3db4$N2UkvKZiuQhET!iRnT1{GfX=FRy5A^%T&Q`Fxq!q*ejh z3u>`6ht3n~GG!@@dJC~DR)WaCpiH;a(l6+H&4CztYtov7`fw%E1*euN6b1(N1T@7vlm72OY0l)`GFAQYp_S*2s*nLz6eH+I4U+Tu9bS| zO^64=)^n8`qezjX50VQ6SIT%g`SMyk=KpZc!HBmK4p?Z{SXb`P5{MXA$fY(NF`8V& zxB(=lOPehqDZIC)A+B^XIIV{7;gI8&VxkUk{qru0+JhtMH`KrZ?haXM#MZ@(YkLt@ zbJK%ZBtye_iB?!Tcr*MJhl3M-7pY{hXx~4;U&x}SX&^M5u136U)?A`@uITV^nqJq( zQbOA(;c3i{M3$c1|2Z_e)3?I4Fu1*^aj^u*bS%!YTO$$U9^ttG26`1a04{k^E2q+s z-o`t$w?#gZzKnbT%A0)!xge=9yz^v%b_ww*WJha=e}_y!QpTd8fVTDd7&V(pNqM*q zAzxyxqGyGMmu7wiq|4t>|6C?GAMTTvx-PUBg~ZB?7V^v%AQ<;GKP#cJ(dcJmM7%w~ z!ls1J4Y~mefU8O8qDl}2EXFeISa7*RqYGpuj)vqS!Sy9AdVp53zZQpA?l9-~$I=>r zQn$~N4YDppmH;{%`*9jXkJpkMMwJ}o#EUx9RX7s=Yb>);(hEZ}wu8m=F>*m#8;M;w z;W^z1pLi@!}WfhdzMlQ~BT zur_ylr*zJd52!!Q0dQG~2^Dl@1v%MEM;#64_Q^Vm83wt4s?C`1LBp$BC;<>d{RmAv zyECHJ!w;GRTzahV3!sWn-%*-AwMBoCy*$ECN3(;bv~_!13lMrR-|z@ug4K*Z5bK`1 z>e9L8i-17tR^N9438UxhXiaH)JvPT*5#jVwBsaYjqn~MRCzHOW)%(9PiadIl*u17AmJh0ff;d3>c+JxofheBwJV>h=Ey+GCwX1R^n?e0c4lMAv+bq#Qt;+ZDYr9 zQ2@z*JQf3Gl`h<@r=3URSPo55(=r;QD)eVBz8$duE~_6W4wfTsxu?%4H|WM#QUI_< z>@k(5V^)*(>H-#`TLF@6DNZs#+vqY)335HJFU858y8~;+8?*f!{3R2Q;|n^KoL2>5 zlkwKm#uK-I9qyIdxGGB!=Jl`u)nMIdd3nV60M zDF#vAfZPTPv3r}XMxHLmw{l1qUrkBCWMX2jS-Qcd-3@DX%F}(F*UJ+gSWvL-bm2W@ zb!=>hY4(-%D~r;B<WIXZ6K-6*;kw-gM+8hAX^VPwl<>B9ORNVVxI;`o(n&sVHg?_@uVu( zMzA<>92StISyn$%KwJI0)J&XcfsQ!ath)`?k?JaP0@R3+zDRLhFZoIWEX1O!Fx#r( zin`Z4!8j#Z01<0*S=CVjC{@0R>}-%*n)^UnsxI|efI?nb8rqF=u;lS0J7>9O$OcKm zO))JTeB~wNfplqF_+|Ih4dQJ0rgE@jFGYwV#jeF>Z3#xclbAC*9|?OaF_+9i@WRVX zCTJ3M?}Zm6@-3~UPb<5(HpaB-AqQ5%VC4ejcD%L}j@gtrMz8E^!&K+GI=_?ytomHF z<5D=9rpD+{0ubnm*tB^=FyV8Eq0I<*&QKu7b9sRk3KF{u{87;KgGW%f^lDGmyzS~&pOqR0Z8G?!?c zfOK&yJhnofqUN?J*F+0Qu4TDMa_CC1aB_7LUhQg0QlzXbnVyslQ?%XIC0JiIgV2+* zF`)8&a=?W)mE9kVX0&C{f+i(XGmYN>bER*jtLQl4d|1v2569AJ2ypF(Z_CL}X^uQj zF&oU*vj>7xPi>+Ft=5IHa!4@^e_{tQRAAr5#I&b6pH=MdU8vSdS-1XMN%xAz!~t<)1{ zZ>rNSc=T!!KwMv4xye~N)(&LVf6A>^0A=Ad4PrybE#h!fw^lmtOne_>1BM^*dUGwt 
zYRu_dU|8wwW)4Y3bhWqCfu?Y3h|&rJdq0K-e93ZT1L{7`-it9ISPN%(m)p^eGSN5qf-7aHfeOJMNUhcgCHXfLMiUtecD=LX#(fRv-3 zj*%2V$|WrfI?z_^p|_%#V;0r13{%X8)d>lNUa%i49TehB5)kC5Ea>+F`d77TEgtWk6<*{Q8l6EvTayW9N$FblEpW$>B|CLHqMK)%R3AC&azLN*tW&fKg)h zFm>jP>pFWdbnu;;PC>$!4T?9z$l%%hq0l$it&qJUtwdS`pj1~I!Z|brKjRtb1W5@@ zSs8T-fR(yKUWlQytm>R|>8OpW4;cgfbmjIW7!z~W`jR`@8=R619zO|NUU58y;Kw|DZfpU-*ZndjPNe2u& zT0OjrllOh-_G94&4ZroCJmVbzDxcBF z;_%ftA3JpT-RgcJU2RKR1df3e6{oVZ?+E(k>Wf>@RkGj$q~K*$mB}sYyhw=_$@A*sor0}=6oN)f zYVWyla++JH$n3jA87fP*YJo^~@d2&5a{xABW69@8I>r>iltRNY7y;Jhrx{js4F0BL z+j*SwaHdJ$@b}a@hI_S@V0=nB&_X@~`x&f6z5?L-!B0v#!;~@XTLRr3&e0!?tDODV z5&7Oa=VBZ@X(h~Tolh+XJF6zWorG=&6(BTZ`3pyAOgv3bSwbQv$J!a8#sEvUu^A`e z|5@bO(l^(FrfklsYD+8!k?mACEjj>aV-ts#Q-CR6d!a)}w}UvNT_zYFxN*w3*w@S} z`@Sf3xvwFy1)vn2oy#Fz%AGpR*m2CAbFeb0UU~@WKB?ZkDG5Pjd7~L~#vo~0-BNHP zW0}$Vek&Q@7c@LiYqkMto}6=okz=%69y&vB0f|fKO7-x`j=`|x>`>g?`++F>M0A7D zu*b)XS#kr8GD{r;R>tZD0q&~g8mCT%DS%Ty>L*JAELb?X#|eg4F#Ew^!$ejcltC{} z(dGv1z0lq}02i{QX-nZa?Bl+GdD20#5R z@ByLqh8sS>HN-kOw*Q8XX1yXt&dCy5CK?;Dc#&DMABuALUOAQA0$BJ>T~W^+*A3rT z$6z(a&mtwH|HYI*R~9FisNxcgGW<*t>`ZT|U(bHHE}um|?11RN;y}G(id)w*uan@W z=IDi!a&n(~9RggHr+w0;B!Jd6i|xiHFTWISKa#lV&~gh1H@a8K%K?@c^Q+oY+EZ%M z1rP-mG~f+W)KcuE0?nKwE^0Po0;rdGG8U%qh@407xD}4j#IT31Wn&JqaXpCxT7pI2 zig04Bln{1kM75ltuPQ<4cQ@b27&0WIezG5}t9^dSp8FOd@hxar%?)~SuBu~z0V%xz zP)O`a4Xadf8=wTZjj4f-L8!{9$7(H-y)%l}ViD~C6cUTC9NPKx8bQa&Mjs3(Fu-aL zR0~7LE%#%K&%i#^F~A1dh|TBhUBPVhp^F8~47`7k1Kivkws6I9Tg&&_sDM%}IGCZRN7=^c@=Qq8i{3SHX;<+LJ z*x)YTWdE_c@{ehB0oi`t@J0j4#(Z2~Sp{MkDh#&PWuc)#Z&AH_2|&6|2S$)?Sm8wd z@j%`6ICRGrNX!4y#$lPQPPkjfHBC5qy;q+VT(uSrNqOzcE0s3q8ieR)__FdYaiXs-= zF| z4y3ya-OA8xq(ex>Te~-f^DXN7=+N|eB`cv3m|bd}MH_>#0WBF3v-d~w(cmEo$B#>* z9AJ@H=a8-g#-ta1g6^Ymp6NDRoae!JJ;>sqbef)KU}LA5y&V=Gh^+nic%jkeI7;UZ z=Ch%6(-(e7Rb+ZTz9wi}dPWv=3>W$-AG$V~4smB6bi)W&O$$I|Z>@4|=f<=F@BMVZ zDAUo0S(x_l4{dXL;q0h$`rgLS7zbM*V)mf`n>}6eZ2?f}3QiE^CgqgxK&lDqhH3%r zV&gLJ5NM^2>L?0`g6Y@Q9Rc{N_x9Sd4@cqmsy~NxDH~d3|?cHakHWYB#%yi=P71WPkMBK$hj4Vj0=GE9z@SEOlkS$0oVa4 zWm@gQ51X#31EmX(AN=-lX@pHkm zx-!xNA^4qlU{Xjpu#|(F6Hy6s8mU0l%$F02v4ukjqA@E|n*>nC)1MAAlX6gGpNi0# z+fWSB68Yjog2!TdmS7xpFPv|SVQFacs6MWgD3*m|AhMo`Q>+()(HSIZK~@%w+rl~6 zjM;oeD`40V@e1cU&Xi~9;4OWJCA2G^6`DL^V**@6oIc7v9g3fg-Q$+QhEH|@oN!u> z+71w2;rlG0sdN_2TEPrKX0gB_5vsOK0r*?goW}%U(4&mI+0Wb1>zc#20F>E_QzFo2 zw-`nQT5~)xH0HwjaMqJ=2%x;xvNJJ%xx~_jwi>hl)Es$mEtuc|v4LP?Hlg$ng zS=86WrfUJF{B=F`GISjF@&U3kVZIkkG6t)gh~uK!FV*Jbys!sRLaY`;qkJ4W0hGO@ zmsL8@YH5s31tcQKQ4weUhhU1uhe;Ke087s)B3J^cg_UmXjc5NdQu=#o8L(A@XgR>y zB4Gz8tHnh&r@QzToq8Ao>Lw%Wl#b+1iu41x%C(`A{cYEg6e0-peIdyA=ajz_IA{=e-(xGbaAT%?UmOeJ_-9z z33B;9!P)_1JbG&?urT0OG*S)g7_D?mfIiH_@P%{7`mdogo~|xblt9G2SZL-q1|U!gBzewUI6R zcAT!S{f)v=R7V`d9|94oz8VC=d@p8Sdkksw%@c`U7i;W|=N8y4rMJe@1Ub;k9X|p? zkTqiKHO^l-nkl+Ik|78-x6yZ%kcfRWc0tAfbX$+Z*uNGLjH=kX1t4Nh6Zae%PFuW= zqVEkvQ3Hx9DpCwgPBYY&ke1VJ9|%9x$FrUN>nL`h&ZPxSLFZ{vxr6)bG_?*i+)nyk zivr}rPHFfX0>3nV;?cW&!J#A&4*K~htO%e~t-Z5fuPRHEW9WJ*%e73^7$Gh>Ui#=H zxTal}0_e)}a}eLOhck1nW{*! z`sFd0f|2M8cvFdJE$F?zx&%|Ob}cLaWi-but=w( z4=5dZXKZGV0hFj&diL8vzKmAcg2W6fnuX@j&Q6;rStsG;xS3TrZMdg~rm1gqg0^AK z>eCqm5eX+Y!(#S3HlP+#EodUv<63QQz^aIv?EtD0G!ra#UuM(pfiS zj2|1ojYg6E+epW4qS&qjq49uDC5JSFnV3ZP7uqw@iLy8{A(^UX53tbstXuuId3|0bAn{9WU{}_YZ;<*9j zM?3zV)#aQu)z<={t~$XSl3Kk&cS}0ZC>JAV%%%zgY>R145r;sS;6w|rE(l?mJc^^E zVjBbFjw4~8Cyj_}fn3UF?7jkFemPF54kXOuYFzVCijR6&TEv}`tXR=U&};>rZa@AwW_0ulP? 
z%Tew#!SH0f3eW<;@ZqB1AJzn#;n6!_2%`9&&UF=4!uvn*@crKdH%Bq%Qg_hk<}-_9 zfNad6CC1b2;acRy>V!v&5E8R4<#Wmbwi&%6GszAZhT$bGQr7xqsUacgpk4dY3Aa{h z(qm|;aR0kPvh4Tj+M3t?WD7`<(O7e6*fEXaXb^3u1Fk;4RlkRp^2Q=@=)7m5H-m=x zefPl-5*0Z1gbLYzNM&rsD`qVMDxG52JqKfuY8dN)WGg=r1;mjll^|xNSvz$B-d#*)f}4QpBtN39jBv+lW~L;=(@d^#xw+3_tSlU4g^rl z<$7jqoRa?5dr(W8;sLYodjd-E`V0;q5Dw{geKuzEKM)S^EIyM zsJ8zUvMz4L5&=Nn>cmG5N#3)1cco)`T3b|wlW1K(gtQ6NWuuRyUd9U}lN%2=vOc$A{bfoWLR$ppsv4?F0xV+qUdBM{ zI!FJV{gHX-Y1fu%&L_eT+NqAqq9AK=(hnOscFr|F1ew8X^v^)-!xK!ZJAg0CFdoQ- zLuuK6sdZtN5;q%Kgt&y$dSNq93Gi*i$-oXFlu6g8CYn;mC6Xb~!kt>uWTvdr#@(+m zpsEl)7xip^Tq_paRteoX2VW5F>2~bhffU09y&hQrsE8GLZ-&m%JMA;vpt|@t;yot7 znk?#$O!i-+7`iRKkQRVqHnr=JL#ye|!V1#zv;Cwd3%2PGBghbhAcL6hl#Z%~^s%F4 zSUBxQ;kRnu*MfG5-tNf3lz8D?an+^cl;IUDQp(e@FFyoZ{C0GnDSC5Br+CLHI@Uqi zf3sb5pSlHYORQ+Qj)nwK)Wi3G*TeV!AX+-@ZHg+Ok&h#gey4EgqpLg?N}!eK z`ALQ`0cr;RJpX&$Zgo>b^T-w9cj)%QbH5)4m3*WX}~y?;V93Xpedd{Oxgil z{Q6QjT^;GY@$7$uOs6qQ3zD+A-w7XR^03E?OGiVmlZ7J$5`lDx8X<;GNJaVJKnbRh zJM=w|D>`l+|5-@H>`gt60T1(P5djd}H4NJ^y%4Ph8f(Q(4M^9|n$M|}j+2XX4Cx-S zTe(QG{}~0|8!LHeSM({p#hL?c3eLFGtU6$?Qkg8T$KjHKAb4@cuWC0#0$hi(a_MOF zrAv$%Hvx#hwXRnB=XLQ5zT*Hk_t(qgo*OdL3O8g2NX`rIL5l(!ChSaxQB%HwX0Rm? zOMEQ{#>S8+(7U%1F8g0KhnMTaZ2?Jn9$9ioO4M|n`s{$<_0o8r4w}ZX5l;Le01;1e z;928r={P!ZDt!!~w0U(ie^DEXQ?owH-y+D$&FPkY4unH5E^$D^;lU4t8`%-D-#&y^ zi5R1VbiHwUJwzE3;sV1*p8fCIm_9h!0>hxM*Ya~{HwY%8>SH?uQ1B_@3P_lf6%UU+ybXZ;iGHpvX0SbtYegE35-)u{WwSUx zY)Ch@^Sq}5N3iu<)Mi!oe`?s(t*jQ%so8QMItN?D`K{Q<=n#NBm+J-|BrL1WY6#ND zz8)XD0avNeehe_jd3iIkzpA@QjymD3^lFJfr-|3fav+7_5@1N`7;JIC2&Mp-((<)B zAEs<)3z`+5?wMSu%BdK5j4D))H@WND%=Er95(CsT%dF`68f zjyc*!%M`PzDrZ+m8nDxd29tnFySN4fG;3|TPJvvPJS-7ieeU?dAo}Gth?15!SJhPj zXntGj4F{&93n`!xQ5Pe(Az^lW-fyT44Kdh;ez|7{=n6g(Z$X#Nc#i&X1WDOlxogMJ zh^RXasJRP5gz9yY&#C1debP^{O%g)%skrRc0#M3k4{J?9!;H%iK5vsd;anVpXd8^) zr}3pzfe*_kGBB8rDssXXV3t3{9kadq*Ig&>y@r>Hh z7$})>N24DAnU{2ACwo*~=z@;jOaje*PK^cx3(ZQWApK=1NQ4cVhpnf+D1lu0mhuV{wSf_Bn*S$GPFEKCP!)H#4c zS(8i5wh_%tuK!LI=i8VA5LSE9Eocf^6{vx6+cDaJ9-4e1EqDhK33>}iQ$kxq9m9@W zk|Tnw&BeD+kZPP?kNlg2eo zzFj9{@AD(!6aBnOkso>ZeiWHBq-mYf9IgdKny#jBQz+y)CThd!)b1omU!B%REocg19fx>Rj)<#iIkZI`_!xfMk@x8}w+^(W8mKJ{9MzL^ ztVmIxc{4Xqwt>~?t-F63Oos%y7WL~qJ0JwO^@QG=Dgl^Ev$zqYgp4Z&PUAsw*MM^~ zYMqY@LFyIxBv9tGH=qTgh*h18n}W7F)OFnLaAaRMQQFQ%Ro|hy10jT-#~eVq@o?Ek z%7LwSJbt;IU>sc@CKyjZ?*Of*_8?0TdM<_IY6Nh(df0>x)H&nXG1Xp*X33#p{qc43 z)i@zP2}VM>7+WTfo;Rlv~W(S1$XRvsglHE$RjzCz_2Vlnlb33Pp{dR%YvQq+gkvOquRlgQz-yHoOYB@p#4xe`rHUd3WP zn@rl^8=_7p9iaEP^v4HD08D+`41YUidhi2im{M{x1SjAQ80L&meB`CG@i+!m>1^kEadl=4s@ka<_b$-< z`~r9O#lbK3@F$@WOAh1~q?GYf(NiFo5x>rmJ5nDHjkayXG+%cjIv|(t*OLZd^qJQ) zvqg$K7q3)qgI(-~ya2-l!?-&@M9{+rC{?DsY}wNmMqtW4rv82mUG;t%u2IsFvV#batoztwVVcOxA0Qy>?_uo(R+0azr@Q8UG%X$<=WRMP898vxx|>GGN{ z@DW4}X~To&3hRTZ(1QT3+{$r5Y&dZg(3smDwU+Msi zM0XQaKvMb{Eho33>rR?ytm1$hK^AdQE1{hP+wr`BGxz177(pYNoXMDI46+UUqSh`L zjyevKJ+ZFE(Ff2DRNH`WrUfLO?hs8u`t$ai3vmM@PnmVNtDQ1G`zi*#jw9uCL7)K$ zbG&{l_EZ3}t#sOkrfD#cRqsp;36OA@ZO7?&A(VD%dR0$VjZAN7@;5f!A5ew6&{bW> z&U5yp8uRiECY=Ohm9w6%w~W|Z4cAOc>ldeUa6L8m%Oq{5ug2*P2pukb7&{H1t9psw zGy|f+^efTMsk&NU4WTgs`v)v6cbw+scOI}QBviAs*lzF8{Z=VvI}b|eP|s+bY9U5 z5d$EF9@n+X0-8d(C;KKe1Ylh5DaDpt%5_X#>yC4sjb&7d;wC0F3at9+{y9G`I3|dq zSf**@-Zer1`IWLS59ydNhjf!b#EE^*7POm`e#vnPWNYCUK-e5i#T=tD+K{e+Re9q& zK+52_B8Lb@jX|)x zTV~pYhTX;c<51aGge7>dD*dw7BtbA)uu)qe7aLxHsRZNqTJi+fG`_WgRcnd+Y8^0a zsZ_Lt#umrh=;&M=kO^1twjsv;O!33CRUrJ?vCw32$JrK{I+T!5MrRY=c}}yhYgo zR+`3(U;`&c2UT_fvNW8>j61{Igus%O2f_~lD8hS{=m+hB*87?}m0)D(_hSY*a>@(u zK!Y)KrI37ZyG{tl-~4vedum+_Z#1aeN!TZcTVS-DnMF%TMBziE(A?nSo8(Ao1CVQ3 z1$3%Z+_j+Y9MO-rD;!~Iw;d5{+M#LoEz!#KefmAA654Xls3JxQ^3oAnB{;9e%q|c) 
zpZQEw`l^s~D-IV;0toLRObarkq*kV!=0qHS$m8s-YZ^HDdsR$mV;al4kq>3Z71a(CfY$)Ei=9|L%oco zuC@SD+cU8Rv7M4G=qT6Fkog9^Ao>BZ%WJQo&YX)uobVCAR^i=IiX4NHGq#I&O`l&w zsO)KVy0y4kHwm_2>pIcW0#f|tm!qA@ISR`mZ36wEdmCg$t0{kn0Q2s~YdwR6N1uq_ zGjw*Q(;Aw$Q>+KZAs9&)HPhK~N^IT6)}>>5tJJ~>+H!uT_-@ooa6>cKU8lTLFJ5uO zOXbD;fRmRC7XJ}B*^t@g^C{m`2_p1Zi z23IXcOZcIq`P{tv*h$gHw2Uo*HUKwM;;_t!5SYqgOFaUx*0i66*mabz7iE{!h8%Ig zXcB}{0|aV8A}A+UQ4qR1fTnwZ99ck1!Yp9iKx+eS9}bno#D##Wxo&ACFaT5DtY#4f zH1f>&MY?Ur#1ZWwG-5NqpvHFqX3ILGx@cVzhJ-VeID({JXAb*p#O> zvS-wC&S^_;5{MYL>pN&cQwZo6+fE*@@pMdcHUVv*tvxp*Q3zOR z&oNbs-cIp%>saJ)T=F9?AfulIy+b{k0XaMS6(p_7Lfv1+uA->RN&1)1Z5pNtq0pcZms4%JPc z)bAPZri2?lu#`QsHUvA?y7`9Q1gS0f#90eONh`XOI0bDZ#_I%mg7MT`+bLfpcYFuB zs>FL@qiNts=H=9|^D*MtvFIr7CZH&^E7w!g9Vf*M3pyrM0ufDn8meGW^`&z7CMKkW zPz{5agzN&4!7sonHhWg&*6|W@Pl7BWKZYjHWQzbajs3JKXv@I|NTbXhce#FByA6#H zGk$HY1A-;V=qd+J7IV465esK@0&*JwGp4gtwqzL+00UmuaP{wiUEi~O5~X8N+czFZ zDfZ_-Nri%7$vzZ)$8L&x@Pnv$SzVn>w7mqGB#1BbUd-2mr2guDZORZXtU%Bai2iX? zrwv`TSlp|c`F9AhfclmPP8-*P3usE6t9-xPPN;XUvGNdvto*zMO7A!&4E@`OKo}0R zFE(=2LiK%&fe1a1vwas5)9Hmz{h{OKQ6x_te>1fRAmNof71M%5LT+uvR0rg?dM@5& z0lKy;$86gfUyoQFC&kemxAg`<)#!%WSOKV)<#=m!$Y$H|)m#qXau+@I-66nyH#FBP zotR(U0E_@AeC3sVA!BIU^T8iMNypqShi-O7T|)S^CxK{q98&ZBQdJtt%{i^2y0>%(svdCVwM&<2lp$9J?%5Q1KKFU?{I?WVwqayl!j zkr2e#TGuqLOu4<|Nzll-Il+dSPJ35OJ9Yv7s<@I&LD{owd{?YsCc(CypUniqhb~R( z0uU+s-W=MtyZA=51yO2fBbFVfjH|+fiq`IB0gWNg)0np*t4h*9HcarSRN4Vnjh{BH ziC0Mol6b%w0g&X7rW#{tB&j~?T~MW8^zzwrYGbp@G|v@V+ABe-!sx(S07{CVxt@Y{ zL*gkgeJnWvm6G3xk|4{({(RhN?>KWjaWH_URDKR-2R$7T9?@;ne)b9)rMEbaCo}B; zUE=zZuP>K`SbUE5p!Eoh1ll6SW@8XM==Equy8t&MH1p+$?76i~PswR9nc$lCURw!H z>miRRpqa;sF(z}l6L4%gq-z7=b}=S!os>C_?+$>rjK1u`sJF;vTWZ$%9(t=DI~t~h zdaZuPQNE~G8sJPwMRA9JJ`?WBZv(+UTUWFiw=_C-oNr+MdgN;~>#Gy_pp9^2Gp!I*13 z9-S`@IEJN41h`$X$cr!1u{>%{zTFhd@Gp(CD{C!%&2SQ2NgVt9hD=jq(lQ0K-fJk3 zgAih{q~_8#Af*%qxMlh5_yA0)yrktHvH&8ZUi{J@%BJ+I@!r%B%%{6bAG!kx%j$+o z39@+X385?y3H&oOV*~m`)g-|4Uv6D+K3bas*;Tb^=-JgsCkcR!c5_;g)b9D{dZ6nu zV3o0*Mwb(0zN>Ptw1L*o9bE_P=B@V@)%6buvA;E5a;%OP6oevgZ&NGTZ2&C0dev}f zIvUyzv^O4Dfv5{sEtby7VjT0102XlRp6cD)F+moP@ecKYls%`pXEqg458uyODy>t1 zM4i?IzXfeQWF=ZM*o|j?>9h=-JFiA?b*XJ2^8fbRwWZoI&;ttS3vkQOOEpEx78eAz zoszc#-iE3GY&FAkX_cRLC#8-zS4*engckKO0#xJIO6C0?69lVSua$N|sGu!bSMt?S zrNw1YOC+Yrg7w34rf0 z&RN8MM|Mr%w2qksqm>T;wg7Ghi(1N088Y+Ul*2lAR%+8nuG`Lfx8Gm|AZQyI29Ri= zZ2}ra0kTAFgz5P9HUWscrA~HedUkFO{Zhi}{nwHJ%9-!S2I&X{1Kr;oLt4zZ2)YY& zMX@o7sk3YA==nH14Wt;vjyE(x6mtSWrXc;<;e3h*n{p?e@egCSL6)2vQ)P4rFyGtM z{0Bfx$2y<-C{o;Eb@AINFJ)`VHzc59raP+WzLRqB8U+;!v=%Rki*O?_W}4G)w2h%H zN%g+ut|M3CXFaZ~BiDRpa*_ZmELUI)WTEfY4-iiQ5t<*CBQ8%V)xg@IYRfX7*^49{ zr{{Af{Q%&0&CeqEHH5Eh8*(j>4ZYm;k1l5|HC)jJ*k6in%4~yd&5g}?WOL};NF3{=oxR*~ zkGlk84ld;|jUEAzYDROtF|-Tfh@d)-T>@;=mq_aDrW(cyaWX-EVg^-#6#LLyHA+qa z+>~^cH*Qtsj`z>cv;o-VUc~P}!nL}Vh=m6P)Y|H&%nPvFSCuM?$Rt7V zUDCt3Ez`Xfl1@R}aBMqj1jHusN7Ih+}0Aia(mcrYR zE?BP*QUyZ-D2F!6K(GUFIkWMmObMo0;6sXajg#U)_&PKNR5q-B1$UPabm_&DrOywJ zM~={dP4@ZJTaaeXXao)wVhFm{tl$|2}jt#*|x$dmZDIN7EE1_aKUbsR7*?$sytIhn(Bab4niWjwATZEc*QczxM{VTDIkTfo!7SrZB_j{vTf&MKzTQMrAW1LDiZ7$(yHgr z?1wH0#v1xjgIInWQb#P%&=TlXPyAS_JigSx4xz{p7hg+gWM!I%<|BZ6?^#40gKX%y zM7|5$8-sgVaAq%xf>!i${v@=U5BH~3ajg`#ZMAX0-T#40gSPxMF_G#eE-`Xe&FlT5{SU@ zONY7X_>G6QlhG7oWb7n3ubqT}Q@&FCiWG$p7fpe#5M8ky8sQz$$G-z|^{}|)6Dkv) zi3>#|Fv3LFH+D|_I3Y}SL9U%OPDR5en|959SX-Hs1}kyEy9J<-u8tH=K_d*)9S!d} z$ki5)JG7nflvd^))A-_04!Uj$af7TNz$$IBEZ8>cEyf!KLy%Rv_<(jxcfjTm!%PXJ zu4Znhr9#Tmy81naHj8t9_i{I7yH!mjI}~YoM*!uZU4?pRPb;_O@PH8eJ`4V++;TUwYzXY7*ZaarZ!VzRnRub+MSDa$PjE<;u0to zv*Q$;XZ4U&f?Yi809+C=V@SDZ)r^GbPY8{kX^R>$0Qopst?@pasaY(o`P}3rAxQV) 
zOVAt=8`=p9ZP{kxL_`j*ZMv>Y?`;BXrt>mW$9d!A>%cVTL}O?q=5qSpuV-u%W|KYi z78)@KX^*UHva;i>%m-+{j^JD{YK#Dgc{kx>r&*9!Y!}*6#QD4Ia9y&7*GUli#0A%4rp5B?;|rG!h-wzt8q!0xK| zFNRL9hbU>7J0Jwwk}J1_Msm8H_{G!_(97ZW-r3PHfffX(kUCs2rs}QgR6#Zy8p)Y4 z2}!!y`k~Agm~_0SNyj{8z|qdP(|iG#9{-a@fwuFZjXFOn5sZ=Ad{2WE#jjKI*%k?N zC6!9p2D=VVVA3JT)o_~{;0_S}_SBb-V{RWjMk(o^ zQrU5`d*F1qc+eHvQz82T{0nB=gwh(G`veq)bicEA5`4!6u(gG*&^E-JY&QbJm(6v+ z*y;GuB{VhtTHQs>Zi_rP?z%cBHVH%%@7RO`zeNbGnz#0~`-!e#*C&0S+GI`u_CC56 zHw)XK)HhZK8asqxx%eTa0W|8onvSk8IZvV6PO+pZ+Yq|?d@Fy`4k2|<5I=VgMlo90 z4q5`N^wK8Mj16^jNHw$Tq;$f7?C73saZ7@Vnk1}hJX(CVfR%P^Ki%)Skzp0plXHNAh;Z#L1U-muwYyT*#V-# z=ws?EOK0LEIZU;UKyDCxi*c7{ObAS;=j)e&cL|_{J?A-Bpzce9j^8_*H2pPtf2IZP zmKmGaaVTdhg-!El2MwGm%$+Bmv}{9T3R>DnHSy^<>Mq^68<=MI1|^{}38%O8*55YB zCh7VIt%pv^?KUblMV`aIhe=8TU?GoUjIf5B3W}RJV<5)Y&A2Fg7YwJ{yMAE%_OOgk zYsA+&Nr+pO&abx;(oZW*fn2X~1};yLZNzAUDkE!gO-RSdnRvv|H~^|QxkpkK0PNk> zEs1SM=+z-=)u0`}T)YFQ`YZMYe5$1+EG+8ZYaBVfws!b-9Y?i$KI}M_5OBu~)``j^&>ds3`I>wmL z1|i~u@6+!|cbrK-gFgBh>KO?C`0<>=^h%HGQ`^usp2k`#+1V&@PK$;epiQGKulV#z zg502HXlo-7JiBM(*$Mzm;=JmvBXpyNR^7JoVl6!hs4I101>3X;uzrU%o=*W`z%Rt9 z)#WMjlveU>AS@O&$>~5N9AB>9K8dTX14pm-IFVAMoP!HLil#tMJ7SxnIwdhARLnR% zj+{G2)lYtxAb7>$>JhZ@; zVj``gvTCR0uBmS@6N)kBWyuaSrqd13 z*w-Ey(`0>pr~sp%7|wlc1*`F!@8>D$m!;XE;uAvHP)W zSnn}pbDY;N89vDkF{tATEemM ztVZjY*JZ|{XL7g5_D5+d99BL9vNE$SttEPA2{~P0hGuK&<{WG zHgrshb+B5WVG7`f`r^qo0xZ(nl1_URVC%b7UGUjXS@g(0NrnW#QaopBg zoD~45oh>rhQ>{)Z6?Ix{?37-8?*dTpzROYY6?HS$`yZ1ac=0BeDivT|@Cs>0F-3qS z<1{7Qb1=g67CiHA18J-$w8ZT|yT(pc$LR-zQ1qI&lmQ5!6MEYwkKK-PPQrU!h_VBq zv~Ij!;)u04@H_$_7ByWt6331myFs>Ku?R^Cm~oUcHL>{U?`PB*F&+O>yoURUc{j=Wqy z*f<4^;>Z_!Rv@@^=X;eO(U{m%@i0OM>>+2P$3rl2swV}p@bH^@Bf7PXw}p9NI_fk#j!u=J-uM0EUU*W!tSH-l_*VvRBt-$efcP zlF`wjB(%$;8&sX9lQpGltI0@3W(@3YCz;-!?oj8W=*rr{+U`(fnb911%6)Jgw zBPZe8;E|8>`|tmUQ=Z1+;yV9*Zu{(g&iT%Dp9|YIQ(ZMAormhJ>W;>!8KxTsEUf+k zSYDgv$s-31tX$ zg41Q+(3-FcjQeVu7m1?YyMhyJP{gm58l z57hxM`wLX|hMg@WklA@*k@DfpyIYPc>+9S}6b;!`2YOIQk^1rLC^C7+8D%02p}<)j zYZ>7-E607xX`f}8?VKi`7f^EH-_Nr3nlI&pLMB|tq;ixDxc)%)6#%8BbB|@ls-Xs+ zIyG5$P!`7#c>{nHbp$l#iJ3iF|HiYOhYT%u9Fj6UX={+(1Cz;pjpx)LlQ*R#mm!#P zt=l-&$bo0KhbBN+zZ{KjYQTYR8c3lr)2a;DQb@2{0k4E;OTOd)tmk3#iTyIe#dgQ|90)@rxT&_!`C7>hnunM@6 zFk6_&rV7p*Mbt#;Hl?tkPjUlDd3Ix3nE}Z674$6Kf}A+33-oXuFjBepK{Sigvt$|b z&h17)Bc+!SGP(tjl1vqm(g24sMEl1$Fcshcn28833H`#W=@eKi0j~LILJ*{3Qwovecs!rEK=5al~onty$(; zJfwJFCwwj%Cy+8=hK1UR8(yxGMHQ#}BJPNCE0k`a4k^F)5wDKK466YVRqipodFOw;$iP0Zqj6bPKypB`*6XBqT~e7z{tC@mI&b|Mr36_^9Io^+_!1-vx})!X1vh2$!Cs1*;@(Acb&%W3%x+`@ zilxhlT_3Crf(vyk9!Y9DKp5SK(M1OW8TgzBIt7yVwyN16#a)kC|ImJ$>tsE`op%fQ zQ)Ck;)V#V{CQj9;+?2@nY$1~Ej&!}O2-S@3$FcPi*a03PKxy{vSR@!40^C z_hrfruqNK3S7?A>dAY@CA#<=z?B-H?4LIydVLAZkS7S0vK^<#-gI;Ek{8nOFb_iCr z|1Kw&zeQPrFv5Et=BLHQGuSZ&(ngCtgO_@$mT~3s+i%Rn@+?_GN8|M;y5rWhB z$2L{G1T@clVwtU|^pGg8b{yi9T?IQEovzkMs-DtdO#vwYt@84-NC5=Lx1t8O?ZuUg zn4p44Ssi8g_P|aVqYd$(#sG@B+tcuN2t717++76o?$OuQrfF&GiQP1EXKBh)8NU?Zwgkl28{eh0?QBaUoq7Tk3opzl- zB3Cw&EV!xTGe-{V0)_igu2(rwTEq#nFU57J62Q!I=mj1sU^B~Tiedtt5%yd1d$^Dg z_>N|#4cuxI3~eY`210hfe1k&bB#*6_U~~XZGp-c&9PE88bPsTn*19eb4{_5DqSby) zyYC}=hUJb4ZqoDq`V?TM^oT;Jqp&k0>22{iS6+lft_K-6E)ZGCP)%EBZ!0MRM!c|) ziHNI^Tt~%D9pq-owF}bfk~f%p&!=(Qj|Tt65g>FOX{=_^lGn5s~@7o zxSfe?6$~8^rvBwW{Wc{AI!_+M&=r|UBHTxa78!ztvVDBs5tCTClJI&N`^y2T6@eO zpxUMs&~_Segx6=~Iw?5kUe)LJAXvq+&@gc1SU(x!RxMsv(;wO6U_*MwO+nx~4pmKo zq`=Wg4Ll3Kp2v!PaexzFtW&9t^ueVguYgDe(e0`muL5CvQLk6j?S2ejCk3z5Mfd0z?pBnf8vLXLnGo~0wCH%U<+ z>?-9c7>2H?9u@XyxvWIn1QHJ`frKjooMjf*AS+-quNQ4exa!C;VFK12S+_eWDZtGZ zORVu&eFlYWZ-%i42gOV^Me6{_C!RRao5Vc_-KTN?^RnlEKTqKv2K%FL>N{Knko5|!u7uNiychbfIGn~>6b)dGuf`Gm1ow30 
zxB;Bi`rKAT1__ZZ`2tGYh~i&H=^c=BVuA+lfhpy-&R`F4yVnT|J8>Fdn8Md%@ofYq zZEc*X-hxoagQz4^gL_dE3y-334IMMXNfz;LQ3*E@F1wTft5#=Mt00Pr<%O`cSU1%7 zDnqUfN4y=3W&l@nMz=8`hhQPlWIZ15Ktj3~h}(1UCrp>A=O3e>_Iu{nsIE9Ik5#|X z0fwhdKR7)B!7g7Y`u!;Y2AS1*Xw;giWlyJpia-}e%V{N$lfAvGK~5#%y zksgay`J870!OXr8@5D~Q6ea90P-kDB6-*msMUcbNxr0k!*Y{XWfvrlStl#rR+iFTd zOJH@73meb$Hh{4FqCR_>-P>Dyc-kqFj#qa;dA2*+4eEha@z%A~K1e}d(uV{@g0HAE z8R15D&pu04b&R+ES)>WqttbKWeUHFa2=jHgQ;LeZC z^Qt!!#+W0PpqlUXTU_NMP6Z15Jc?rX3Vz z_hPTJ@S8JHWLiHeCKxs?OF&|Bt`wP805dsbuPG)yRcJ26-X{CHs2GAdKkwaer{KG3 z83498wWmthPC-cx+yUoWk*In=C(Wo-L#csdQ$}6B9fDwup)IX#1h9N|H*|}SGN-_? z!eqo#yV~|Hyc!DG)v1gkZjZ=ijVw3ZTylVy^c}0<&P%Oht}*wmLPLu>B35_MXY|&1 z12+YTNe5n;y|y)~Y2%)KLO5CIr8H=yvUHuYIw-bCDJE`!DCnITBUjE~O9Uf4xa zMV)}i?$+!oLLM>}>Z;F2vq%ucPpv@YU{d;$kbD~YdQeZ z09wkkhr}`{@X7{zd~K3HD|d0Iy933=j+Z*4kLW=|ulZB)(yIyRRi_*w3?aY|U#0J$ z+LQ#=ZdnS`j?E;|bYh?TP(+$Sb5qr-=xlOO_#dJ)@i1}`chs1eo&#Lj;El#~<`0WM#^l;aq}psbHcKRL9}=n!$*Iex2|Dg10zH_x@1;m# zlt4R814yvDx2c`vA;8V57E6vJFj@+W=o1IL5eB5D_6Tzrg?0qGuF=sPZubZ`u2LUB zPEOb1-c^_flNYpUjGtwI)cvM+Dq9Zn)+gWM?WQ)s%yKFGU4_F0 zCvV`}0jk4cOh?XLkV}1&89YL_FBNa}iFXzHAoE2>rlvPY!MC>WQHUcjgzuQ4K*$0(kL2#hyjOG^^&;|Zr z^a?EpMiax0&4xb8O>;y%jV8=hCNj#*X+Yq+Hnt)TJqq)UImM zc$U1l!^PL=@l2vRrZfj4;PDsmDdHv#E(OQp!Mq{-#r&}Zbn&pwprwv-^2eqFY6YVI zCm+#f*h2E#QJz&Gx$Ns$a1A%r^#;p3b)2qsoG%L_h-*+>vSm)kV=Ry63Mu~}`syO0 zYeMr^)V4ulZ#aXfb`AUQDzpHuHyN|iHi(kxFwf&J(`p^^`tmY;Q^(-R@8~dW*8wT> zL%oUH1Ck1LO$zJVlltQ;UO;Qf3A6Xt2e zRS^Q}P-ixeQ&muF^(EX+pY>xYJK@#n%PL@)VIWE1h2%-ev6lU@Ihfs0y z+Eu&k0?F{wzeU0J;WAnr+vo$y|M+h-xflRQ;bobzJi-mfoD7RQZbpncbS~xuJV~-Q9+DkjJXcYiw=WozzaXJb1dB~;k+u`*r=R@ak z&RSyJtSCZ&)9Y&O^SB*vYgabeN?@|SA2qHFa9d*Nqp^*;V@H1VH+~aE;-;AE_rgMP z6-4u7%riT-RD7SJH(04@y|oJgYxc9n`&s+H+^NaVMu=MzdPxZeaS zgDE_XJXS<1hO@(z3y$ec5!`TE-_>}e20(k7<=1h#V$juS`U4Yd4!iC%ss^KhHd~qo zEVa{2G6Tk$NU{a68YXZ2HM#`4Tr6!$onQ*VHG8y)+lt3DvbvmZBCOwj7#j@^-O`Az zj~l6SdN-P83Yyy*If9^~Aj@I$qwQ(%aRMP{f*o_Z-4uZR5A|(h2>uMsu31l@P>iRq zee&t!kE2o`Y~PBrY4)P7gl*X~pcAKSqYo{7B!a&C-S0-LxLp%Qe}+y$h@NBIWH}G8 zf*C42^adUzig4p1ovodFNVk$x&(Sz>!qn^>cok@sal(VEmy$~jnv><`313TcszZW7 zZDq&9;cH-v{lR9GpaH>LzV!uE3$g-Z12h9t^5ZwCE-kymXl~o?o#QW|RJ&KSW6-gC zT_=~jxSjB&^E6QV9Dj+r+{bO9VV(>;0J|tLmx=++5i}UlQ>7WBfFaO2;*+wk;qnO- z7i5bs1a)r&!Kt!?!a!pTCVy^js)Lw-T(Ef5;xq+zTbL&AXeVb0K2#@K_-KeBhRSnx z$BSx3-01X2-~YQ0qf>ia*Il}j-F&LhrUPN=3MMN6mxFho;xOw16qh3(xmF$i((3XR zG-|*~5h>LHln`5YY+dmoTLaG$+xPMZ6jdSw-o)v;z<0`TU}caj0}Mu?&;-wiF%F;^ zVZ^Kzu*2jZR$e+l>cXzR@Lk;2?aDH`1zGafV>_e|_B@JV2zPiqiylBlSlp_hq8tIt zO1yJ4OmI)zz9&Glk{jU|L2wSM@tW7z_+&R6wDdDZT)Pf)|{#M!9USECNWng2?BXcG*}91TOv2*vfSX7UX) zaT$>MP+vsbILU8AI}B~yFz1oQp*^-#S39^Zt9T#@MRlQA6t<<)1H%m)oNv3NXxEFpWGQy4wMVbRxpwVX)fvVT~*w5p3JlCC)HYRCXu0On`3z_I|c(l|VKQCJ~CugRN*HadtyFu8_2JGiL|zsWK|7ia#N;DPJ7 zCE0_d&XSXLsG<)t_ngLu`vKUD=%$3`LPtK8|gi07z}sPppqXaB)f3 zImWo*;?@5SjoPVGBd0iB3|@CfpN??jIuXurlTq9i^?uLq&Um{T*AM5wjvGy}2!s!| z_NZU;0Q1d3^XNiLP{{vz_A<(j$bp=J&Z<;^a2Xeq7I3@#tDH1NoA!H;R$XhjN#Pp0 zs^g?t^Znh~^WkjzWN2n*PhCaBkY|5*e+`WS$t2c}GTaDB%YFE?-ifC*$u`jP<=BDx zlS2rUi~dE;H2LOsp1Tl8mq|0dG47lLEf@6@yf2&U14FJk$Ef27I6HEipMIp7LL{|# zZ21JY8EMje^4%RlaOG)k-=~`KwHrf%xAm}PH%SrpI5J96P7|T#N4=&Fjy+xkbOtcD z7p`5Wq5fV-cZm*b4kz+C;6WIQTm+eq+?e(}1TxhVCVX!JR@e>J251x$&y zwe!4y8wKhS@+wXz%B?)Di`F2Rn@sAe17U$aNbAvSj(CG`jT8<$#i-Q~X~Y@CEJfR* zQMN#yK&aD{34y4nZOE>hG{LxLa|neJVI-dF*l`+c!itfu5sYl`!Rmp`ww4ZIr|+1Y z&7`Ib03%yERrzzw{ z%%7%U5`2Y3&TzVRq@yc^kImBEjdi6tu!C?h4CRt|5t6@;7@XSV;seE)hX!k5!?T2& z0x(gCXHUz9NaKo>U2({5?X4}~o>kX4(Nl$hP)A=;e^3LsNTVLw1-b5TY;xmtJ8sIa zISAur&Fvd`?C(DLW3&tiE-ar6=&Ww)bqExl2w3D%5!#7x&jxM|6M=IhvB{VS`OaP! 
zf;oKV859jd2C;b9$88zC^QO|k{@eO=9pR>+kK%yt&>mr-@x&gZZ-jX%07^c0kUtAL z29uxcYfH)YRecYG+oxdizpeu%Q{1kvvujFcDKrB-dYZ!T&lH=lF3^S{kvs{GK7*P; zBo{Xz=W)BX-HMZ{C1cG~qP>?LHV%hWa64=6MDA$tzhGZ(SJH@aQ~S;{MXKRM24=o) z`j!UDILS;$o;95bgT6m%PR(-1^v_%EM$^PgU)F}56^Vr<2lWE)JeG_FPR zhYpPIo(3)*pc9W%j>PoAgva+8D7+PBm=_3 zFU3Mo3*b~|M-$r)%_2`MdmIAY#+m$U9irpts}FR1wQF#|#X__SfS(7xsMH6!wbHw@ zNUBfx0Gh?Y1~+9oa&%t!A+4c-?IoO~*m^s~R|ZGWNI%wgQ7_1)3d6S(AO+sk0@|qq z>MT79nt~|r`i^#9&kP>Lz4yW&MuPWl@9GmW2ZA{!mR{c~f}O#Z?k~+dF#Ezf?oe`I zq}^QC+3_-%Opd>{DI zN)c!q8cnnzz|1e(9O)ql<;4<5Ck3UpX}s5kV7b)t2c~;atjP1Mr=oALMH4gFLGs>- z5BL$-g}bN?7PYuxig8EH;RHzGHsUKcN`6Z^M=}Oli9h%zN(WJ$E>oROaXMMW3p*^+ zMB>~C!z6e+gGAELM8945qhNGbS5noUfi4*rfgWkEvkurRF0Imzag$`-52tiME;J^Y_)ASVD1&2Vj88;@o?nW zUd+(ZDQ*h%_J_3!aSE_JST{m{Q*hoYxniQv*qMW~9ZT&f{D&wT@35;Aodc25e)Qc% zfJMy}&*+~M;rKgo1il0z?-4l8<{@<%nk8^kJFyi*<`C;@3k$%Iy{diHezE#W3$592 zo##7HD}bVhS(Y`2TUqjEY6Ix*J511f;janN%$~a!Mq@JweN;r=o+_p1to(le;tFN(@vOKA{A@s z$W)16`87@np8#|9#ACjIFy>QOSqDK+B?vAO1481KQ(R3`D~^ke0k~TL zkSEuLP_LtL$cFxcfHg2u)W<`Dr~|Ery_XqZsdlV6#M9Tm6ZS9~P5@EeXOv6B1zT3U zS{D6Y1_i!39!9-xIX1!1=}<)*=o-QISM~ld`M;>+DIK8mXNhkw`r$4ll6(8N)sT7s zFEH~`6<Dy} zUrU+*@}ACT+fVI_0q5VLSp!H>ca#zJ83+x%W9$DD!B3_IzB%0VPAfIkjFXHxtd2D2 z0p{sE9c9T#W}7wEGSYbo*s3@hpK-hv02DQ@b5(J>Vqn=;oii8>bG{{31M3dDp_${F zy-?0&)rAI7oWwd`(*TW9lq?0js{uv}=*Et{%9-hqHbm#Ucc2#^55X?VLyFP?xtiR$ zhwWRnUnmwOx0Ce!??FH&=dRIuQ(V1*I{>MUKJZ@Gks(~)Rr!VqhVj4?AgSnl_6R3Z zSyFCeyJNp`g4>*pu&Qwi%J~k<8dETwV%#k@>PhDe3UXnPLa_>;jK~_J(o@fYTwj^X zXv~B@9YB}!=!fP(PMwaQv`S#h!2^8M>hKh2g$YarWU1sfs}>-r@#<_{6}NLc*voH$ z)*vC9TlpcXIuKSlJ}ULv59=tg1la(3IqDMc7V30Vj24@a^QOU?lix_npdle9FVRth zNc3E64z~gDbM3};RP106Sp}hJ9S2*Fi|}0lYFI+_J%{2hz7)|n${#o$juBLkJWO(LB=R?!@rv<2RnZ{x#GKgp=d%PzlDk#V`j;5c&j!(Ahh)Exl7?(zzY4 zPD~TlEkYCwhS{*4^T&BYUa2L}S-2#!z_$#{BT5?43N_8)T zNb$Kc8g(e<^wgIHgYA$}72vvXIlpLHgF=z+ZF3f^j@yO5t!pN0_FBBMOhd&@X;~uC zFuQ4=u&15jh9zzoqL3EAb&sac_ofYYe)r==2owm#d2pi)4JH^pNf&5+=wtv@y9a?% zJ^vomh!ak?^+M?ZdMLr;b02E%5;vk^DkPAlKnNx321(DvmEN|9uBYV!Jp=~DTs2anEO&B5INFq(_Iw0;kTXnn>q(HCzlWd zLN7vaN-NaFc@R-~zJcmra#TbTlku|s7^A>I1z@JP_q~;|0EL7Y7inRv3ZnE6biS;H zJ1YXdAF5N! 
zLuf7+TPf(+@vrEEKeGEp?V*oxTLlaabcS{U0Z!Ka%;qV`Wud#jB@?t$D3;%rPb`9A z?A|uIF8s+%ePz1JIS@X}de^Lo8&#fr`4Z}y2bi@?Yb+(8xo4EfMzEIT%8na1^eVWW zBQN35uPuPzk+1A-R-vG@NAxQ-N8Ecr4_U|UvT^WU)o%@gQ*xKu0K?S>-YvU4O@Qm( zzlDL!UTpXQS^!ECH+tK+o%>Q4w?Vs5C{7Hi{}Sz{0K9h->#kh~K>hK|K@Ui34`dFZ zkJ~J=452ybAQ5Oj>QRp%z+-MQ#8Jaw+KJuAUtk6_!kvpK8p{~$vdDb&1Z0+V30Wh| zQz#^=i3%Du*pDUgGXRBQ%_d%WEc~ZgLaT(F16gtZ^ryOU3G~1bo9VLyb@LD@-7_*H zQgV3Kf8sr!a*7@Ynkx>OX)SO8H!SM-gKSCt2|9&h$>cU6YY2QaR6{@zc#zqew0$+D@21BrWBt1{*$=TcH$Il*JqSc zV2|Z)e5uCvfAR-U9;06XoSs`ku{f=v%Q}EF1)5b>G3Z~l^vv;-Ajio=!d5 z{=ghGxMwSZCQ}4Z^nLCaf#yL@j~9c;xdhI?J9LZjDJnxU2Uj#osQ^jL_TCnfA1mpo z80cDho^GIKPfSR~k>|SM-i6|7n5Wl+KnoukOhKS64Dx^ z1X$$=+nPsE$ciNo>cS8(>+0^`XMYO`ojAaDYzvJHd-~>;NG=~1#A&Cc z6lr7AhhfW5ETB3gbRxNH_`RYkU{;0nwXA_@fv>*zXK2(Zs`i#*N!h-b z#HC^5cIj(W<)(eEhsJR`P1&MA=PBeK2Z7s0_dHZO1i6qg4A$_W18Mg8#4t@vn7hy{ z%N1<~_JHaqzC>#yizIskbM7qZsmdY*mv2u7-R6N#jH8@Xmr}yK1V?6NFlCPGc&Hd;f%)-K32`CC;2F-Z z(zwBt@MVVSHJk|V#bruc$89C*8HKfk7V&FXc{*!x;+11{ZKj{ zDPKem_8AX%Zj-K$=#@Mpm{538bW+!ov=-8d-%Nt{h_$!H!#eWO33_Ldy-Ms9tm zY`^tbimBKS9`7A<%whqEV8Sdz)$V;=pQ_=egdg|-%1tr&7NA{2IwQo5dVyrT*;0Sh z1h}V_u_Q?`gF=be-OV2rYC$03dJVneqy!gytnCm)Lb?lsQXNf_^lKJf2=IAJBZD4p z3nDI6^ntDbTCS1lR8$GgHJ?ia$nFSi#%8N9Lnzb&a=4^jw0DkeoZtahc>Eo+z7fb8 zVd@jJUkI=%3zxKH0EM|m(>ryvn>M04HPZSGsycJ9`%>ti&lm}Vqcp-fD9#}lL=l1+ z(0bB5i2P)P5yb*#ecJ5vQHJJEp`P-{-}k6Mw1}ek3qXs*d*fBG^Sq-)GSr;{EXPUF zI+$FyUy%;ilIP`1E32p+?8LblhSHNa_Xz0wGcc+uKSvMM!i|8=t)Ok3R>3y6TXdo3 zFj4r>#nsbJqR{)rn70eXc|BBh?E&EnCBs+W06_o-0HlUhQhhs)jI^c#heMF++!8Gp zC(=UaKlSX~2%;K+zDHxn)cXpgIx)KA`P?ZeuSFQV)Oy;~G50kaI>QZb&25>&EcAjM zK9(A;%zutAVs;kf~ox<_5Nduzt*@^pmO(R;x z>Wiotl-HkwP*V#61uXw-^o*0@co=jDaA~!+t%GcxL}8&JI@T@}7a@){M@QI$M&`Vu zA`4Z0BSvRB(K7(KN^Hy8?-7vv^q4rB22gZNUZCm}?v|#`qXbvfLyR5GZ`4if_w@0% zV*lhc1zpx>a|(3p;aM1(`dRXRq{&_3(=&dqX-qH&L^ixlzo!Q_(6|YRj_PXuW z1d0T@)OX3V;Os8OTd!0O|puFB@$TtpvoH<5}uj16r zXJ?uH>d!q(Qw3V)*n|jMO+_eVzpRdS9=9s1Ej}Qu5(JlH%Ll8#W_?9lITauxuCJoZ z_gZ_o_RRP9kV!42HW95JS8_-otGnE#^j0`%s+P@n55Bhh5RL z^+&iZ5t|!NQJ461skb{x0X8(-837U5g=;7qCyB0bz*MR>qWOK`7Z_WCC@&K+YRD8o z9=ZN?5V=2PCa^PEVp@0Qo;h9wpOR?EN||72rg<+7RRG z3KV}h^IkC)Y!)1eeuE}fO_8*7C*ErIW}X758(iU_KCKpjfSCxOT-;U`XU}O#O|XS{ zSC1HEKzC0WUkoNAs!^>JXXp5{)V;RjjK21S^?fu9B>6?{{B-QjRJFReDenPQq=(bF zK3bQd*ghEf=dYIxk}vzflg0$N#U_WV@yx` zBNGq|Jo{dhijyQ)bmeJ^n?$f{2$N?3swdGk@gmj2=Vq)f$D7-8U}7>Yr|-oL+njWd zd$>lhmXT`7@$^7l3^dBgcM-W(>`Pe8L?NdIaIW(QySLP(RiU_@FtpP5w*~<=Ih_js zb%4vt^=zN3 z#O>P00;leE#QE0_RI6~;+I4&;Dp~;Ufm|N8K;kpqhl22B|9*gzl5jeNkVgQgQ^&9s z{WBOQ;o~k<>m>QAze2}g7jk1)A6mn=KWbaslN7oh_J>cwmeQghy+zrOC>RDm(KT-P zz%)1OB89)0<@+Nxx8{syD;ps5NDXI|dLA6oL6fEAN52m)C(QXPN>BmAaV$gsIf@2a ztqfgtfUKGbH~AE+-9kW6Iz?K?nR}cX~ikp1&zHQu+pE@8HHK-J40E)x$2o2Z^pO2tAEyQ9ingx^ouI5Qa+%Br@ zesLz1r01dK^-rxBWMOHwTjD80v0xWtLX?u=7-_x$v;YmZ>oK{l8ZdSu@fyI1>x^d| zA;Xi%8nsz80fDAN6u&a z07HmobSpREastJr|3$ueBM_|cGBAq9ZLY7YFPfyNEc|_K%wf_~2jGTb9HgBBrD7d{ zM&Y2mka20e(2tbq8BsTG=P8RWMUWZQUd<+-N@*OTYe8Jsi>vP?Cvi_N29$9l7_X*O z3|8Wap9O%~Mrt%=Ahq}xo<-4loOn;P4xps>)tRp0Ha|-gwBZ;li|7@n)5>IXE^|1( zx4WQLiZ$xeft8Es~{vjbeIR~Ku&NwvAeqKF#@`0<+NJkCVJ_bzzQe^ zKGhxeAXxOV0!4%BLqVlW=joGilJ$C6r9Luf#!0OqfD*^T!3l1cxPAro+Gky_436#I z()4?Ro3wZi1i_yguxjXvgdcK(diehDvyZfK#tPP-@YtDL9QBI zM+KS(Q;PDh&=KQwYTDI`jj=K$i;;Ptj)+$vI6;kpwK})}#kJvzw$iFV(q~12!93nI zFvY1~)52mMKxWI!=r={H#~2x?MkQ4@-|^B=I0Kr87yRyBEBW76!)*ienzF8a#Y3=? 
z(wMx1n+$hIwTn|a3j0L86lZT;D=mHFWW#R&o;4H-ab!4p)8_7nKytXDn;<84b7mfu z;#M>9PTkl(uj3ze$xfkKm~6|1HR>r8%EC!t@5-HlU1b*e1{D4>^2G)T z4c2ZZc(hmCgyP_xn*BV;@^BYZ0$Z&brN&cfWk?q32BRAK-%1LjRmoVPUx4JSk3WxE zK?w4;G@3kq@L5#qnAqL$*OP{^1}U!*8dNk4XKOf3!`b3s21J&3bbO?R+nma*R(u89 z4o#!>I2ni%v2QKq8Za0JbOC1f$sw&3qE9^nBXJC0oWP(DFK*2O6mN@ zt8~vGxVyBxf~HesR@fM!j==EpLfEVr1FXvHjB|8iX_A8HRudeY1FUd61y})(HsjjZ znSJTEk#OO!W&%8?@7WyCMSJU$C>!Lc93a%^cOJs57Tt=YAtfj-=G|@0n#y1lLNEM4 zBl-$ZEg`OGETpg;;6n#hFeTpB`cMtGvyx$T7^_3@x83i3u^5E9Q>ffpOap?m=MFjn znqb$5^D27=B!^9zO>fzKpfm1m+!Qn3*Q6DLT&meM&<+%mxwE_LOW8n5!M%9m_5it1 zumXn6`iY>Ph!6t^Fs1dsn1mmJ;o_x?9%N|07;0$}djf_NJ%2}g8iC9Svk!_$cMZh_ zx~1Xn#KCWBxI?2hBC!>|xKj{Zvd&G2ce@D5 z5`8IZ{(QnWBYh6x^bAC;ySThc1^DZkjt;a|FbBko#eLd1ZYQfn+o?V8kpmc>3}D>UtRh*p=B)<7i5wM95= zB(G=7s5eEt6O*V{)XKnC*9Bz7OR(Bw6sd1l@S@)E0Xegc$C{5iPrS zb+EUeA~8KdsX!|gYaUUbkDyS*hm>iUJYy}7LZ3i!b<>6P5eQagOQ zN>H5LW=j>OoI+rd?|Y-aq4!Q9_Asf7YEy^{FsL~}6^d&LlP!&1YY@;PrBIfn@@ zYftwCsLVa}9Dyj|qAq@o?T#I>32yVuDtxqC2xJ=1-Nf|Cm`omR-_;a2;q_a(ms}V` z!VfgNn6vx3#tB8-NPF}*x*j_Za4xsC0)Mx1)d{4WY+?^ zcjGO)^_^`U!GZ)c@#4xMZrCL9FR>`x0g(->Y2?`jD4X1vcF&NISk<86=DsX!Yi>CJ zx`N-~F;`TXLa%7OW(Xvk7pW&G_T(K;ts;+r$~9h#83UZqew@LY*prb8Rp1n0ZnX+C zO+l}DO9AXpnBHQ^kN*_DGHbe98q3cC-59WX5_8U?16<3WQJPQT_p~`w0$S19tCe0; z0_*oY`>jBshmuLM1)P*b2F%bb0Qrr+_aqLH)(o-i6fdXL4RtA3Re#Nps~Ucv6G%Z> zV^Fheg2>?o>RE=<;(4^Kaa{{6U26BPo&2zpgqRP(R=rLd$GE!#$;G)twWT;+2$rY@ z3q262t=6cfef#95JO&`=%d3&(djzI%KY6qLlW+0q9VTyIeLMk6rI|()C11praRINIsRs6 z|B6tN-O>t;7N^T_??9VXB{0eJ(&=Mz|3h>O!2;9E7qMJcfdubA`?Eibc2oSV<8Pp| zD%kxWrd*q|2jpu|Df-PAb<_=5D@JB(0AvRDEbGyXfs~bn5g7|_f@X@<+Drz9@8xUg z7^h`+pL|rc+KIAvz@Z#?IfRny`rrZUnPk_2BpvK%DWYq@Dj?B&0MgbFt&h{ksWh

Prra-D)S(Gk|CKZ<~#b5EQZE>t(`2pQKBo$@!an$dSH?Gs=wQ;-Dj62vE zh{4671Mubw;a>SjLZeXfM&U@NMH0{S=SB0?r3usz{5W+YwHS0kfTI z`@zEQrQ+ z{|pjkiG8lWK;J(Vhm%LNxi%Cl^>Um{I(!GmN1y|Cnrs9zl_g3yCBo=2HuZZ@EU`!W zl=c(6qt%>23Qx0%M{kdyP-fO=q;deITJ|#lCn@e_?V^nA$%$JDatuNkfBStjHo=Wh zuQ5tFO>y4y{QJ?hAr>pYu1k7nK+w&PMqDM97Kro_9?I;?Mi_fMsP&OpD z30Z<+7OYQZ2_mJPZK0M8X;G_cB^98R^z+Z5Z=-9zgi-~OT{C9$HGpf5wj|>@l@xXS zIkfG-M64Ma@bpJaD> z0bn6MVxmXZ)f6~xsnvj1_<2@A;%*ZZi|%?{cUc3&xeo0jpauxp?P!U=Y4@&Xn;CAm zHeQ>cO*<0&Y3Zr$fCoATbZDQt?1j*MgYxd1Ys;wEAs1x{uZNp*y!{bLsGkDvHzm(O z9`O2kls~yEAIQ$nLtX0gQ3yHH)nu*quPIc7DV;xl<4s+uYMM zg~bZ7`r0!HX4z&I3g3*l%vv=^oC7&&OcHDPsAz9=cl-@B38Kl|S0{~AwVBTvDg+?D znLUFFaQ3nHOS7c3;JD`~KozI87DufHe2Srvn(dR;4aQ%LTMh<1$Urk`q=;Jd(!iw& z#SCw7p%!Ij5YX_dKH4qEC6h!LBU(%QV%(3q3q+{VIQaGndGKbq-^88e8kgW*K!g_!i z1R~EvpkzD_%-0N^8qN%&l!2})Y`IW3D_|Im4r?L#)p?5;l_ty-8qwFlR`K=CEcRII zDTLSR*T9ql8LC|ZEaKaW-2@`C7R@M2(Y&J8a4F z{d3nbZEn2>KoeL+lSKO|8W-iY9yf5GYlhV5Ee#19z`SwhX3QWUk;@WQ=CJV=_*ZGGK=Z}s zFhy^JXM=t%>gr>A2+cy2<>8JKSY_c{%THZH#@KNOt!FrS<9l%hvJZ4=sBbWXiW5as zsXK`C$Wh?_>Eo}X=XU~d8&NZi6DY9H1~ZKpH@xV0B$ z4#{u2l)+|*7YDUCSxMoWFT>6Pkiz7NRY4SU`z54Q!)>+j!YiYKI+#|wf>~qK4REro zTI)b;1A-aVPIyfGnvnc0(7q=tTB;|RQ-NK!GFkwu2HwMBRU3*k)ww714yLe6)I)vl zI)=oK3tL@>#@4%EMel(r47*0L1?9kDDdjKGI*@Ah|J#o|f|hg5_$99F4V}h+8T|Oo zPoZ}Z{Ad|F4it{S2!W1}?qCcs`&d4W+bWYp(LR?N>Zv2J$t~RjTslS(DC-OizpK6_ zTzGRP)0@}xs7ReQpHy2Uv(?8DJ8(~~tMDZXvTZ;1{WkBA7wctVEN{XG2 zo);jX>V56lSMBBm7HY>W8M3uVv^t1X_1#420H-?oDqRQ-rQyJJ+t+M5FmokZLS}#^ z20#nof`5qp6q>l>(Lbv09DSk)K(St!3#wE76k0Eh z$Bp3uBxmUJd`Dns5hgRJ-a{z#es|I1N%BOp6qP0pg<{|4{sn3VsO{_Y+5|T_AAjD) zK2PoSvW6d1$KjYiQal47MTSUs_@)%TGs~Tewk&s|S|}>b`b#JlH=OKo7;+w`qEXo? zR|(`o+&Rz*;<6(>++>7Uv3vRYb=t7q$Dh{M7&j>(nnz?@1CjJPDyTb9n6O*JP4VbG ziO~R9G`z`)B`dzc_SQ*d^q9P77ZO|cmSH|L32=VP3_}i$xZ$P+{Z5Lym#=De^Qd%u zXc1r~(+%hM!Xa6)d%7bzaBw)oC24ROlv#ot|EexGNffd;}Mp&af?uN$hV}QFW z3^1t=pExXHT0Bo9b=Z2$|E7Sv?#<3qo> z)_tf=^A1jC%WG&gp{sCKHsVTnBd{vS(%I5m&$SeIe>XOkz-X1j z5wy&;6!t|eA~fvB+6EeLrieDnnWeCCqqGI|_m@u?Lppsm+9^mZ4j(4S=mbRrsb))c zzQuN4r)WK#)Nz)nXrd;U8EXu1x)fKht)f;ST>tpnpQc$10jQ8QT}k5vDX37QklZj`CG; ztB_Q9o%cn%DFr$nwX`|}v%>lf3I@Uo@7AJS+}0THSLr4`88lPyWqn99pzEZ^&n>VS zJhvO4j5Y+)d!(7gq1`u|6tjcdX)|Wk31a-a(9Fs!8ngF+c~j)!B~3N^4#1ay$6--9 zkdke1A`e!MjP!sxX$?V6pOX_9dQr!50*yL>J;C2a;Q%DH#C^Scy!sg%#*p%g!Mi{4 zZASYL$pn4;@b}R@h${K~&(JaB{8#_Ye~U+~tjd~2q;duwkx(pGm%{g>R8L>~ICcb3 zJy4bU3Jn^kg~eWjTH-temzaS`sH+48me~NNyyfJ*#N1mQPz8#`|3Y*!3t%Mp5au;k zsRGIC65T@$CkY+w$P!}R^yfIiw&oa=;(|_2H5_ay4mmafF1n0OWFR#8%yZB2;dQWG z?laJc4Ol0XjvXo5(Ihw^y!VHW;t01dA%ts7>F4Nr|9k@CI!}(Sm zNgo<$+Z}zTh_S0Z0>WS{35}E2ef3ilNAt4&sr}C92ZX1sZ%~M%kkM~tdkSN6XE2?^BfaUt+!{`v?@?v@U zb(%2PwSv>2TIQ>!;IiaU1Ci2?zx_ERPzP9anVP^F&=q+0;2gvTM5njKD`}{xnIc`5 z!KV!9g~^LL7Q5p;g%(toim^*f#oAEJ=Vgzf!FdZy-A@M^{5*aAYfm4u2-5|^D+e6D zi_%kkjM`Jx`d}*O;slR(&@fHZJ^+vtn{=AwQ=u2aTk7 z9Srs3b_Bg-NIRK_V0qFn@RF)~g%VV$?ae>sdsH?!{;M5^sTF{0gOy}e(JRPASJ`A1FDcm z5X{o;=yxT>VG6|7w!Snc5G{`-gvm-W_zGE*Z7I<4JC`d9ahcsEI=j=xkZd(%h!*$OoAGquD#K+YZXN zA1D5hV-o-m$6t({yUZbJWO0zB1w`)`uc3FGGrenZ^Uz*D&tcgPPHUNtAg*|Hp_rZP zXb=v15GZyty5&B=Ng)M3^#d?fC|<`q!VNohPSlaf5af2Rm+;#!3^f4=sm4;r5jg2vyuw(D|^2ETgS7KY_2!Nc=T}6#4 z(yA7XPaFyME>V&Z$hBcHYVJ5;j!D7bB!&LL_fe&N_28LTrol+})n8z!a)#StyQY&& zX!M7(qSkfBXbwnDny5wGBG}<&wuCSByu-^h;Nx$hSRiTFq`9)aCU1P1^*xkoc#d99 z)qBD42{#yZjk^k@=vVNm*}1q#lf+4K&ph{DS|n~4`dnCeXxQ(S`26BVLAoZZ4_gK_ zml!BieQQBLW81vQOEqePoRYRm(P!?f$Af8_AgT%PEYT-)b1!Y((Ii2zv$(R18gb_x zESpw5-ATR!ht99Y9f6$9_3#}goITK(=LBpnnd0bMFiHVlidHfPlifotZ%PWSbA={%sWPK4yeatpR&v2&f1!uTMTIk<5b;m<(N$ zSINE?SX+ycR~hW~upvcPfG!A@E>w^Oho`pc_6^DfqujfE6>2!mCU-C6Va7TXN_pu; 
zgu0d@-r0=>53uJ04E?wKiH>P8bbqQe8P6BZGl*tmk3DO$ZKc!}@9oiPw!zMOIe*gh z5DIx?yAQpm=$oudQHm}EWWW}+`qdtYGMvYwZx6nS+Uy*FoT?6>$E*noHR$cbw?9ln z1|X{^AK~aA+cPIX3z`#ah<21>FE#xc8ao2QyQ!j1faI$Mh|?5zn-bD3gURRT_@Qlp z`~4LeJrT==|1uNE;)4gV{E-OQZIwCGA_TRY9hO57*gTM;FiKH_mkde{05jKj`4pFd zuJn2;fKej3R8o-pI!L*Y;GV1_R)Gldx_i5t(H5U7%2Nj+{W#LShMTf%hT)I~!1e29 zlls*(o}m z#ZBCR10RT|4eQr_aGmv~ik3)br2g6pC{^uiLX{31&LWU-+MbTP{1QubKn7>4aGlt%=9U z)*^I-&Qb^Z#!Y(v{MAqY^MC#wlnsP|r2}4V(Se4tgJLTUE!z*7dC_VCjA*DOvEdIg zW0}r$)f||w?t@*Gqz;}9-$^^Sa=r%DGGUYzBgls1arVm>PnuvSTUUM04CCok)Yt-| zH}v{@v~7SJ|N6to^Dq(AVNw%25a4)^r_rVK{3W3+F3=gSs-kacm9!=&gQjMVmb z=|+xH)LT04ism7}=F`_c6{`^^hKOoaOD-dzYIdx_jsf{sBaT9-)lHz$!Y@(6Q=BdZ z11HtFDFpJpt0CzbZW6)_5gv3TxoEa`SeTvzI$dqx_~K_V1>w{`$_BbfoTO!VTmm~U z9l}AyWe9FpbdNG3f!0$TzkIs@h6NUj=(?%^1ghh8pjwK}RsjtUk=7yQm7dSAR+rWs zXoH0nnIDZ&86@lYT=eWfR~0UyQ&l`E%Tk~)zW<(f&Qdyu_vX+a*^@?BC1(AkJfRzJ+Yk^Cs{oS9YVKQd7}i8 zojA(RKZE9Rx-7co%yNEA5XLEj7t1GL3xI|}6#&96l@{&eM%;{{N%+j+W@e}g|MM)` z3%V6P2P7Jo80gh;&pJp76Foa5bd~S7mr{Tj#i^uaNG`^HnAWd=tvhCK9Ehc3hXOM% z#L%Rgym{iAc?SB2;u@G??1KKCMC;I;=)-L_;58?~w~&R52Efu$d#6W@mA+=8^2OJH zET~yZh--2PspW)xp{i~0p*YfT=#ZDRTdWUQ$DX9ZXtrxlTIPj#+`b1!Xjc@lk2^0g z-$IpW;Lz9NLL}Na0$Z%Q?G^*gAtdWAjJuyW2I+5wg_98&scZFz_BBqvI@m$G2RldI zFcuG|4!>=y22&8+U6awRGy8CUN#F6p{}R=*{BHDa=d15S?Kn};=*_rHVZc5+2fzmYSO?RX-8-sGP`u0D(fE(5-WwbDL$oi$F)oT~8>zSuB zC~&~ChuUc2f1Pp6Fj@nXIS9l((8eQ9INFS3iSqzB;^<6Vb0`7LQ9TZtmF-O}Ek(yzzU`+pAW@9VHaFRH>)QlTeIU++t904q&`#fnEGv6^J zc+hER5}G*Xwe!^VQ~U6((r0A~B8yu75Z9R@tg{Y>j{morj%5gowlN1mRe*+#cM;^e zwyFcf^C>i)u#9n+AXrM=A&BLkG8Bq%YoA@lO77O~_X2KG%9tr=%3B3eIz87wS8YFd zI;Y;G4zzTaX^?9m65P=eUc>HtKjgYzGljbGL7FYdt&5SeIBP+$Qf#OVF`?l^#8|;S ztnW)xhY-nNT{D0V?!3Hu)2a)EOC6L&%?@{7)6+igyzAAI5K3kM39d2Up@GE_h=f*X zN{TxKQg_PqiYGYDDcgcFvowN0HS}XisB)YFpb+&069}ZWnLjah3WTZAchs#;4Q|IY z_YB|)&1F1TD*W%Wo^^L4CQ#7kpgDUsG4)L>LZJB9bf|P5w~LSMakQJ@6%FyqKqt?D zSq(XP*R`s?kYJqDssdd#ZgX2*`=vD~xefz}F>1x=MF5X_ApbUy{5@_iNpP`Lqbz*9rd05EY!`Y8M;La`yC zb_~EH2F69)R)tEduzXdQe z>=vN=wqvR%)5(cLpd_JOI*xZU4CizWnBwEpbEsFffY--OUYw8B%Gv;wmrctJG+_u7 zMRu#T3^xQ)A+Ic>Qk>>wi)%u(U@%#~M~RMc&osFoJu?J%MBGBsJXT^lO)0R0OcS4` zSaFaBy+WYm|M0QDXG~G}yI_>Vlj>ULa^z#ZMGygXvjDXMVC2wW_O}^& zt73P7=#-(Gbk?&cRV1Fo=>tiWy#;a@7}#LeQ3`nc2J8&&SBuq~69Ab#R9B7D12^y4|w`nTn#~(X7kTplDmto`Zz+WfLw2*+sAu z;OZK>2D)D9;KZi8sS?C|(!-EO^JrQ$G%7`0(W^KOPVLA0>kEeP!~tgX4V+a~Z-XMI z8bmq|_0;(qT-~u^T?gH+0ZBi`v<-Wa;EKK&O(3Fsz|vKQGb*%>=eGdvaxT-H+90IL zoFiX2Jv2dVal(bNb|APz*A1(vbKOKbr)?#jm+hr!VN;d{4k7;paa30c4WPj5wnTn} z+w!bQ+`|;R$;0Xt`~-r!BJWv^9P{|kNo9;1&1_@LC$1Gvz)oo?HX#%Ip!%sPkdjlH ze@D9p-Ly#EsRj%T89dBsTEG~$^L*x+ z|DJZ8;4Rq;ICaQ)?|W)DCy-GsZX~DEzn?{~|HmxvfwOdWF_sDcq@Q{%Fl-HUo`y^b3mFw{V{ zYXHk&nb|&HmIegNOa{_5C~86>Tb7sN>{6CTryE9_DLOaBr2RGot9xfR&gOz$InS|* z$-*poc2XFo`Wj(_-4m>XdI{a3_vnLM=KC6_41km=?0g*Ic2=x_5_AZrT$}N+IRSXY zb%i5Tck-@r1eKiVjJ16=29enFA9z3d#hv%0x*ok1?**Siqze9GnFfs0nPEAc+Ii-< zKX~(b)ck)&Fj@GT!|7n`e5>(-&3-K!FAx^JS6BjAH0)vJ7UIi}&jm%!9Fkro#lI8x zl7X%yadtsN_bMa{Pc|-UM$xyUX%M>9>MKV8H7H$!0t2=f)No*Ig{WaRL9mMD!?19b z*^}B-zl@&k2@0E~Z6LKq21|X7>oEDR#I{w6eCb8>3nEjPBAXrnp>OYV2)S>Mt>_pA z0M|qf?cntYf@K#kcn^UnLvzX#oF2vOW;zkV)n1L)N09R`#B7aroYJ_lLe-xDUAgxT ze2C{13aRQ5^QoN|uA^O?t_@qv9rUrmzwm#Bq}gH9@M8|7a>Pq7xXn6es5G>jhd`=V z>NH}U6#jOc!7Kyv{ISL-JB1Y}N=75r1qW^NO+l{!3ZwOn*{XESVGlGgt>bp(VGx$z z$6kYGfi3CeWy8VaOiVL*H8s1=fM)oD7ra{#;F9kn{bCznIdTh)Q`>Y!hmg!Tz4n%_ zZFQhHv$&Sn1-jxh#@2T@dB?*$eZv~}ZBz*c$v3uqjuN~p-yslY7=6&wo)~Pr9QWP~ z?!?)rF~C{+5vPegWuk!&8esOK51RtrN>4H1JgWS)G)6MbZ&o3)QUs0GDq1K08R({qw4po_LUJ= znlFHQvyQ~;ILUvu*t`a$uzEs1T5H3Y93O0IRil~kVyyCJU@MH9XJJsd1%(7}=B6jx 
zDFCw#ItX-}lAy=~H0%gu!ElsT45X&Mq~T1@K{X?Y(ARmMEgZ;HyJezasD%>2|{ zvLO@-x{5MR99L5jOnuOb9hhu6RT*U+L$NOZvHce`VxT*5t-_q9;7f53ns%ImPu}<| zl%3$Jmck1E&+NmXQezt?PJYfobzQ=q44<(gm@MmYN)crP$&&ST-J~fwFoRGVAt*br zIM%ne0#sXy@8JS~cDhJ!!}f!RtyO49lC=snjyrERVWYmDFej-s+5(f8rJ;4`LnfpLj<{hmpqW47L?I|6OI!Y1l>&n3}!7H&X1*C8cE6HkPaEZR0PqPF5 zzu$QcTkfbF=)&manbdd+g~A-jeD&0UxASGUGegwzXp^3@@YYPiHdr#3FjfQ*YB6DN zFwcWssAauYg}NcQJ7f*ZyC7wU*&!-q)eC09JzLmUV8K z?%Yw(Axo;-q^Uqa7CQTZwjGwm2aPGKATt#^FJDCA4oqi3)u;pL_b#48-Hu19Z+!-R zI~0*Vio)|Ko7$?+0Eq7R+i0qV+o_|v5J?+~8|b-R8rY#Br>)`uTL);>aHWzipqsGF zm7!;_%lSc=>x0PY{qO%7G@L?uxN`)i$g5#2Vrb9vY@}^#)BVJrI>wHA0i>z1p&GKi zZ1!?uz(OUtojM%l*Ot^2^s^TE8(7Xuv#)HCw%w#8~9foHq)4H0k_4mbQx8GENqgEVM{H=^|i!0Sd~}D zA`rt`ia!1r8nq|o3!@HAVBU-H`c3rp8KhjB46{_hTZux0IEdy9g$ko;c$hqyokS zu!wL1!F=B5}3o@@qzMgp&TmK0~h!CZQ$S(>zRmtd{cu>wqnRJ)J!70$oJ5o@F|dI0ma?)Bk2NC{k&I*)+PoBQ&#mSMuT>E2K`*u`w}QCBBCf??KB6W_x720Q77QOCQ@i-a4&9v=lf4$*q+dm@;m}+0Z(` zTx2!@vIb#xa4k-cYw;A(HlU-oI~w#hahs?6df+SrlHiZOE&E6GOFL2R^6jbBdeH|39woHn{KWx)VFvsx7;f zTXsvg+*WNFRT)DLWAlYxa8f192T%fSP{b0D#5DAVAVCU{KmtpE668o7N}^+Aqa@r} zIi?$K1K0E#&N3T1rW45KZa5{q!xx$?IHQ@syJ`zBX@}dY4cn?M+o}(rbH9K5KUh~WlRdqibAZ5Atvx=27 z!dkm{LD6<_S8dI00cGjPq^7+;?sgAl8@ISKHr)=0Z3MqNID2aEa*h=si4ErS&p1#% z%o!#}YV%EnIfx>GPke5xGK_of1o}kW%y26}?p`IBYAav!2vWD}Et_Woj<>V;3xM4G z=vTk;=NEog8G>Th$|ybVlvTZMxfo=9rzY`BQ245)wi4dujxx$FRke}J3BgQAt;Jr0 zBvhJT(!U9Y#aia~eI?(UwXN5cBbZKSV*NPn8WMimvkM9nBExCb_a0|&Yv@Fsz)}NC z_wUDlt9r`wzg`zKqnO4JHR#`fmRzj9?aJk^DO{A~`TJj0wosTzzlF^$Na1@{#^^^s z0*UP`v#Hy-!^nWCZD)LXJF=|5)H}nl15B!pYiNxx<*#F*IlqF2)#i(yA>%qW-Bi(YYLObiDR*mjQ+I1esjpEcwhRsGHo0 z?xki>as`fjcdzfHuOf+)5qQb10g`hCrG^G~p(x%jeNIPpuY;pC+_>Mt1J#-L{Wt5L z*Dfw8X)r0TT&;8C8(|F2I-#+-^Eq12Xl?1>fwEZN4`TllTa5v>apR47ROxL8#84Iw zcXUC9iqFA(8Ij@PeDU%_vnX&76qyw@#mJpnU})Y<4^OOr6~|26r>hs8=T4PtMro8H zLd51|hXrJXzJP?A5#G;nCacz{Yvc?QVmqCr1R+9H^Vp16gkYK*ChnDq#mBm1%-Y%N$dg zc93BWWg6Iynw#77{kJWXWdeKyXX-+;+qMx*8X3uNhLbJBt2&25q+8G;@cngi1*IP) zf;ITAd#2kuFZF8J56&uO6wDgWRa>3UZ2r|HiPlngF?d9^mg9yIQZ!kulEs5aVfFUw z|D|#V6~|LfWBND&h4I|So9QsyZ<3sc)tG@=b2!s|lS!3pp~IAgw9dg2av-ElDbFY9 z^@|$1MQ+*nvoHM_U04Fvg~K{c>)$RXY#GjN1FiT!-Ha?>%eS&-TP)uMBgYfdrr7Gh z3QDiWS(MavC%`T51=d}pHISHlci5}Y5pc7oy`y=r3%pDG$~+$#>jqrcsF-uOhzK^o z5%1_r)q1JH8C{?0-3*b}qji>H3pbq@E-j9ys?sBhxkI19?e+vYIjt?&!JV8O zpR5uegi}MT3B82%VT5G97DZ|tbr2=R+g^Y=+{I8&wkY6y{)?->+giyPP}Xg#tAFzV z)^WA+JBk-du~@_^3z;HcKuddFhqn#fs?wc9Op3=#VCO+uKGkN$+~M~L^Ew}w5UO2J zH7|F;_SHJoa)Px-M6YqI@O5c|!L0@59NWA=g;85&C}E(pF_fzX3?r#_PfCjbMtyML~&ehVe4?QaiA) zC7##q5cGqo8y2?8+MI$$lCLM)Yp~k^a(ce4btEXs&z@EV2En8g8&AUt=i%f(P{4p9 zw`4Ni8w?&siAbK&M@~R^zQuxe)%F=M5-O{bD(E}{CazzsdHR`vy_zMZ3y7zVmnrLW z3D`R$tkRxG5b=*Lf9uER@v#V|>SZxO;iCLRjmaWF|C)d~iKE5YFLY*L01q_r* z$=p0=uK?If%JAz9THuKJmusDmA6@(gl?L(uR2yur0OIZ~#H>PcGg?K>8P4lm ztD1W^-v(sjUII~=@0%suhm!9-r|U?2_0@g1*O_VyQX9Zz$}6uKiEc!a@Y75yDEiG% zGBw^_QryzvdKN%&!;#baMYU}xNwqq4^BS>i?`UREr>2Qd>7%tOuF)Hed$BN&HsYhpK#kOF911!wa8KxZ>(0YU74`gT1w zdJc#rhO;@L#E~*j*x5T&*;WP@kumP(oHFN@gkeUTE?o{J|8~rOIq-?z*^F!5*LI-q zW(S|Cl34+9qm=${UmV_LW& z&YcE!8#$%JM98lzUZAP5z+1TGW#(mCkA?nxI$fjaZQNm}cH^sZZ$}Y}O5*d5gp}=< ztBD-!;8VrzFc`_!7F>H0?jTa)bCtH4r#+04!mId2GJ%jjH3PZWw?^!mQfoTZRL*xH zdmYZ1jxg)(EDS6l;Puv-S_eKCu=2;1{qq4DeGx4JsV+NfP_YD(rA+P9I$eec>cxt0 znLE5rO}2xD*YeQ=2b7q*im`1puK>xqb~i(l)qJ0{XGWKSBzd$JSMGumx&Bp&+~baj zX086VsZ%`Pe&d_0qA1-Tt*gU7`DU%@Z-CdUe;c%JL=fA7Jp;;_8&@wJt1-ov0PP$W z`hlb~s*A2AfNdzzvmgtUv(20;NG>)zs+J7qqha-U75#AMx6K6)f|{-VPtsma z+#Ib1!3pk&_LzD%17ZAR_h9XOLOuGkI$}<|XrrCN5dQ)kfofc04DK8f_G)PJx)q7@ z2|TJO7CBReswJ9aF+nLWAtt|V|Kx2&i&9m2x`uMgKr-(|P3tw+;7GNe{!=Y(BBTl) 
zcjwZnK=pqG4GT4Tsw7syD#ovCoY%OMob|Uh622SH4imPH^d2Yy4-5Cs`Q6WV_0F=j zV*YW9Z0}P_aRWEGjhZP*bt8oDY#43^BA9v!y0ueV^8I*gEc=15eZ;)z_+$xl8=C0q z9p6PHl_U2&;G-GjRxJ5d2td3+a0Ex7oEYd-oLlEib%xur+n|CzYPcb8iK%kk~(nZk{m;UDGNd1TzOO_vZbq=?ngRiIbz6!Ph+#A`@V zi&ii0cJO3HeGkOck$Nxse)nno7})pc>wygeVg0i|+K>m?hx70TY61lSzJ?rm*U zZGo_~|CzliLw^D}46P)&!=B!?+C{~Q6cKuHxwa8^bbbcg7Pl-6A}ODp<2@6W!*H_n z*|KnO5KN6|V`KyW!zfYQm9Ht@p!OP&!iBeJrwALt#Adyx-5K7Df|oI@SJw>5TkkTz*T^r22(}-S>JWvhHaeM(LI5 z*eQlXmC^)~1ZpY1ZK%({sWTRA^~Rv|_8-`z@$N#G)zfy0XaPyKTnVlw=Uf+7_eiq? zZ!sECZ|E$7qH@SUadU@}Isn(2zsnt|&LQMJ6jCx6Q&GwsC3Rq5dyMTSoJw+19Z>~W z5K=|!*d;M2oYgK+8rn5DwSLzxD)}x71Lj(&zlTuGVHX*qb@-gYlhfzY0E|UZ`p@!JUd-)paVb_Rhq^eC=t> zb4P}?+LGO`GaYT85o4O(g@ieN{i>qpPQm1;^m({<`?lUMaB8ZC{EMBh-R+I%B{;=5 z;oJz`E+fc7fB&OecJ35YZ7W=ZQWxqPNi`5RQRvL8zo67PtH|0Bq*b8DN_&KJ4HoBE zO+fB)uQzJW>Qq&{ha{R^L$o_SKhq{w|J{1Ao~lLn4WJa5>Nu{90V%jYtx{;mH^YgQ z4Z2v#-trO2RJS!i{YdyYQ8Vyui9zApwpMMo!^xeqty5JCcXU2njH@YC=VP{KduSL1 zOM~q}(1U=;U#Kc~n493(6{$U=iF|3NBxblHZwoy&FPTS3v!B_J`*)y?M1U_>)B_Hve^mXWZ!cT|&djXQO*D!KS- z_;s_3V*Hq4-wK$d|B0km^KotY_t%zn5bP z2P@?lnw{ev9cI^5(G0>pMKc*M+xnCkEdn#hH9%Ks4-EnZZVqljBv48@Us& zYcO?$ZZm>ZkG6%CE%7<_ylnP!hjlMzz}w={bKpv6dw@M5Uv>ZlQrC&0K`7Ec!*z$dKm$hlIkz)`CfYqQr!v#|sAF5V+8cuNXfpQ5A-mUS zxKn~H-!*z%K&p!Gf;L?Sk{#zA4FS%>VPU6twqB zRNY#R+hus#%qA1(; zW3jER+=s)4-tN?N*=?f0|3gb|u8m_h0KH7xvTydeHzHy4`I_Tz=FZxW|^k_YS*^ffmZq+V|Qy$wuSt<)niaOtzVraeDj(q#G#XtLsvIbJt`lq91FI8Ei zCHA8|vsec^U(UPsmgf(5zK+n(Dy&C3x}D-di-|-LXO(lRWtf<`07-R7&dku_vR}3W(1ttsVgtqaTs66 zs@d(&H^~gzvQ_SIa#079t5CLgK6sGN9dL<&R0q3HwZEv`VdYVWF0|f2TJI+1!(hsG zZ1&~43o?O_s=a%ndy00Z3$%9e3mWi`0>xa}nT|kiRW%n7!mq5wW=!X0mq+qs@jOBb z%6QSj;UXNyj@IFUr7i|PBgpcvn6+Qy>{a(NPT00wZg&2y0Mz(( z1wk#{UFXT^Jrove`dMAn8cGI<ZBH{5uNYsbPU64#NPc+ji?bovS8kpqgIDIlKD`U#wNNbo||mf z+F{X8mjRA>Zk*P7JTabuQjl#=X`zEg_gZ@}B{kRC2|b_iHq`62h(#z_tEN#+fKx0g zO6}se*6uQDDsJy^-f|-G@qsy8FIMupqa8(GzwVh z)6Fgk+^b3ThIP#pxAX$%V1GX(omfHI28ij^N46^aE+qN)?oP(9<4n|b5J*a{7HL_B z;fSv8^hoC*LP~6?R!k3r;@-Pc?>NDo?4BfzYR6_!5}Ot8Hxzz+KVQpIXSn6o%b~SF zumFj+_K`|m&vgNxZze&`=ku{CODsxgvHPrfB&(%-R{h&i_{-gAosfF843oy`c0}k} z!1bky=w^UPp;%UcRJRj`b-4k$idJPZ+x9uvpybfTSv!+{7p3w)GD6$Cud1EZ5+#h1 z>e#bg-kRgS|7l&OL)p>Z&=FXTs4KS{5mLT2z!V#uN9t9U$?I<`Z&;qb|2szwI{N`! 
z>b&b9>NY54Ha>m&NGW{$mhmdO9&qco>w)&c&TE~@Cir0l$=FGQE$H1_hl5~_;l|_-w{&Ic~H0MJQ{J{ zdaVq%t|Ww<;jkK_RfI_4MBTn!15*}e<|^;q?z3m^)q^hhbCaj4HsA03KWG1hqW{n9 zl4=%9tKryylqe~HBsRh+L(8abqK*(2*53BE=oXLwzW=%s?dOhojB(q&gl#Bdt0zm^ zj9`0wGaaVUo(>Lqz-$9g=sC+u2sXiP=3L9gkG7}wi zx`w`k$%P#a5zND+`lsKiN8ru?$rAH9Rizj5wT5Ym9Z1A8-~Wmt2iHYhN0$~8xXrWM z;?xp?Z0s3UskX)!KZ3FWf3Q?V^r+{v9%W(~%34J92YmrnL1M59{^tUSK z8fxl_4rR)M6A9i;Fm`+dy9cDRs3iidRjB)j>!o>AY4-gu>w;{RveMpwRGb`rp%ybX zLgD&^6Q0#~Y(~NL+yuW{xaIY8Kc}oio;v0Qq;1?%aU9lATQ%GclS3>n??{;A_#I4` zwoEb{Kg#zD$`=Ul7L@8#ZSj5>F$FPK8b1NzQ`=ERHUr`0ldme<&Yz?6N*7EHycU=y z9Yqe>A?I_Q=b5_Be;!Q5>K?vaM2g7Fk;rwa3vr?jtzHIG4m<0G&Srsm8BGGe{LB4x zI_|+@@@CxaV(_3^mVL8ZnBa^4=esFDSHg=n;*P&@xrve_{C zpv7IIZd$(qYSoM?ZR94VSw+8@Q<8NXSV00(a3`S6F8brGj^b_O44>nMG25Xe{h20= z9q{@wVJGe;dl!QSktO*;wTQ#qz0S+vGABz8qDhW5(YEVwINtjoHqoBoP6}4?^~^Kz zswwzquU}HQAa?4^73R)Bk<{4rZ`Ee;0)qSv?54N*=8{QG=;ZNyg7!x4ix3t^>dlX( zggP*)I5|^Wbm)#1Yo$@h$=%>TRGL7^=~(1+GqCO0E0~pdjk*8Te8c!)^*C#w^qwA( z$ldsRt__*)b^fMKYftWj$-l)nIqLggTe!Xd=^iB+_mOJqH*)uy6DK}v$gsJi3?G|r zH-fjIL_9~Li2VrBt7hMkK1*R6q7--5Jg*v#?JyCKIb8znfKq%XnJKH=w)7T_6lBe= zh!PI0!!C8)8|wTLLUN!7!++5(qeaSkT9Gfqa@4Y* zpS=bW!Lf0+GN>&SQL9I7<8TEmr>dh#yylw zuG_ZY6a9X>10p=zYanwc1(&81A z^bdKVT$WdnBBA!|!5Wwtz2Znfcj2lk?X{tM0Cp#8zImV9W8T!nl8)J6J-m`p`2XHQ z*c|0+1EAI)JV@i?{KXN)%qgA&dt_$|cUaO_T7*zj`jM29aZztqX&V9_z53d2J8TCt zqUb%d){IH)h$jp+qtb(*j8Lo`F&~DB2A{RAd$9BDIaXQ@_9S)6Gau}dPQ=6QH@ z?(-5-Vw`QJ`YyvtfkrCU<%A`0nw}4{f1|3{O`z9R)j^ZCU%G;}UgKwL0eTf7skwld z=U~^6EV$Hwc{z*p zJY5e8ZO+HxZf|1?f}EMwRKdrm!9CsPju6Mou%bk^qasyCVdli1PTqfs;Qytme~=R42s=G+-DX|h*T z&$=(f@7O8QIma#ii0*tooGyDFi`?XX%PSPOOopnbSwg~|Dq4m{FGGpe*sX?5%Lt-! zC0~nw4M?f1e7D9GH}jE}iF+l${vDC50%9FG-3$+`K@@PW1*p56^8Z3jyzV7LW;OQ{ zVkqy;_x&F&S^HXC+ng`^_3f8aqQVmhMVKjJFNKG0`xSfUPM+K`%#jgx#>0* z-`4a0$K`)Jko@d!yY@Ts(V4j_#DJle%y8#(w7qP35bpJd-Db#PG{t%JXl^G@bU}_< zF{Zs8o^0z!^KcWq{#joQVbmON#-@)^X$gmbHt2NO=?7_a{~9MJNiT zI@DlI!6KEEs`6asPDaZNkWz&c*NHKquXUfj=PRk3aHQs9P%GQczoCWF?Y{T!u&p7; zi#r%K__-V3dXTkcoZbU5(*N*(OmDd{(wz+I`#)PbuP&7#y$wLP%mbtwVF7K^DQE6= za9bI6`Q_>nlkOSf@V7I`aE({_j=wL78!) 
GIT binary patch (truncated base85-encoded payload; not reproducible as readable text)