mirror of https://github.com/dogkeeper886/ollama37.git, synced 2025-12-10 15:57:04 +00:00
convert gemma2
@@ -44,15 +44,14 @@ func (p *gemma) KV(t *Tokenizer) llm.KV {
 }
 
 func (p *gemma) Tensors(ts []Tensor) []llm.Tensor {
-	var out []llm.Tensor
+	out := make([]llm.Tensor, 0, len(ts))
 	for _, t := range ts {
-		name := p.tensorName(t.Name())
-		if strings.HasSuffix(name, "_norm.weight") {
+		if strings.HasSuffix(t.Name(), "_norm.weight") {
 			t.SetRepacker(p.addOne)
 		}
 
 		out = append(out, llm.Tensor{
-			Name:     name,
+			Name:     t.Name(),
 			Kind:     t.Kind(),
 			Shape:    t.Shape(),
 			WriterTo: t,
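Hunk notes: the loop now preallocates out with capacity len(ts) (one append per input tensor) and stops renaming tensors itself; names pass through as t.Name() because the mapping moves into the data-driven Replacements() list in the next hunk, which a generic conversion pass can hand to strings.NewReplacer. A minimal, runnable sketch of that pattern, assuming a hypothetical renameTensor helper (illustrative, not this repo's API); the replacement pairs are copied from the diff:

package main

import (
	"fmt"
	"strings"
)

// renameTensor shows how a flat old/new pair list like the one
// returned by Replacements() can drive strings.NewReplacer.
// The helper name is hypothetical; the pairs come from the diff.
func renameTensor(name string, replacements []string) string {
	return strings.NewReplacer(replacements...).Replace(name)
}

func main() {
	pairs := []string{
		"model.embed_tokens", "token_embd",
		"model.norm", "output_norm",
		"model.layers", "blk",
	}
	// "model.layers" maps to "blk", so this prints:
	// blk.0.mlp.up_proj.weight
	fmt.Println(renameTensor("model.layers.0.mlp.up_proj.weight", pairs))
}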
@@ -62,8 +61,8 @@ func (p *gemma) Tensors(ts []Tensor) []llm.Tensor {
 	return out
 }
 
-func (p *gemma) tensorName(n string) string {
-	return strings.NewReplacer(
+func (p *gemma) Replacements() []string {
+	return []string{
 		"model.embed_tokens", "token_embd",
 		"model.norm", "output_norm",
 		"model.layers", "blk",
@@ -76,8 +75,7 @@ func (p *gemma) tensorName(n string) string {
 		"mlp.down_proj", "ffn_down",
 		"mlp.up_proj", "ffn_up",
 		"post_attention_layernorm", "ffn_norm",
-		"block_sparse_moe.gate", "ffn_inp",
-	).Replace(n)
+	}
 }
 
 func (*gemma) addOne(_ string, data []float32, shape []uint64) ([]float32, error) {
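The trailing context line is the repacker that the first hunk attaches to every *_norm.weight tensor. Gemma-family checkpoints store RMSNorm weights offset by one (the layer computes x * (1 + w)), so the converter adds 1 to each element before writing GGUF. A minimal sketch matching the signature above, assuming a simple element-wise loop (the actual implementation may differ):

// addOne adds 1.0 to every element of a norm-weight tensor before it
// is written out. The signature is copied from the diff; the body is
// a plausible sketch, not necessarily the repo's exact code.
func addOne(_ string, data []float32, shape []uint64) ([]float32, error) {
	out := make([]float32, len(data))
	for i, v := range data {
		out[i] = v + 1
	}
	return out, nil
}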