image processing for llama3.2 (#6963)
Co-authored-by: jmorganca <jmorganca@gmail.com>
Co-authored-by: Michael Yang <mxyng@pm.me>
Co-authored-by: Jesse Gross <jesse@ollama.com>
llama/patches/0010-add-mllama-support.patch (new file, 690 lines)
@@ -0,0 +1,690 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Thu, 17 Oct 2024 15:18:22 -0700
Subject: [PATCH] add mllama support

mllama adds cross-attention layers to the standard llama architecture
it also requires a way to input a new tensor: cross_attention_state
once per generation

cross-attention layers don't change and so they are cached in the
kv cache once per run

remaining is to implement the cross attention mask
---
 include/llama.h |   4 +
 src/llama.cpp   | 456 ++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 447 insertions(+), 13 deletions(-)

diff --git a/include/llama.h b/include/llama.h
index 7cae1bbe..122e3cf1 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -423,6 +423,10 @@ extern "C" {
              struct llama_model * model,
              struct llama_context_params params);

+    // TODO (jmorganca): this should most likely be passed in as part of a batch
+    // and not set on the context for all batches.
+    LLAMA_API void llama_set_cross_attn_state(struct llama_context * ctx, float * cross_attn_state);
+
     // Frees all allocated memory
     LLAMA_API void llama_free(struct llama_context * ctx);

diff --git a/src/llama.cpp b/src/llama.cpp
index 83b80b59..b189a19a 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -169,6 +169,7 @@ static std::string format(const char * fmt, ...) {

 enum llm_arch {
     LLM_ARCH_LLAMA,
+    LLM_ARCH_MLLAMA,
     LLM_ARCH_FALCON,
     LLM_ARCH_BAICHUAN,
     LLM_ARCH_GROK,
@@ -223,6 +224,7 @@ enum llm_arch {

 static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_LLAMA, "llama" },
+    { LLM_ARCH_MLLAMA, "mllama" },
     { LLM_ARCH_FALCON, "falcon" },
     { LLM_ARCH_GROK, "grok" },
     { LLM_ARCH_GPT2, "gpt2" },
@@ -330,6 +332,7 @@ enum llm_kv {
     LLM_KV_ATTENTION_SLIDING_WINDOW,
     LLM_KV_ATTENTION_SCALE,
     LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
+    LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS,

     LLM_KV_ROPE_DIMENSION_COUNT,
     LLM_KV_ROPE_FREQ_BASE,
@@ -439,6 +442,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
     { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
     { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION, "%s.attention.block_skip_connection.%d" },
+    { LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS, "%s.attention.cross_attention_layers" },

     { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
     { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
@@ -613,6 +617,14 @@ enum llm_tensor {
     LLM_TENSOR_CLS,
     LLM_TENSOR_CLS_OUT,
     LLM_TENSOR_BSKCN_TV,
+    LLM_TENSOR_CROSS_ATTN_K_NORM,
+    LLM_TENSOR_CROSS_ATTN_K_PROJ,
+    LLM_TENSOR_CROSS_ATTN_O_PROJ,
+    LLM_TENSOR_CROSS_ATTN_Q_NORM,
+    LLM_TENSOR_CROSS_ATTN_Q_PROJ,
+    LLM_TENSOR_CROSS_ATTN_V_PROJ,
+    LLM_TENSOR_CROSS_ATTN_ATTN_GATE,
+    LLM_TENSOR_CROSS_ATTN_MLP_GATE,
 };

 static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
@@ -642,6 +654,40 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
         },
     },
+    {
+        LLM_ARCH_MLLAMA,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+            { LLM_TENSOR_CROSS_ATTN_K_NORM, "blk.%d.cross_attn_k_norm" },
+            { LLM_TENSOR_CROSS_ATTN_K_PROJ, "blk.%d.cross_attn_k_proj" },
+            { LLM_TENSOR_CROSS_ATTN_O_PROJ, "blk.%d.cross_attn_o_proj" },
+            { LLM_TENSOR_CROSS_ATTN_Q_NORM, "blk.%d.cross_attn_q_norm" },
+            { LLM_TENSOR_CROSS_ATTN_Q_PROJ, "blk.%d.cross_attn_q_proj" },
+            { LLM_TENSOR_CROSS_ATTN_V_PROJ, "blk.%d.cross_attn_v_proj" },
+            { LLM_TENSOR_CROSS_ATTN_ATTN_GATE, "blk.%d.cross_attn_attn_gate" },
+            { LLM_TENSOR_CROSS_ATTN_MLP_GATE, "blk.%d.cross_attn_mlp_gate" },
+        },
+    },
     {
         LLM_ARCH_BAICHUAN,
         {
@@ -2390,6 +2436,7 @@ enum e_model {
     MODEL_40B,
     MODEL_65B,
     MODEL_70B,
+    MODEL_90B,
     MODEL_236B,
     MODEL_314B,
     MODEL_SMALL,
@@ -2434,6 +2481,7 @@ struct llama_hparams {
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;

     std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr;
+    std::array<uint32_t, LLAMA_MAX_LAYERS> cross_attn_layers;

     uint32_t n_layer_dense_lead = 0;
     uint32_t n_lora_q = 0;
@@ -2502,10 +2550,11 @@ struct llama_hparams {
         if (this->n_expert != other.n_expert) return true;
         if (this->n_expert_used != other.n_expert_used) return true;

-        if (this->n_head_arr != other.n_head_arr) return true;
-        if (this->n_head_kv_arr != other.n_head_kv_arr) return true;
-        if (this->n_ff_arr != other.n_ff_arr) return true;
-        if (this->n_bskcn_arr != other.n_bskcn_arr) return true;
+        if (this->n_head_arr != other.n_head_arr) return true;
+        if (this->n_head_kv_arr != other.n_head_kv_arr) return true;
+        if (this->n_ff_arr != other.n_ff_arr) return true;
+        if (this->n_bskcn_arr != other.n_bskcn_arr) return true;
+        if (this->cross_attn_layers != other.cross_attn_layers) return true;

         if (this->n_rel_attn_bkts != other.n_rel_attn_bkts) return true;
         if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true;
@@ -2623,6 +2672,10 @@ struct llama_hparams {

         GGML_ABORT("fatal error");
     }
+
+    bool cross_attention_layer(uint32_t il) const {
+        return std::find(cross_attn_layers.begin(), cross_attn_layers.end(), il) != cross_attn_layers.end();
+    }
 };

 static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
@@ -2806,6 +2859,16 @@ struct llama_layer {
     struct ggml_tensor * ffn_down_scale;

     struct ggml_tensor * bskcn_tv;
+
+    // cross attention
+    struct ggml_tensor * cross_attn_k_norm;
+    struct ggml_tensor * cross_attn_k_proj;
+    struct ggml_tensor * cross_attn_o_proj;
+    struct ggml_tensor * cross_attn_q_norm;
+    struct ggml_tensor * cross_attn_q_proj;
+    struct ggml_tensor * cross_attn_v_proj;
+    struct ggml_tensor * cross_attn_attn_gate;
+    struct ggml_tensor * cross_attn_mlp_gate;
 };

 // very similar to llama_batch,
@@ -3452,6 +3515,12 @@ struct llama_context {
     struct ggml_tensor * inp_pos_bucket;    // I32 [n_batch|n_kv, n_batch]
     struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
     struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
+
+    // TODO (jmorganca): this should most likely be passed in as part of a batch
+    // and not set on the context for all batches.
+    float * cross_attn_state = nullptr;
+    bool cross_attn_state_first_pass = true;
+    struct ggml_tensor * inp_cross_attn_state; // F32 [4, n_embd, 1061]
 };

 struct llama_lora_weight {
@@ -3686,6 +3755,18 @@ static bool llama_kv_cache_init(
     cache.v_l.reserve(n_layer);

     for (int i = 0; i < (int) n_layer; i++) {
+        // for cross attention layers
+        if (model.arch == LLM_ARCH_MLLAMA && hparams.cross_attention_layer(i)) {
+            struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
+            ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hparams.n_embd_head_k, 6404, hparams.n_head_kv(i));
+            ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hparams.n_embd_head_v, 6404, hparams.n_head_kv(i));
+            ggml_format_name(k, "cache_k_l%d", i);
+            ggml_format_name(v, "cache_v_l%d", i);
+            cache.k_l.push_back(k);
+            cache.v_l.push_back(v);
+            continue;
+        }
+
         const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
         const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();

@@ -5460,12 +5541,14 @@ static void llm_load_hparams(
     }

     // zero-out the per-layer hparams
-    std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
-    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
-    std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
+    std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
+    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
+    std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
+    std::fill(hparams.cross_attn_layers.begin(), hparams.cross_attn_layers.end(), -1);

-    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer);
-    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer);
+    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer);
+    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer);
+    ml.get_arr(LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS, hparams.cross_attn_layers, false);

     // n_head_kv is optional, default to n_head
     hparams.n_head_kv_arr = hparams.n_head_arr;
@@ -5514,7 +5597,7 @@ static void llm_load_hparams(

     ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);

-    if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
+    if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_MLLAMA || model.arch == LLM_ARCH_FALCON) {
         if (hparams.n_rot != hparams.n_embd_head_k) {
             throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
         }
@@ -5554,6 +5637,16 @@ static void llm_load_hparams(
                 }
             }
         } break;
+        case LLM_ARCH_MLLAMA:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                switch (hparams.n_layer) {
+                    case 40: model.type = e_model::MODEL_11B; break;
+                    case 100: model.type = e_model::MODEL_90B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_MINICPM:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
@@ -7249,6 +7342,55 @@ static bool llm_load_tensors(
                     layer.rope_short = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight"), { n_embd_head_qk_rope/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
                 }
             } break;
+        case LLM_ARCH_MLLAMA:
+            {
+                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab+8});
+
+                // output
+                {
+                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (model.output == NULL) {
+                        model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+                    }
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    ggml_context * ctx_layer = ctx_for_layer(i);
+                    ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                    auto & layer = model.layers[i];
+
+                    if (hparams.cross_attention_layer(i)) {
+                        layer.cross_attn_k_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_K_NORM, "weight", i), {128});
+                        layer.cross_attn_k_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_K_PROJ, "weight", i), {n_embd, 1024});
+                        layer.cross_attn_o_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_O_PROJ, "weight", i), {n_embd, n_embd});
+                        layer.cross_attn_q_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_Q_NORM, "weight", i), {128});
+                        layer.cross_attn_q_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_Q_PROJ, "weight", i), {n_embd, n_embd});
+                        layer.cross_attn_v_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_V_PROJ, "weight", i), {n_embd, 1024});
+                        layer.cross_attn_attn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_ATTN_GATE, i), {1});
+                        layer.cross_attn_mlp_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_MLP_GATE, i), {1});
+                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
+                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
+                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+                    } else {
+                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
+                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
+                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
+                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
+                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+                        layer.rope_freqs = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FREQS, "weight"), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
+                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
+                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
+                    }
+                }
+            } break;
         case LLM_ARCH_GROK:
             {
                 if (n_expert == 0) {
@@ -9093,7 +9235,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam

     if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
         model.hparams.n_vocab != model.vocab.id_to_token.size()) {
-        throw std::runtime_error("vocab size mismatch");
+        LLAMA_LOG_WARN("%s: vocab mismatch %u !- %zu ...\n", __func__, model.hparams.n_vocab, model.vocab.id_to_token.size());
     }

     if (params.vocab_only) {
@@ -9178,7 +9320,7 @@ static struct ggml_tensor * llm_build_inp_embd(

         inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
     } else {
-        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
         inpL = lctx.inp_embd;
         ggml_set_input(lctx.inp_embd);
     }
@@ -9193,6 +9335,22 @@ static struct ggml_tensor * llm_build_inp_embd(
     return inpL;
 }

+static struct ggml_tensor * llm_build_inp_cross_attn_state(
+        struct ggml_context * ctx,
+        struct llama_context & lctx,
+        const llama_hparams & hparams,
+        const llm_build_cb & cb) {
+    const int64_t n_embd = hparams.n_embd;
+
+    struct ggml_tensor * inpCAS;
+    lctx.inp_cross_attn_state = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd, 1601, 4);
+    cb(lctx.inp_cross_attn_state, "inp_cross_attn_state", -1);
+    ggml_set_input(lctx.inp_cross_attn_state);
+    inpCAS = lctx.inp_cross_attn_state;
+
+    return inpCAS;
+}
+
 static void llm_build_kv_store(
         struct ggml_context * ctx,
         const llama_hparams & hparams,
@@ -10167,6 +10325,7 @@ struct llm_build_context {
         lctx.inp_pos_bucket = nullptr;
         lctx.inp_embd_enc = nullptr;
         lctx.inp_KQ_mask_cross = nullptr;
+        lctx.inp_cross_attn_state = nullptr;
     }

     void free() {
@@ -10754,6 +10913,253 @@ struct llm_build_context {
                 LLM_NORM_RMS, cb, -1);
         cb(cur, "result_norm", -1);

+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
+    struct ggml_cgraph * build_mllama() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        // mutable variable, needed during the last layer of the computation to skip unused tokens
+        int32_t n_tokens = this->n_tokens;
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+        struct ggml_tensor * inpCAS;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+        inpCAS = llm_build_inp_cross_attn_state(ctx0, lctx, hparams, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, cb, il);
+            cb(cur, "attn_norm", il);
+
+            if (hparams.cross_attention_layer(il)) {
+                if (!lctx.cross_attn_state) {
+                    continue;
+                }
+
+                // cross attention layer
+                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_q_proj, cur);
+                cb(Qcur, "Qcur", il);
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                cb(Qcur, "Qcur", il);
+
+                Qcur = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+                cb(Qcur, "Qcur", il);
+
+                // TODO: is this required?
+                Qcur = ggml_cont(ctx0, Qcur);
+                cb(Qcur, "Qcur", il);
+
+                Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].cross_attn_q_norm, NULL, LLM_NORM_RMS, cb, il);
+                cb(Qcur, "Qcur", il);
+
+                struct ggml_tensor * Kcur;
+                if (lctx.cross_attn_state_first_pass) {
+                    Kcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_k_proj, inpCAS);
+                    cb(Kcur, "Kcur", il);
+
+                    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, 6404);
+                    cb(Kcur, "Kcur", il);
+
+                    Kcur = ggml_permute(ctx0, Kcur, 0, 2, 1, 3);
+                    cb(Kcur, "Kcur", il);
+
+                    // TODO: is this required?
+                    Kcur = ggml_cont(ctx0, Kcur);
+                    cb(Kcur, "Kcur", il);
+
+                    Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].cross_attn_k_norm, NULL, LLM_NORM_RMS, cb, il);
+                    cb(Kcur, "Kcur", il);
+
+                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, kv_self.k_l[il]));
+                } else {
+                    Kcur = ggml_view_tensor(ctx0, kv_self.k_l[il]);
+                    cb(Kcur, "Kcur (view)", il);
+                }
+
+                struct ggml_tensor * Vcur;
+                if (lctx.cross_attn_state_first_pass) {
+                    Vcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_v_proj, inpCAS);
+                    cb(Vcur, "Vcur", il);
+
+                    Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, 6404);
+                    cb(Vcur, "Vcur", il);
+
+                    Vcur = ggml_permute(ctx0, Vcur, 0, 2, 1, 3);
+                    cb(Vcur, "Vcur", il);
+
+                    ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, kv_self.v_l[il]));
+                } else {
+                    Vcur = ggml_view_tensor(ctx0, kv_self.v_l[il]);
+                    cb(Vcur, "Vcur (view)", il);
+                }
+
+                struct ggml_tensor * kq = ggml_mul_mat(ctx0, Kcur, Qcur);
+                cb(kq, "kq", il);
+
+                kq = ggml_scale_inplace(ctx0, kq, 1.0f/sqrtf(float(n_embd_head)));
+                cb(kq, "kq_scaled", il);
+
+                // TODO: apply causal masks
+                struct ggml_tensor * kq_soft_max = ggml_soft_max_inplace(ctx0, kq);
+                cb(kq_soft_max, "kq_soft_max", il);
+
+                Vcur = ggml_cont(ctx0, ggml_transpose(ctx0, Vcur));
+                cb(Vcur, "Vcur", il);
+
+                struct ggml_tensor * kqv = ggml_mul_mat(ctx0, Vcur, kq_soft_max);
+                cb(kqv, "kqv", il);
+
+                struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
+                cb(kqv_merged, "kqv_merged", il);
+
+                cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_head_v*n_head, n_tokens);
+                cb(cur, "kqv_merged_cont", il);
+
+                cur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_o_proj, cur);
+                cb(cur, "cur", il);
+
+                // TODO: do this in place once?
+                cur = ggml_mul(ctx0, cur, ggml_tanh(ctx0, model.layers[il].cross_attn_attn_gate));
+
+                struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+                cb(ffn_inp, "ffn_inp", il);
+
+                // feed-forward network
+                cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                        model.layers[il].ffn_norm, NULL,
+                        LLM_NORM_RMS, cb, il);
+                cb(cur, "ffn_norm", il);
+
+                cur = llm_build_ffn(ctx0, lctx, cur,
+                        model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
+                        model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                        NULL,
+                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+                cb(cur, "ffn_out", il);
+
+                // TODO: do this inplace once?
+                cur = ggml_add_inplace(ctx0, ggml_mul_inplace(ctx0, cur, ggml_tanh(ctx0, model.layers[il].cross_attn_mlp_gate)), ffn_inp);
+                cb(cur, "ffn_out", il);
+
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
+            } else {
+                // self attention layer
+
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                struct ggml_tensor * rope_factors = build_rope_factors(il);
+
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur", il);
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+
+
+                if (il == n_layer - 1) {
+                    // skip computing output for unused tokens
+                    struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                    n_tokens = n_outputs;
+                    cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+                    inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+                }
+
+                struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+                cb(ffn_inp, "ffn_inp", il);
+
+                // feed-forward network
+                cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                        model.layers[il].ffn_norm, NULL,
+                        LLM_NORM_RMS, cb, il);
+                cb(cur, "ffn_norm", il);
+
+                cur = llm_build_ffn(ctx0, lctx, cur,
+                        model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
+                        model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                        NULL,
+                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+                cb(cur, "ffn_out", il);
+
+                cur = ggml_add(ctx0, cur, ffn_inp);
+                cb(cur, "ffn_out", il);
+
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
+            }
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, cb, -1);
+        cb(cur, "result_norm", -1);
+
         // lm_head
         cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
         cb(cur, "result_output", -1);
@@ -16501,6 +16907,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_llama();
             } break;
+        case LLM_ARCH_MLLAMA:
+            {
+                result = llm.build_mllama();
+            } break;
         case LLM_ARCH_BAICHUAN:
             {
                 result = llm.build_baichuan();
@@ -16773,6 +17183,14 @@ static void llama_set_inputs(llama_context & lctx, const llama_ubatch & batch) {
         ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
     }

+    // TODO (jmorganca): this might copy a lot of data on every request of a
+    // single generation even though it doesn't change, so we should
+    // find a way to not set this more than one time per image
+    if (lctx.inp_cross_attn_state &&
+        lctx.inp_cross_attn_state->buffer) {
+        ggml_backend_tensor_set(lctx.inp_cross_attn_state, lctx.cross_attn_state, 0, hparams.n_embd * 1601 * 4 * ggml_element_size(lctx.inp_cross_attn_state));
+    }
+
     if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
         GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
         const int64_t n_tokens = batch.n_tokens;
@@ -17455,6 +17873,10 @@ static int llama_decode_internal(

         llama_set_inputs(lctx, ubatch);

+        // TODO: replace with something better to find out if its
+        // our first actual pass
+        lctx.cross_attn_state_first_pass = false;
+
         llama_graph_compute(lctx, gf, n_threads, threadpool);

         // update the kv ring buffer
@@ -18648,7 +19070,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         if (llama_model_has_encoder(&model)) {
             n_attn_layer *= 3;
         }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+        if (qs.n_attention_wv != n_attn_layer) {
+            LLAMA_LOG_WARN("%s: n_attention_wv is unexpected, expected: %d, found: %d\n", __func__, n_attn_layer, qs.n_attention_wv);
+        }
     }

     size_t total_size_org = 0;
@@ -19744,6 +20168,11 @@ struct llama_context * llama_new_context_with_model(
     return ctx;
 }

+void llama_set_cross_attn_state(struct llama_context * ctx, float * cross_attn_state) {
+    ctx->cross_attn_state_first_pass = true;
+    ctx->cross_attn_state = cross_attn_state;
+}
+
 void llama_free(struct llama_context * ctx) {
     delete ctx;
 }
@@ -19814,6 +20243,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {

         // use what we call a normal RoPE, operating on pairs of consecutive head values
         case LLM_ARCH_LLAMA:
+        case LLM_ARCH_MLLAMA:
         case LLM_ARCH_BAICHUAN:
         case LLM_ARCH_STARCODER:
         case LLM_ARCH_PLAMO:
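Usage note: the one public entry point this patch adds is llama_set_cross_attn_state. Below is a minimal sketch of how a caller might drive it; the generate_with_image wrapper and the pre-filled cross_attn_state buffer are hypothetical stand-ins for the vision-encoder plumbing, and only llama_set_cross_attn_state and llama_decode are actual API calls.

// Hypothetical driver sketch, assuming the patch above is applied and a vision
// encoder has already written its output into `cross_attn_state`.
#include "llama.h"

static void generate_with_image(struct llama_context * ctx, float * cross_attn_state,
                                struct llama_batch batch, int n_predict) {
    // Register the image embedding once per generation. This also resets
    // cross_attn_state_first_pass, so the first decode projects K/V from the
    // cross-attention state and caches them in the KV cache.
    llama_set_cross_attn_state(ctx, cross_attn_state);

    for (int i = 0; i < n_predict; i++) {
        // Subsequent decodes reuse the cached cross-attention K/V; only the
        // self-attention layers see the newly sampled tokens.
        if (llama_decode(ctx, batch) != 0) {
            break;
        }
        // ... sample the next token and rebuild `batch` here ...
    }
}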
llama/patches/0011-add-unpad-operator.patch (new file, 409 lines)
@@ -0,0 +1,409 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 17 Oct 2024 17:19:25 -0700
Subject: [PATCH] add unpad operator

---
 ggml/include/ggml.h        | 10 ++++
 ggml/src/ggml-cuda.cu      |  4 ++
 ggml/src/ggml-cuda/pad.cu  | 46 +++++++++++++++++++
 ggml/src/ggml-cuda/pad.cuh |  1 +
 ggml/src/ggml-metal.m      | 33 ++++++++++++++
 ggml/src/ggml-metal.metal  | 45 ++++++++++++++++++
 ggml/src/ggml.c            | 93 +++++++++++++++++++++++++++++++++++++-
 7 files changed, 230 insertions(+), 2 deletions(-)

diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index ce3d92cb..962cb5f7 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -506,6 +506,7 @@ extern "C" {
         GGML_OP_POOL_2D_BACK,
         GGML_OP_UPSCALE, // nearest interpolate
         GGML_OP_PAD,
+        GGML_OP_UNPAD,
         GGML_OP_ARANGE,
         GGML_OP_TIMESTEP_EMBEDDING,
         GGML_OP_ARGSORT,
@@ -1764,6 +1765,15 @@ extern "C" {
             int p2,
             int p3);

+    // unpad each dimension: [x, ..., x, y, ..., y] -> [x, ..., x]
+    GGML_API struct ggml_tensor * ggml_unpad(
+            struct ggml_context * ctx,
+            struct ggml_tensor * a,
+            int p0,
+            int p1,
+            int p2,
+            int p3);
+
     // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151
     // timesteps: [N,]
     // return: [N, dim]
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index fe77b81c..6e84af56 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2270,6 +2270,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_PAD:
             ggml_cuda_op_pad(ctx, dst);
             break;
+        case GGML_OP_UNPAD:
+            ggml_cuda_op_unpad(ctx, dst);
+            break;
         case GGML_OP_ARANGE:
             ggml_cuda_op_arange(ctx, dst);
             break;
@@ -2992,6 +2995,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_GROUP_NORM:
         case GGML_OP_UPSCALE:
         case GGML_OP_PAD:
+        case GGML_OP_UNPAD:
        case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_LEAKY_RELU:
diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu
index aba539e8..39fd4b16 100644
--- a/ggml/src/ggml-cuda/pad.cu
+++ b/ggml/src/ggml-cuda/pad.cu
@@ -47,3 +47,49 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
         src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
         dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream);
 }
+
+static __global__ void unpad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02, const int ne03) {
+    // blockIdx.z: idx of ne2*ne3, aka ne02*ne03
+    // blockIdx.y: idx of ne1
+    // blockIDx.x: idx of ne0 / BLOCK_SIZE
+    int nidx = threadIdx.x + blockIdx.x * blockDim.x;
+    if (nidx >= ne0) {
+        return;
+    }
+
+    // operation
+    int offset_dst =
+        nidx +
+        blockIdx.y * ne0 +
+        blockIdx.z * ne0 * gridDim.y;
+    if (nidx < ne00 && blockIdx.y < ne01 && blockIdx.z < ne02*ne03) {
+        int offset_src =
+            nidx +
+            blockIdx.y * ne00 +
+            blockIdx.z * ne00 * ne01;
+        dst[offset_dst] = x[offset_src];
+    }
+}
+
+static void unpad_f32_cuda(const float * x, float * dst,
+    const int ne00, const int ne01, const int ne02, const int ne03,
+    const int ne0, const int ne1, const int ne2, const int ne3, cudaStream_t stream) {
+    int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE;
+    dim3 gridDim(num_blocks, ne1, ne2*ne3);
+    unpad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(x, dst, ne0, ne00, ne01, ne02, ne03);
+}
+
+void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
+
+    unpad_f32_cuda(src0_d, dst_d,
+        src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
+        dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream);
+}
diff --git a/ggml/src/ggml-cuda/pad.cuh b/ggml/src/ggml-cuda/pad.cuh
index 8fd386b0..e2ededc3 100644
--- a/ggml/src/ggml-cuda/pad.cuh
+++ b/ggml/src/ggml-cuda/pad.cuh
@@ -3,3 +3,4 @@
 #define CUDA_PAD_BLOCK_SIZE 256

 void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index 829c5e39..25702d85 100644
--- a/ggml/src/ggml-metal.m
+++ b/ggml/src/ggml-metal.m
@@ -193,6 +193,7 @@
     GGML_METAL_KERNEL_TYPE_IM2COL_F32,
     GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
     GGML_METAL_KERNEL_TYPE_PAD_F32,
+    GGML_METAL_KERNEL_TYPE_UNPAD_F32,
     GGML_METAL_KERNEL_TYPE_ARANGE_F32,
     GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32,
     GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
@@ -689,6 +690,7 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
+    GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UNPAD_F32, unpad_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
@@ -846,6 +848,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_context * ctx
             return false;
         case GGML_OP_UPSCALE:
         case GGML_OP_PAD:
+        case GGML_OP_UNPAD:
         case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_ARGSORT:
@@ -2655,6 +2658,36 @@ static void ggml_metal_encode_node(

                 const int nth = MIN(1024, ne0);

+                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+            } break;
+        case GGML_OP_UNPAD:
+            {
+                GGML_ASSERT(src0->type == GGML_TYPE_F32);
+
+                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UNPAD_F32].pipeline;
+
+                [encoder setComputePipelineState:pipeline];
+                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
+                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
+                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
+                [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
+                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
+                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
+                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
+                [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
+                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10];
+                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11];
+                [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12];
+                [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13];
+                [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14];
+                [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
+                [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
+                [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
+
+                const int nth = MIN(1024, ne0);
+
                 [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
             } break;
         case GGML_OP_ARANGE:
diff --git a/ggml/src/ggml-metal.metal b/ggml/src/ggml-metal.metal
index 2b200032..09887511 100644
--- a/ggml/src/ggml-metal.metal
+++ b/ggml/src/ggml-metal.metal
@@ -2029,6 +2029,51 @@ kernel void kernel_pad_f32(
     }
 }

+kernel void kernel_unpad_f32(
+    device const char * src0,
+    device char * dst,
+    constant int64_t & ne00,
+    constant int64_t & ne01,
+    constant int64_t & ne02,
+    constant int64_t & ne03,
+    constant uint64_t & nb00,
+    constant uint64_t & nb01,
+    constant uint64_t & nb02,
+    constant uint64_t & nb03,
+    constant int64_t & ne0,
+    constant int64_t & ne1,
+    constant int64_t & ne2,
+    constant int64_t & ne3,
+    constant uint64_t & nb0,
+    constant uint64_t & nb1,
+    constant uint64_t & nb2,
+    constant uint64_t & nb3,
+    uint3 tgpig[[threadgroup_position_in_grid]],
+    uint3 tpitg[[thread_position_in_threadgroup]],
+    uint3 ntg[[threads_per_threadgroup]]) {
+
+    const int64_t i3 = tgpig.z;
+    const int64_t i2 = tgpig.y;
+    const int64_t i1 = tgpig.x;
+
+    const int64_t i03 = i3;
+    const int64_t i02 = i2;
+    const int64_t i01 = i1;
+
+    device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01);
+    device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1);
+
+    if (i1 < ne01 && i2 < ne02 && i3 < ne03) {
+        for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
+            if (i0 < ne00) {
+                dst_ptr[i0] = src0_ptr[i0];
+            }
+        }
+
+        return;
+    }
+}
+
 kernel void kernel_arange_f32(
     device char * dst,
     constant int64_t & ne0,
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index bcbc32d9..f4864ac8 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -2997,6 +2997,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "POOL_2D_BACK",
     "UPSCALE",
     "PAD",
+    "UNPAD",
     "ARANGE",
     "TIMESTEP_EMBEDDING",
     "ARGSORT",
@@ -3030,7 +3031,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "OPT_STEP_ADAMW",
 };

-static_assert(GGML_OP_COUNT == 80, "GGML_OP_COUNT != 80");
+static_assert(GGML_OP_COUNT == 81, "GGML_OP_COUNT != 81");

 static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "none",
@@ -3091,6 +3092,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "pool_2d_back(x)",
     "upscale(x)",
     "pad(x)",
+    "unpad(x)",
     "arange(start, stop, step)",
     "timestep_embedding(timesteps, dim, max_period)",
     "argsort(x)",
@@ -3124,7 +3126,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "adamw(x)",
 };

-static_assert(GGML_OP_COUNT == 80, "GGML_OP_COUNT != 80");
+static_assert(GGML_OP_COUNT == 81, "GGML_OP_COUNT != 81");

 static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");

@@ -6955,6 +6957,32 @@ struct ggml_tensor * ggml_pad(
     return result;
 }

+// ggml_unpad
+
+struct ggml_tensor * ggml_unpad(
+    struct ggml_context * ctx,
+    struct ggml_tensor * a,
+    int p0, int p1, int p2, int p3) {
+    bool is_node = false;
+
+    if (a->grad) {
+        GGML_ABORT("fatal error"); // TODO: implement backward
+        is_node = true;
+    }
+
+    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
+        a->ne[0] - p0,
+        a->ne[1] - p1,
+        a->ne[2] - p2,
+        a->ne[3] - p3);
+
+    result->op = GGML_OP_UNPAD;
+    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+    result->src[0] = a;
+
+    return result;
+}
+
 // ggml_arange

 struct ggml_tensor * ggml_arange(
@@ -15312,6 +15340,58 @@ static void ggml_compute_forward_pad(
     }
 }

+static void ggml_compute_forward_unpad_f32(
+    const struct ggml_compute_params *params,
+    struct ggml_tensor *dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    GGML_ASSERT(src0->nb[0] == sizeof(float));
+    GGML_ASSERT( dst->nb[0] == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    float * dst_ptr = (float *) dst->data;
+
+    // TODO: optimize
+
+    for (int64_t i2 = 0; i2 < ne2; ++i2) {
+        for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
+            for (int64_t i0 = 0; i0 < ne0; ++i0) {
+                for (int64_t i3 = 0; i3 < ne3; ++i3) {
+                    const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
+
+                    const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+
+                    if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
+                        dst_ptr[dst_idx] = *src_ptr;
+                    }
+                }
+            }
+        }
+    }
+}
+
+static void ggml_compute_forward_unpad(
+    const struct ggml_compute_params * params,
+    struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_unpad_f32(params, dst);
+            } break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}

 // ggml_compute_forward_arange

@@ -17294,6 +17374,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_pad(params, tensor);
             } break;
+        case GGML_OP_UNPAD:
+            {
+                ggml_compute_forward_unpad(params, tensor);
+            } break;
         case GGML_OP_ARANGE:
             {
                 ggml_compute_forward_arange(params, tensor);
@@ -18369,6 +18453,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
             {
                 GGML_ABORT("fatal error"); // TODO: not implemented
             }
+        case GGML_OP_UNPAD:
+            {
+                GGML_ABORT("fatal error"); // TODO: not implemented
+            }
         case GGML_OP_ARANGE:
             {
                 GGML_ABORT("fatal error"); // TODO: not implemented
@@ -19165,6 +19253,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
             } break;
         case GGML_OP_UPSCALE:
         case GGML_OP_PAD:
+        case GGML_OP_UNPAD:
         case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_ARGSORT:
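Usage note: ggml_unpad is the inverse of ggml_pad; it drops p0..p3 elements from the end of each of the four dimensions. A minimal CPU-side sketch, assuming the patch above is applied (the shapes and memory size here are illustrative only):

// Build and run a tiny graph that trims an 8x4 tensor down to 5x3.
#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // 8x4 input; unpad removes 3 from dim 0 and 1 from dim 1 -> 5x3 result.
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 4);
    struct ggml_tensor * b = ggml_unpad(ctx, a, 3, 1, 0, 0);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, b);

    // ... fill a->data with 8*4 floats, then compute on the CPU backend:
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    ggml_free(ctx);
    return 0;
}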