ggerganov committed
Commit 1568fc8 · 1 Parent(s): e3c317a

talk-llama : sync llama.cpp

examples/talk-llama/llama.cpp CHANGED
@@ -179,6 +179,7 @@ enum llm_arch {
     LLM_ARCH_COMMAND_R,
     LLM_ARCH_DBRX,
     LLM_ARCH_OLMO,
+    LLM_ARCH_OLMO_1124,
     LLM_ARCH_OLMOE,
     LLM_ARCH_OPENELM,
     LLM_ARCH_ARCTIC,
@@ -232,6 +233,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_COMMAND_R, "command-r" },
     { LLM_ARCH_DBRX, "dbrx" },
     { LLM_ARCH_OLMO, "olmo" },
+    { LLM_ARCH_OLMO_1124, "olmo_1124" },
     { LLM_ARCH_OLMOE, "olmoe" },
     { LLM_ARCH_OPENELM, "openelm" },
     { LLM_ARCH_ARCTIC, "arctic" },
@@ -1207,6 +1209,25 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_OLMO_1124,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_OLMOE,
         {
@@ -2907,9 +2928,15 @@ struct llama_model {
     // for quantize-stats only
     std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
 
-    int64_t t_load_us = 0;
+    int64_t t_load_us = 0;
     int64_t t_start_us = 0;
 
+    // total number of parameters in the model
+    uint64_t n_elements = 0;
+
+    // total size of all the tensors in the model in bytes
+    size_t n_bytes = 0;
+
     // keep track of loaded lora adapters
     std::set<struct llama_lora_adapter *> lora_adapters;
 
@@ -3454,21 +3481,13 @@ static bool llama_kv_cache_init(
         const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
         const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
 
-        const llama_model::buft_list_t * buft_list;
+        ggml_backend_buffer_type_t buft;
         if (offload) {
-            buft_list = model.dev_layer.at(i).buft_list;
+            auto * dev = model.dev_layer.at(i).dev;
+            buft = ggml_backend_dev_buffer_type(dev);
         } else {
-            buft_list = &model.cpu_buft_list;
+            buft = ggml_backend_cpu_buffer_type();
         }
-        ggml_backend_buffer_type_t buft = select_buft(*buft_list,
-            [&](ggml_context * ctx) {
-                ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
-                if (hparams.rope_type == LLAMA_ROPE_TYPE_NONE) {
-                    return k;
-                }
-                ggml_tensor * p = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
-                return ggml_rope(ctx, k, p, hparams.n_rot, hparams.rope_type);
-            });
         ggml_context * ctx = ctx_for_buft(buft);
 
         if (!ctx) {
@@ -4275,8 +4294,8 @@ struct llama_model_loader {
     int n_tensors = 0;
     int n_created = 0;
 
-    int64_t n_elements = 0;
-    size_t n_bytes = 0;
+    uint64_t n_elements = 0;
+    size_t n_bytes = 0;
 
     bool use_mmap = false;
     bool check_tensors;
@@ -5344,6 +5363,11 @@ static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
     }
 }
 
+static void llm_load_stats(llama_model_loader & ml, llama_model & model) {
+    model.n_elements = ml.n_elements;
+    model.n_bytes = ml.n_bytes;
+}
+
 static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
     model.arch = ml.get_arch();
     if (model.arch == LLM_ARCH_UNKNOWN) {
@@ -5874,6 +5898,17 @@ static void llm_load_hparams(
                 default: model.type = e_model::MODEL_UNKNOWN;
             }
         } break;
+        case LLM_ARCH_OLMO_1124:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                switch (hparams.n_layer) {
+                    case 16: model.type = e_model::MODEL_1B; break;
+                    case 32: model.type = e_model::MODEL_7B; break;
+                    case 40: model.type = e_model::MODEL_13B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_OLMOE:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
@@ -7254,7 +7289,7 @@ static llama_model::buft_list_t make_cpu_buft_list(llama_model & model) {
     auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
     auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
     auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
-        ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_cpu_get_extra_bufts");
+        ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
     if (ggml_backend_dev_get_extra_bufts_fn) {
         ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
         while (extra_bufts && *extra_bufts) {
@@ -7521,7 +7556,7 @@ static bool llm_load_tensors(
 
         // avoid using a host buffer when using mmap
         auto * buft_dev = ggml_backend_buft_get_device(buft);
-        if (ml.use_mmap && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
+        if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
             auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
             buft = ggml_backend_dev_buffer_type(cpu_dev);
         }
@@ -8556,6 +8591,31 @@ static bool llm_load_tensors(
                     layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                 }
             } break;
+        case LLM_ARCH_OLMO_1124:
+            {
+                model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = model.layers[i];
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                    layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
+                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
+                }
+            } break;
         case LLM_ARCH_OLMOE:
             {
                 model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -9128,6 +9188,10 @@ static bool llm_load_tensors(
 
             // check if it is possible to use buffer_from_host_ptr with this buffer type
             ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
+            if (!dev) {
+                // FIXME: workaround for CPU backend buft having a NULL device
+                dev = ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0);
+            }
             ggml_backend_dev_props props;
             ggml_backend_dev_get_props(dev, &props);
             bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
@@ -9252,6 +9316,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
     }
 
+    llm_load_stats(ml, model);
     llm_load_print_meta(ml, model);
 
     if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
@@ -14416,6 +14481,130 @@ struct llm_build_context {
         return gf;
     }
 
+    struct ggml_cgraph * build_olmo_1124() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        // mutable variable, needed during the last layer of the computation to skip unused tokens
+        int32_t n_tokens = this->n_tokens;
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            cur = inpL;
+
+            // self_attention
+            {
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+
+                Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL,
+                        LLM_NORM_RMS, cb, il);
+                cb(Qcur, "Qcur_normed", il);
+
+                Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL,
+                        LLM_NORM_RMS, cb, il);
+                cb(Kcur, "Kcur_normed", il);
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+
+                Qcur = ggml_rope_ext(
+                    ctx0, Qcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur_rope", il);
+
+                Kcur = ggml_rope_ext(
+                    ctx0, Kcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur_rope", il);
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                        model.layers[il].wo, NULL,
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+            }
+
+            cur = llm_build_norm(ctx0, cur, hparams,
+                    model.layers[il].attn_post_norm, NULL,
+                    LLM_NORM_RMS, cb, il);
+            cb(cur, "attn_post_norm", il);
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                n_tokens = n_outputs;
+                cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = llm_build_ffn(ctx0, lctx, ffn_inp,
+                    model.layers[il].ffn_up, NULL, NULL,
+                    model.layers[il].ffn_gate, NULL, NULL,
+                    model.layers[il].ffn_down, NULL, NULL,
+                    NULL,
+                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+            cb(cur, "ffn_out", il);
+
+            cur = llm_build_norm(ctx0, cur, hparams,
+                    model.layers[il].ffn_post_norm, NULL,
+                    LLM_NORM_RMS, cb, -1);
+            cb(cur, "ffn_post_norm", -1);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
     // based on the build_qwen2moe() function, changes:
     // * removed shared experts
     // * removed bias
@@ -16608,6 +16797,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_olmo();
             } break;
+        case LLM_ARCH_OLMO_1124:
+            {
+                result = llm.build_olmo_1124();
+            } break;
        case LLM_ARCH_OLMOE:
            {
                result = llm.build_olmoe();
@@ -18020,7 +18213,7 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
 
    // apply K-shift if needed
    if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
-        if (lctx.model.arch == LLM_ARCH_DEEPSEEK2) { // not supported due to MLA
+        if (!llama_kv_cache_can_shift(&lctx)) {
            GGML_ABORT("Deepseek2 does not support K-shift");
        }
 
@@ -18597,6 +18790,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
    llama_model model;
    llm_load_arch(ml, model);
    llm_load_hparams(ml, model);
+    llm_load_stats(ml, model);
 
    struct quantize_state_internal qs(model, params);
 
@@ -19876,6 +20070,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
        case LLM_ARCH_QWEN:
        case LLM_ARCH_QWEN2:
        case LLM_ARCH_QWEN2MOE:
+        case LLM_ARCH_OLMO_1124:
        case LLM_ARCH_OLMOE:
        case LLM_ARCH_PHI2:
        case LLM_ARCH_PHI3:
@@ -19949,19 +20144,11 @@ int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t bu
 }
 
 uint64_t llama_model_size(const struct llama_model * model) {
-    uint64_t size = 0;
-    for (const auto & it : model->tensors_by_name) {
-        size += ggml_nbytes(it.second);
-    }
-    return size;
+    return model->n_bytes;
 }
 
 uint64_t llama_model_n_params(const struct llama_model * model) {
-    uint64_t nparams = 0;
-    for (const auto & it : model->tensors_by_name) {
-        nparams += ggml_nelements(it.second);
-    }
-    return nparams;
+    return model->n_elements;
 }
 
 struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
@@ -20275,6 +20462,10 @@ void llama_kv_cache_update(struct llama_context * ctx) {
    llama_kv_cache_update_internal(*ctx);
 }
 
+bool llama_kv_cache_can_shift(struct llama_context * ctx) {
+    return ctx->model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
+}
+
 // deprecated
 size_t llama_get_state_size(struct llama_context * ctx) {
    return llama_state_get_size(ctx);
@@ -22021,7 +22212,6 @@ const char * llama_print_system_info(void) {
    s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
    s += "RISCV_VECT = " + std::to_string(ggml_cpu_has_riscv_v()) + " | ";
    s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
-    s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
    s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
    s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
    s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
@@ -22067,28 +22257,6 @@ void llama_perf_context_reset(struct llama_context * ctx) {
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
 }
 
-void llama_perf_dump_yaml(FILE * stream, const llama_context * ctx) {
-    fprintf(stream, "\n");
-    fprintf(stream, "###########\n");
-    fprintf(stream, "# Timings #\n");
-    fprintf(stream, "###########\n");
-    fprintf(stream, "\n");
-
-    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
-            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
-    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
-            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
-    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
-    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
-    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
-    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
-    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
-    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
-            1.0e6 * ctx->n_eval / ctx->t_eval_us);
-    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
-            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
-}
-
 // For internal test use
 const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
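Note: the new build_olmo_1124() graph differs from build_olmo() mainly in where normalization sits: Q and K are RMS-normed before RoPE, and RMS norm is applied to the attention and FFN outputs (attn_post_norm, ffn_post_norm) before each residual add, rather than to the block inputs. The standalone sketch below is only a reading aid for that ordering, not llama.cpp code; rms_norm, the identity attention/ffn lambdas, and the toy vectors are illustrative stand-ins.

// Minimal standalone sketch (assumptions: placeholder attention/ffn, toy 4-wide state)
// of the post-norm-then-residual ordering wired into build_olmo_1124().
#include <cmath>
#include <cstdio>
#include <vector>

using vec = std::vector<float>;

static vec rms_norm(const vec & x, float eps = 1e-5f) {
    float ss = 0.0f;
    for (float v : x) ss += v*v;
    const float scale = 1.0f / std::sqrt(ss/x.size() + eps);
    vec out(x.size());
    for (size_t i = 0; i < x.size(); ++i) out[i] = x[i]*scale;
    return out;
}

static vec add(const vec & a, const vec & b) {
    vec out(a.size());
    for (size_t i = 0; i < a.size(); ++i) out[i] = a[i] + b[i];
    return out;
}

int main() {
    vec h = {0.5f, -1.0f, 2.0f, 0.25f}; // toy hidden state

    auto attention = [](const vec & x) { return x; }; // stand-in for self-attention
    auto ffn       = [](const vec & x) { return x; }; // stand-in for the SiLU-gated FFN

    for (int il = 0; il < 2; ++il) {
        // attention block: output is RMS-normed *before* the residual add (~ attn_post_norm)
        vec attn_out = rms_norm(attention(h));
        vec ffn_inp  = add(attn_out, h);

        // FFN block: same post-norm-then-residual pattern (~ ffn_post_norm)
        vec ffn_out = rms_norm(ffn(ffn_inp));
        h           = add(ffn_out, ffn_inp);
    }

    for (float v : h) std::printf("%.4f ", v);
    std::printf("\n");
    return 0;
}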
 
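Note: llama_model_size() and llama_model_n_params() now return totals cached on llama_model (n_bytes, n_elements), filled once by llm_load_stats() from the loader, instead of summing over tensors_by_name on every call. The toy program below only illustrates that cache-once, read-many pattern; toy_model, toy_tensor and the hard-coded tensor list are invented for the example, and in the real code the totals come from llama_model_loader rather than from re-summing tensors.

// Toy illustration (not llama.cpp code) of caching model totals at load time.
#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct toy_tensor { uint64_t n_elements; size_t n_bytes; };

struct toy_model {
    std::vector<std::pair<std::string, toy_tensor>> tensors_by_name;
    uint64_t n_elements = 0; // cached once at load time
    size_t   n_bytes    = 0; // cached once at load time
};

// analogous role to llm_load_stats(): fill the cached totals a single time
static void load_stats(toy_model & model) {
    for (const auto & it : model.tensors_by_name) {
        model.n_elements += it.second.n_elements;
        model.n_bytes    += it.second.n_bytes;
    }
}

// analogous role to the new llama_model_n_params()/llama_model_size(): O(1) reads
static uint64_t model_n_params(const toy_model & m) { return m.n_elements; }
static size_t   model_size    (const toy_model & m) { return m.n_bytes;    }

int main() {
    toy_model m;
    m.tensors_by_name = {
        {"tok_embd", {4096ull*32000, 4096ull*32000*2}},
        {"output",   {4096ull*32000, 4096ull*32000*2}},
    };
    load_stats(m);
    std::printf("params: %llu, bytes: %zu\n",
                (unsigned long long) model_n_params(m), model_size(m));
    return 0;
}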
examples/talk-llama/llama.h CHANGED
@@ -667,6 +667,9 @@ extern "C" {
     // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
     LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
 
+    // Check if the context supports KV cache shifting
+    LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx);
+
     //
     // State / sessions
     //
@@ -1244,8 +1247,6 @@ extern "C" {
     LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
     LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
 
-    LLAMA_API void llama_perf_dump_yaml(FILE * stream, const struct llama_context * ctx);
-
 #ifdef __cplusplus
 }
 #endif
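Note: a hypothetical caller-side sketch of the new llama_kv_cache_can_shift() check, assuming an initialized llama_context and this commit's llama.h. The helper name and the n_keep/n_discard parameters are illustrative, while llama_kv_cache_seq_rm(), llama_kv_cache_seq_add() and llama_kv_cache_update() are existing llama.h calls; callers that do context shifting can use the check instead of special-casing architectures such as Deepseek2.

// Hypothetical usage sketch, not part of the commit.
#include "llama.h"

static void maybe_shift_kv_cache(struct llama_context * ctx, int n_keep, int n_discard) {
    if (!llama_kv_cache_can_shift(ctx)) {
        // e.g. Deepseek2 (MLA) cannot K-shift; a caller would fall back to re-evaluating the prompt
        return;
    }

    // drop [n_keep, n_keep + n_discard) from sequence 0 and shift the remaining cells back
    llama_kv_cache_seq_rm (ctx, 0, n_keep, n_keep + n_discard);
    llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, -1, -n_discard);

    // apply the pending K-shift
    llama_kv_cache_update(ctx);
}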