Skip to content

Commit 7585edb

Browse files
convert : Add support for Microsoft Phi-4 model (ggml-org#10817)
* convert : use GPT2 vocab for Phi-4 model
* convert : use null value of sliding_window to distinguish Phi-4 from other Phi-3-based models
* llama : do not use sliding window attention mask for Phi-4 model

Co-authored-by: Stanisław Szymczyk <[email protected]>
1 parent cd920d0 commit 7585edb

File tree

2 files changed

+22
-3
lines changed

2 files changed

+22
-3
lines changed

convert_hf_to_gguf.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2200,6 +2200,15 @@ class Phi3MiniModel(Model):
     model_arch = gguf.MODEL_ARCH.PHI3

     def set_vocab(self):
+        # Phi-4 model uses GPT2Tokenizer
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                tokenizer_class = tokenizer_config_json['tokenizer_class']
+                if tokenizer_class == 'GPT2Tokenizer':
+                    return self._set_vocab_gpt2()
+
         from sentencepiece import SentencePieceProcessor

         tokenizer_path = self.dir_model / 'tokenizer.model'
@@ -2316,7 +2325,11 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_rope_dimension_count(rope_dims)
         self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
         self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
+        sliding_window = self.hparams.get("sliding_window")
+        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
+        if sliding_window is None:
+            sliding_window = 0
+        self.gguf_writer.add_sliding_window(sliding_window)

     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         n_embd = self.find_hparam(["hidden_size", "n_embd"])

src/llama.cpp

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13333,7 +13333,13 @@ struct llm_build_context {
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-       struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+       struct ggml_tensor * KQ_mask = nullptr;
+       if (hparams.n_swa == 0) {
+           // Phi-4 doesn't use sliding window attention
+           KQ_mask = build_inp_KQ_mask();
+       } else {
+           KQ_mask = build_inp_KQ_mask_swa();
+       }

        for (int il = 0; il < n_layer; ++il) {
            auto residual = inpL;
@@ -13391,7 +13397,7 @@

            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
-                   Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                   Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
        }

        if (il == n_layer - 1) {

0 commit comments

Comments
 (0)