Skip to content

Commit 1a9c5b8

Browse files
committed
update: remove unnecessary code
1 parent d7310a8 commit 1a9c5b8

1 file changed

Lines changed: 1 addition & 13 deletions

File tree

convert_hf_to_gguf.py

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -2992,13 +2992,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         else:
             return

-        if self.origin_hf_arch.startswith('Sarashina2VisionForCausalLM'):
-            # Remove llm. from name
-            if name.startswith("llm."):
-                name = name[len("llm."):]
-            elif name.startswith("visual.") or name in ("norm.weight", "norm.bias"):
-                return #Skip processing "modify_tensors"
-
         yield from super().modify_tensors(data_torch, name, bid)

     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
@@ -4267,12 +4260,7 @@ def set_gguf_parameters(self):
         assert self.hparams_vision is not None
         hparams = self.hparams_vision
         model_type = self.global_config['model_type']
-        if model_type == 'sarashina2_vision':
-            model_type = 'qwen2_vl'
-            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
-            spatial_merge_size = self.hparams.get("spatial_merge_size", 2)
-            self.gguf_writer.add_uint32("clip.vision.spatial_merge_size", spatial_merge_size)
-        elif model_type == 'qwen2_vl':
+        if model_type == 'qwen2_vl':
             self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
         elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
             if model_type == 'qwen2_5_omni':

0 commit comments

Comments (0)