Skip to content

Commit 39ecadd

Browse files
committed
chore: move debugging scripts to dedicated folder
1 parent e593137 commit 39ecadd

File tree

5 files changed

+83
-0
lines changed

5 files changed

+83
-0
lines changed

scripts/debugging/list_keys.py

Lines changed: 13 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,13 @@
1+
from safetensors import safe_open
2+
from glob import glob
3+
import sys
4+
5+
paths = glob("/Users/simba/.cache/huggingface/hub/models--mlx-community--gemma-4-26b-a4b-it-4bit/snapshots/*/*.safetensors")
6+
found = False
7+
for path in paths:
8+
with safe_open(path, framework="np") as f:
9+
for k in f.keys():
10+
if "layers.3.experts" in k:
11+
print(k, f.get_tensor(k).shape)
12+
found = True
13+
if found: break

scripts/debugging/test_tokens.py

Lines changed: 8 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,8 @@
"""Debugging helper: print the chat-template prompt and token ids for a fixed question."""
from transformers import AutoTokenizer

MODEL_ID = "mlx-community/gemma-4-26b-a4b-it-4bit"

tok = AutoTokenizer.from_pretrained(MODEL_ID)

# Single-turn conversation run through the model's chat template.
conversation = [{"role": "user", "content": "What is 2+2?"}]
prompt = tok.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
tokens = tok.encode(prompt)

print("PROMPT:", repr(prompt))
print("TOKENS:", len(tokens), tokens)
Lines changed: 21 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,21 @@
1+
import Foundation
2+
import MLXLMCommon
3+
import MLX
4+
/// Loads the Gemma model container, applies the chat template to a fixed
/// question, and prints the resulting prompt text and token ids.
/// - Throws: any error from model download/loading or template application.
func run() async throws {
    let configuration = ModelConfiguration(id: "mlx-community/gemma-4-26b-a4b-it-4bit")
    let container = try await LLMModelFactory.shared.loadContainer(
        from: HubClient.default,
        using: TokenizersLoader(),
        configuration: configuration
    )
    let context = try await container.context()

    // Single-turn conversation fed through the tokenizer's chat template.
    let conversation: [[String: Any]] = [
        ["role": "user", "content": "What is 2+2?"]
    ]
    let prompt = try context.tokenizer.applyChatTemplate(messages: conversation)
    let tokens = context.tokenizer.encode(text: prompt)

    print("PROMPT TEXT:", prompt)
    print("TOKENS:", tokens)
}
// Bridge the async entry point to a synchronous script. Previously a thrown
// error was silently discarded inside the Task, `exit(0)` was never reached,
// and `RunLoop.main.run()` spun forever — surface the error and exit non-zero.
Task {
    do {
        try await run()
        exit(0)
    } catch {
        print("ERROR:", error)
        exit(1)
    }
}
RunLoop.main.run()
Lines changed: 17 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,17 @@
1+
import Foundation
2+
import MLX
3+
import MLXNN
4+
/// Minimal module with one keyed child, used to reproduce the behavior of
/// `update(modules:)` on a `@ModuleInfo`-wrapped property.
class DummyModule: Module {
    @ModuleInfo(key: "my_proj") var myProj: Module

    override init() {
        // Populate the wrapped child before calling super, matching the
        // property-wrapper initialization order the repro depends on.
        _myProj.wrappedValue = Linear(10, 10)
        super.init()
    }
}
13+
let m = DummyModule()
14+
print("Module initialized")
15+
let updates = [("my_proj", Linear(10, 10))]
16+
m.update(modules: updates)
17+
print("Module updated successfully!")
Lines changed: 24 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,24 @@
1+
import Foundation
2+
import MLX
3+
import MLXLLM
4+
import MLXLMCommon
5+
/// Debug entry point: loads the Gemma 4 container and dumps a few layer-0
/// norm weight maxima plus the router projection shape.
@main
struct TestWeights {
    static func main() async throws {
        let configuration = ModelConfiguration(id: "mlx-community/gemma-4-26b-a4b-it-4bit")
        let container = try await LLMModelFactory.shared.loadContainer(
            from: HubClient.default,
            using: TokenizersLoader(),
            configuration: configuration
        )
        let context = try await container.context()

        // Only meaningful for the Gemma 4 architecture; bail out quietly
        // otherwise (same as the original if-let body being skipped).
        guard let gemma4 = context.model as? Gemma4Model else { return }
        print("Successfully cast to Gemma4Model!")

        let layer0 = gemma4.model.layers[0]
        print("Layer 0 norms:")
        print("post1:", layer0.postFeedforwardLayerNorm1?.weight.max().item(Float.self) ?? "NIL")
        print("pre2:", layer0.preFeedforwardLayerNorm2?.weight.max().item(Float.self) ?? "NIL")
        print("post2:", layer0.postFeedforwardLayerNorm2?.weight.max().item(Float.self) ?? "NIL")

        print("Router proj:", layer0.expertsBlock?.router.proj.weight.shape ?? [0])
    }
}

0 commit comments

Comments
 (0)