train_pythia_clt_topk.py
import os

import torch

from llamascopium import (
    ActivationFactoryConfig,
    ActivationFactoryDatasetSource,
    ActivationFactoryTarget,
    BufferShuffleConfig,
    CLTConfig,
    DatasetConfig,
    InitializerConfig,
    LanguageModelConfig,
    TrainCLTSettings,
    TrainerConfig,
    WandbConfig,
    train_clt,
)

if __name__ == "__main__":
    # Bind this process to the GPU given by the distributed launcher
    # (LOCAL_RANK defaults to 0 for a plain single-process run).
    torch.cuda.set_device(int(os.environ.get("LOCAL_RANK", 0)))

    settings = TrainCLTSettings(
        # Cross-layer transcoder: reads each block's post-LN2 activations
        # and reconstructs that block's MLP output, for all 12 Pythia-160m blocks.
        sae=CLTConfig(
            hook_points_in=[f"blocks.{i}.ln2.hook_normalized" for i in range(12)],
            hook_points_out=[f"blocks.{i}.hook_mlp_out" for i in range(12)],
            d_model=768,
            expansion_factor=8,  # 768 * 8 = 6144 features per layer
            act_fn="layertopk",
            top_k=256,
            dtype=torch.float32,
            device="cuda",
        ),
        initializer=InitializerConfig(
            grid_search_init_norm=False,
            init_encoder_with_decoder_transpose=False,
        ),
        trainer=TrainerConfig(
            amp_dtype=torch.float32,
            # Start with half of all 768 * 12 * 8 = 73728 features active
            # (initial_k = 36864) and anneal k exponentially toward top_k.
            initial_k=768 * 12 * 8 // 2,
            k_warmup_steps=1.0,
            k_schedule_type="exponential",
            k_exponential_factor=30,
            lr_warm_up_steps=1000,
            lr=5e-5,
            optimizer_class="adam",
            total_training_tokens=800_000_000,
            log_frequency=1000,
            eval_frequency=1_000_000_000,  # effectively disables in-training eval
            n_checkpoints=0,
            check_point_save_mode="log",
            exp_result_path="results",
        ),
        model=LanguageModelConfig(
            model_name="EleutherAI/pythia-160m",
            device="cuda",
            dtype="torch.float16",
        ),
        model_name="pythia-160m",
        datasets={
            "SlimPajama-3B": DatasetConfig(
                dataset_name_or_path="Hzfinfdu/SlimPajama-3B",
            )
        },
        wandb=WandbConfig(
            wandb_project="llamascopium",
            exp_name="pythia-160m-clt",
        ),
        activation_factory=ActivationFactoryConfig(
            sources=[
                ActivationFactoryDatasetSource(
                    name="SlimPajama-3B",
                )
            ],
            target=ActivationFactoryTarget.ACTIVATIONS_1D,
            # Collect both the CLT inputs and its reconstruction targets.
            hook_points=[f"blocks.{i}.ln2.hook_normalized" for i in range(12)]
            + [f"blocks.{i}.hook_mlp_out" for i in range(12)],
            batch_size=4096,
            buffer_size=4096 * 4,
            buffer_shuffle=BufferShuffleConfig(
                perm_seed=42,
                generator_device="cuda",
            ),
        ),
        sae_name="pythia-160m-clt",
        sae_series="pythia-clt",
    )
    train_clt(settings)
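
Usage note: because the script reads LOCAL_RANK before selecting a CUDA device, it appears intended to work under a distributed launcher as well as standalone. A minimal launch sketch, assuming a standard torchrun setup (the process count is illustrative, not taken from the source):

    torchrun --nproc_per_node=8 train_pythia_clt_topk.py

Run directly as python train_pythia_clt_topk.py, LOCAL_RANK is unset, so the script falls back to device 0 on a single GPU.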