Skip to content

Commit d108680

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 6d9e9ea commit d108680

7 files changed

Lines changed: 21 additions & 35 deletions

File tree

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ gpus per model.
4444

4545
This is an example to run a single 8bit llama-65b model on 2 A40s that have
4646
~50 GB of memory each.
47-
47+
4848
```
4949
elk elicit huggyllama/llama-65b imdb --num_gpus 2 --gpus_per_model 2 --int8
5050
```

elk/extraction/extraction.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -157,9 +157,7 @@ def extract_hiddens(
157157
world_size: int = 1,
158158
) -> Iterable[dict]:
159159
first_device = (
160-
devices
161-
if not isinstance(devices, ModelDevices)
162-
else devices.first_device
160+
devices if not isinstance(devices, ModelDevices) else devices.first_device
163161
)
164162
"""Run inference on a model with a set of prompts, yielding the hidden states."""
165163
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@@ -174,15 +172,13 @@ def extract_hiddens(
174172
ds_names = cfg.datasets
175173
assert len(ds_names) == 1, "Can only extract hiddens from one dataset at a time."
176174

177-
178175
model = instantiate_model_with_devices(
179176
cfg=cfg, device_config=devices, is_verbose=is_verbose
180177
)
181178
tokenizer = instantiate_tokenizer(
182179
cfg.model, truncation_side="left", verbose=is_verbose
183180
)
184181

185-
186182
is_enc_dec = model.config.is_encoder_decoder
187183
if is_enc_dec and cfg.use_encoder_states:
188184
assert hasattr(model, "get_encoder") and callable(model.get_encoder)

elk/utils/multi_gpu.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
import torch
66
from accelerate import infer_auto_device_map, init_empty_weights
7-
from torch import dtype
87
from torch.nn import Module
98
from transformers import PreTrainedModel
109

tests/test_smoke_elicit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
def test_smoke_elicit_run_tiny_gpt2_ccs(tmp_path: Path):
99
# we need about 5 mb of gpu memory to run this test
10-
model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024 ** 2
10+
model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024**2
1111
dataset_name = "imdb"
1212
elicit = Elicit(
1313
data=Extract(
@@ -38,7 +38,7 @@ def test_smoke_elicit_run_tiny_gpt2_ccs(tmp_path: Path):
3838

3939
def test_smoke_elicit_run_tiny_gpt2_eigen(tmp_path: Path):
4040
# we need about 5 mb of gpu memory to run this test
41-
model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024 ** 2
41+
model_path, min_mem = "sshleifer/tiny-gpt2", 10 * 1024**2
4242
dataset_name = "imdb"
4343
elicit = Elicit(
4444
data=Extract(

tests/test_smoke_eval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ def setup_elicit(
1919
tmp_path: Path,
2020
dataset_name="imdb",
2121
model_path="sshleifer/tiny-gpt2",
22-
min_mem=10 * 1024 ** 2,
22+
min_mem=10 * 1024**2,
2323
is_ccs: bool = True,
2424
) -> Elicit:
2525
"""Setup elicit config for testing, execute elicit, and save output to tmp_path.

tests/test_split_devices.py

Lines changed: 15 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -5,39 +5,30 @@ def test_split_2_devices_1_gpu_per_model():
55
devices = ["a", "b"]
66
gpus_per_model = 1
77
models_to_create = 2
8-
assert (
9-
split_devices_into_model_devices(
10-
devices=devices,
11-
gpus_per_model=gpus_per_model,
12-
models_to_create=models_to_create,
13-
)
14-
== [ModelDevices("a", []), ModelDevices("b", [])]
15-
)
8+
assert split_devices_into_model_devices(
9+
devices=devices,
10+
gpus_per_model=gpus_per_model,
11+
models_to_create=models_to_create,
12+
) == [ModelDevices("a", []), ModelDevices("b", [])]
1613

1714

1815
def test_split_4_devices_2_gpus_per_model():
1916
devices = ["a", "b", "c", "d"]
2017
gpus_per_model = 2
2118
models_to_create = 2
22-
assert (
23-
split_devices_into_model_devices(
24-
devices=devices,
25-
gpus_per_model=gpus_per_model,
26-
models_to_create=models_to_create,
27-
)
28-
== [ModelDevices("a", ["b"]), ModelDevices("c", ["d"])]
29-
)
19+
assert split_devices_into_model_devices(
20+
devices=devices,
21+
gpus_per_model=gpus_per_model,
22+
models_to_create=models_to_create,
23+
) == [ModelDevices("a", ["b"]), ModelDevices("c", ["d"])]
3024

3125

3226
def test_split_7_devices_3_gpus_per_model():
3327
devices = ["a", "b", "c", "d", "e", "f", "g"]
3428
gpus_per_model = 3
3529
models_to_create = 2
36-
assert (
37-
split_devices_into_model_devices(
38-
devices=devices,
39-
gpus_per_model=gpus_per_model,
40-
models_to_create=models_to_create,
41-
)
42-
== [ModelDevices("a", ["b", "c"]), ModelDevices("d", ["e", "f"])]
43-
)
30+
assert split_devices_into_model_devices(
31+
devices=devices,
32+
gpus_per_model=gpus_per_model,
33+
models_to_create=models_to_create,
34+
) == [ModelDevices("a", ["b", "c"]), ModelDevices("d", ["e", "f"])]

tests/test_truncated_eigh.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ def random_symmetric_matrix(n: int, k: int) -> torch.Tensor:
1111
assert k <= n, "Rank k should be less than or equal to the matrix size n."
1212

1313
# Generate random n x k matrix A with elements drawn from a uniform distribution
14-
A = torch.rand(n, k) / k ** 0.5
14+
A = torch.rand(n, k) / k**0.5
1515

1616
# Create a diagonal matrix D with k eigenvalues evenly distributed around zero
1717
eigenvalues = torch.linspace(-1, 1, k)

0 commit comments

Comments (0)