Skip to content

Commit 786a272

Browse files
Copilot and chensuyue committed
Fix docstrings in neural_compressor directory
Co-authored-by: chensuyue <51692656+chensuyue@users.noreply.github.com>
1 parent a2543f6 commit 786a272

4 files changed

Lines changed: 26 additions & 6 deletions

File tree

neural_compressor/common/base_tuning.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ class Evaluator:
7070
def eval_acc(model):
7171
...
7272
73-
def eval_perf(molde):
73+
def eval_perf(model):
7474
...
7575
7676
# Usage
@@ -525,6 +525,10 @@ def init_tuning(tuning_config: TuningConfig) -> Tuple[ConfigLoader, TuningLogger
525525
526526
Args:
527527
tuning_config (TuningConfig): The configuration for the tuning process.
528+
529+
Returns:
530+
Tuple[ConfigLoader, TuningLogger, TuningMonitor]: A tuple containing the config loader,
531+
tuning logger, and tuning monitor.
528532
"""
529533
config_loader = ConfigLoader(config_set=tuning_config.config_set, sampler=tuning_config.sampler)
530534
tuning_logger = TuningLogger()

neural_compressor/torch/quantization/autotune.py

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,14 @@ def get_all_config_set() -> Union[BaseConfig, List[BaseConfig]]:
5555

5656

5757
def _deepcopy_warp(model):
58+
"""Create a deep copy of the model while preserving specific attributes.
59+
60+
Args:
61+
model (torch.nn.Module): The model to deep copy.
62+
63+
Returns:
64+
torch.nn.Module: A deep copy of the model with preserved attributes.
65+
"""
5866
additional_attr_lst = ["_exported", "dynamic_shapes"]
5967
original_attr = {key: getattr(model, key, None) for key in additional_attr_lst}
6068
new_model = deepcopy(model)
@@ -64,7 +72,15 @@ def _deepcopy_warp(model):
6472

6573

6674
def _preprocess_model_quant_config(model, quant_config):
67-
"""Preprocess model and quant config before quantization."""
75+
"""Preprocess model and quant config before quantization.
76+
77+
Args:
78+
model (torch.nn.Module): The model to be quantized.
79+
quant_config (TuningConfig): The quantization configuration to preprocess.
80+
81+
Returns:
82+
Tuple[torch.nn.Module, TuningConfig]: The preprocessed model and quantization configuration.
83+
"""
6884
for config in quant_config.config_set:
6985
# handle tokenizer attribute in AutoRoundConfig
7086
if isinstance(config, AutoRoundConfig):
@@ -88,8 +104,8 @@ def autotune(
88104
"""The main entry of auto-tune.
89105
90106
Args:
91-
model (torch.nn.Module): _description_
92-
tune_config (TuningConfig): _description_
107+
model (torch.nn.Module): The model to be quantized.
108+
tune_config (TuningConfig): The configuration for the auto-tuning process.
93109
eval_fn (Callable): for evaluation of quantized models.
94110
eval_args (tuple, optional): arguments used by eval_fn. Defaults to None.
95111
run_fn (Callable, optional): for calibration to quantize model. Defaults to None.

neural_compressor/torch/quantization/config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1767,7 +1767,7 @@ def __init__(
17671767
allowlist (dict, optional): Whether to execute fp8 quantization for specific op names or types. Defaults to {"names": [], "types": FP8_WHITE_LIST}.
17681768
mode (str, optional): Choose the quantization mode. Defaults to "AUTO".
17691769
scale_method (str or dict, optional): Select method used to generate scale from calibration info. Can be a string or a dict. Defaults to "maxabs_hw".
1770-
scale_params (dict, optional): _description_. Defaults to {}.
1770+
scale_params (dict, optional): Scaling parameters that override the default ones for specific modules. Defaults to {}.
17711771
observer (str, optional): Params of scales. Defaults to "maxabs".
17721772
mod_dict (dict, optional): The dict of modules to quantize. Defaults to {}.
17731773
measure_exclude (str, optional): Select INPUT/OUTPUT to be exculded by measurement. Defaults to "OUTPUT".

neural_compressor/torch/utils/llm_utility.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ def get_default_llm_dataloader(tokenizer, dataset_name="NeelNanda/pile-10k", bs=
6464
6565
Args:
6666
tokenizer (obj): tokenizer object.
67-
seq_len (int, optional): _description_. Defaults to 128.
67+
seq_len (int, optional): the sequence length of the input tokens. Defaults to 128.
6868
dataset_name (str, optional): dataset name. Defaults to "NeelNanda/pile-10k".
6969
seed (int, optional): random seed. Defaults to 42.
7070
bs (int, optional): batch size. Defaults to 8.

0 commit comments

Comments
 (0)