# pytorchjob_full.yaml
# Copyright (c) IBM Corporation
# SPDX-License-Identifier: MIT
apiVersion: "kubeflow.org/v1"
kind: "PyTorchJob"
metadata:
  name: simple
  labels:
    kueue.x-k8s.io/queue-name: fake
    autoconf-plugin-name: resource-requirements-appwrapper
spec:
  pytorchReplicaSpecs:
    Master:
      replicas: 1
      restartPolicy: Never
      template:
        spec:
          nodeSelector:
            nvidia.com/gpu.product: NVIDIA-A100-80GB-PCIe
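          # /dev/shm is mounted from a memory-backed emptyDir below, a common
          # pattern because PyTorch dataloader workers exchange tensors through
          # shared memory and the container runtime's default /dev/shm
          # (typically 64Mi) is easily exhausted during training.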
          volumes:
            - name: dshm
              emptyDir:
                medium: Memory
          securityContext:
            allowPrivilegeEscalation: false
          containers:
            - name: pytorch
              image: quay.io/ado/ado:1.5.0-sft-py312-cu121-ofed2410v1140
              imagePullPolicy: IfNotPresent
              securityContext:
                allowPrivilegeEscalation: false
              env:
                - name: HF_HOME
                  value: /tmp/huggingface_home
              volumeMounts:
                - name: dshm
                  mountPath: "/dev/shm"
              resources:
                requests:
                  memory: 80Gi
                  nvidia.com/gpu: 8
                limits:
                  memory: 80Gi
                  nvidia.com/gpu: 8
              workingDir: /tmp/
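              # The entrypoint below builds a virtual environment, installs
              # fms-hf-tuning together with the ado packages, generates a
              # synthetic JSONL dataset, and then launches an FSDP
              # fine-tuning run via `accelerate launch`.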
              command:
                - sh
                - -c
                - |
                  python -m venv .venv
                  . .venv/bin/activate
                  pip install fms-hf-tuning ado-core ado-sfttrainer
                  pip install --no-build-isolation fms-hf-tuning[flash-attn]
                  sfttrainer_generate_dataset_text -o /tmp/artificial-dataset/news-tokens-16384plus-entries-4096.jsonl
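                  # RANK, MASTER_ADDR and MASTER_PORT are injected into each
                  # replica's environment by the Kubeflow training operator;
                  # with a single Master replica this resolves to a
                  # one-machine launch with 8 processes, one per requested GPU.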
                  accelerate launch --use_fsdp --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP --fsdp_forward_prefetch=false \
                    --fsdp_offload_params=false --fsdp_sharding_strategy=HYBRID_SHARD --fsdp_state_dict_type=FULL_STATE_DICT \
                    --fsdp_cpu_ram_efficient_loading=true --fsdp_sync_module_states=true --dynamo_backend="no" --machine_rank="${RANK}" \
                    --main_process_ip="${MASTER_ADDR}" --main_process_port="${MASTER_PORT}" --mixed_precision="no" \
                    --num_machines="1" --num_processes="8" --rdzv_backend="static" --same_network \
                    -m tuning.sft_trainer --log_level info --eval_strategy no --save_strategy no \
                    --learning_rate 1e-05 --weight_decay 0.0 --warmup_ratio 0.03 --lr_scheduler_type cosine \
                    --logging_steps 1 --include_tokens_per_second True --packing False --response_template "\n### Response:" \
                    --dataset_text_field output --gradient_accumulation_steps 4 --gradient_checkpointing True --max_steps 5 \
                    --model_name_or_path "ibm-granite/granite-3.1-3b-a800m-instruct" \
                    --per_device_train_batch_size 1 --torch_dtype bfloat16 --max_seq_length 512 \
                    --training_data_path /tmp/artificial-dataset/news-tokens-16384plus-entries-4096.jsonl \
                    --output_dir /tmp/output/
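
# A minimal sketch of how this job might be submitted and followed, assuming
# kubectl access to a cluster with the Kubeflow training operator installed
# and a Kueue LocalQueue matching the queue-name label above:
#
#   kubectl apply -f pytorchjob_full.yaml
#   kubectl get pytorchjob simple
#   kubectl logs -f simple-master-0   # replica pods are named <job>-master-<i>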