-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathappwrapper_lora.yaml
More file actions
91 lines (88 loc) · 4.62 KB
/
appwrapper_lora.yaml
File metadata and controls
91 lines (88 loc) · 4.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# Copyright (c) IBM Corporation
# SPDX-License-Identifier: MIT
#
# AppWrapper that wraps a single-replica PyTorchJob running an fms-hf-tuning
# LoRA fine-tune of ibm-granite/granite-8b-code-base-4k via `accelerate launch`.
apiVersion: workload.codeflare.dev/v1beta2
kind: AppWrapper
metadata:
  name: autoconf-lora-granite-3-8b
  labels:
    # This is a trick that circumvents a) the Kyverno policy blocking us from creating objects without a queue-name,
    # and b) a feature gap in Kueue for temporarily ignoring a Job
    kueue.x-k8s.io/queue-name: fake
    autoconf-plugin-name: resource-requirements-appwrapper
spec:
  components:
    - template:
        apiVersion: "kubeflow.org/v1"
        kind: "PyTorchJob"
        metadata:
          name: autoconf-lora-granite-3-8b
        spec:
          pytorchReplicaSpecs:
            Master:
              replicas: 1
              restartPolicy: Never
              template:
                spec:
                  serviceAccountName: gdr
                  priorityClassName: default-priority
                  volumes:
                    # Model cache / tuning data (mounted read-write at /hf-models-pvc).
                    - name: hf-models-pvc
                      persistentVolumeClaim:
                        claimName: fms-tuning-data
                    # Checkpoints and HF_HOME cache live under /data (subPath below).
                    - name: fms-tuning-checkpoints
                      persistentVolumeClaim:
                        claimName: fms-tuning-checkpoints
                    # Memory-backed /dev/shm for NCCL/dataloader shared memory.
                    - name: dshm
                      emptyDir:
                        medium: Memory
                        # sizeLimit: 256Gi
                  # NOTE(review): pod runs as root (uid/gid 0, runAsNonRoot: false) —
                  # presumably required by the tuning image; confirm before hardening.
                  securityContext:
                    fsGroup: 0
                    runAsGroup: 0
                    runAsNonRoot: false
                    runAsUser: 0
                  containers:
                    - name: pytorch
                      image: quay.io/modh/fms-hf-tuning:v3.1.0
                      imagePullPolicy: IfNotPresent
                      securityContext:
                        allowPrivilegeEscalation: false
                      env:
                        # Redirect the Hugging Face cache onto the checkpoints PVC.
                        - name: HF_HOME
                          value: /data/transformers_cache
                        # The fms-hf-tuning images do not include the path to the `accelerate` binary in $PATH
                        - name: PATH
                          value: "/home/tuning/.local/bin:/home/tuning/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
                      volumeMounts:
                        - name: hf-models-pvc
                          mountPath: /hf-models-pvc
                        - name: fms-tuning-checkpoints
                          mountPath: /data
                          subPath: multinode-benchmarking-data
                        - name: dshm
                          mountPath: "/dev/shm"
                      # NOTE(review): 8 GPUs are requested but `accelerate` is launched
                      # with --num_processes="2" — looks intentional for the autoconf
                      # resource-requirements plugin; verify against the plugin's logic.
                      resources:
                        requests:
                          memory: 400Gi
                          nvidia.com/gpu: 8
                        limits:
                          memory: 400Gi
                          nvidia.com/gpu: 8
                      command:
                        - sh
                        - -c
                        - |
                          accelerate launch --use_fsdp --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP --fsdp_forward_prefetch=false \
                          --fsdp_offload_params=false --fsdp_sharding_strategy=HYBRID_SHARD --fsdp_state_dict_type=FULL_STATE_DICT \
                          --fsdp_cpu_ram_efficient_loading=true --fsdp_sync_module_states=true --dynamo_backend="no" --machine_rank="${RANK}" \
                          --main_process_ip="${MASTER_ADDR}" --main_process_port="${MASTER_PORT}" --mixed_precision="no" \
                          --num_machines="${WORLD_SIZE}" --num_processes="2" --rdzv_backend="static" --same_network \
                          -m tuning.sft_trainer --log_level info --eval_strategy no --save_strategy no \
                          --learning_rate 1e-05 --weight_decay 0.0 --warmup_ratio 0.03 --lr_scheduler_type cosine \
                          --logging_steps 1 --include_tokens_per_second True --packing False --response_template "\n### Response:" \
                          --dataset_text_field output --gradient_accumulation_steps 4 --gradient_checkpointing True --max_steps -1 \
                          --num_train_epochs 1.0 --model_name_or_path "ibm-granite/granite-8b-code-base-4k" \
                          --per_device_train_batch_size 8 --torch_dtype bfloat16 --max_seq_length 512 \
                          --training_data_path /data/fms-hf-tuning/artificial-dataset/news-tokens-16384plus-entries-4096.jsonl \
                          --output_dir /tmp/output/ --peft_method lora --r 4 --lora_alpha 16 --target_modules q_proj v_proj --use_flash_attn True
                          echo this line stops codeflare from immediately killing pods in which accelerate launch fails