import os
from argparse import ArgumentParser, Namespace

import torch
from accelerate import PartialState
from datasets import load_dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
'''
The class below serves as a general wrapper around SFTTrainer for fine-tuning
large language models for simultaneous translation. Expected behavior is
outlined in the functions that remain to be implemented. If additional setup
is required for certain config objects, the relevant setup functions should
be overridden.

When extending this class to other LLMs, the remaining functions are expected
to be implemented, and self.train() should handle any additional expected
behavior. (An illustrative subclass sketch appears at the end of this file.)
'''
class LLMSimulSFTTrainerWrapper:
    LANG_NAMES = {
        "en": "English",
        "es": "Spanish",
        "de": "German",
        "fr": "French",
        "nl": "Dutch",
        "ro": "Romanian",
        "it": "Italian",
        "ko": "Korean",
    }

    def __init__(self, args: Namespace):
        self.model_name = args.model
        self.training_set = args.training_set
        self.training_subset = args.training_subset
        self.source = args.source_lang
        self.target = args.target_lang
        self.peft = args.peft
        self.bnb = args.bnb
        self.adapter_path = args.adapter_path

        assert self.source != self.target, "Source and target languages should not be the same."
        try:
            self.source_lang = self.LANG_NAMES[self.source]
            self.target_lang = self.LANG_NAMES[self.target]
        except KeyError as e:
            raise ValueError(f"Unsupported language code: {e}")

        if not self.peft:
            PartialState().print(
                f"Warning: PEFT-LoRA is set to {self.peft}, indicating that full model "
                "fine-tuning is desired. This is not recommended for most hardware setups."
            )

        self.fsdp = bool(os.getenv("ACCELERATE_USE_FSDP"))

        self.setup_bnb_config(args)
        self.setup_peft_config(args)
        self.setup_training_arguments(args)
        self.setup_model_and_tokenizer(args)
        self.setup_trainer(args)
    '''
    The arguments below should be common to fine-tuning LLMs beyond Falcon.
    Defaults are configured to allow fine-tuning on a single V100; with a
    sharded model, however, batch size can be scaled up significantly.

    LoRA adapter arguments govern the size of the network added via PEFT.
    Quantization framework arguments govern the exact model size and the
    parameter drift introduced by model compression.

    Direct training arguments govern fine-tuning behavior in general. For
    Falcon, gradient checkpointing is currently non-functional with the
    sharded 7B model hardcoded in the training pipeline.
    (An illustrative invocation follows add_args below.)
    '''
    @classmethod
    def add_args(cls, parser: ArgumentParser):
        # basic arguments required for fine-tuning
        parser.add_argument("--model", type=str, required=True,
            help="Path to the model you want to use; currently only works with models on the Hugging Face Hub.",
        )
        parser.add_argument("--training-set", type=str, required=True,
            help="Path to the dataset you want to use; currently only works with datasets on the Hugging Face Hub.",
        )
        # LoRA adapter arguments
        parser.add_argument("--peft", action="store_true")
        parser.add_argument("--adapter-path", type=str,
            help="Path to a locally stored adapter checkpoint that you want loaded.",
        )
        parser.add_argument("--lora-alpha", type=int, default=16)
        parser.add_argument("--lora-dropout", type=float, default=0.1)
        parser.add_argument("--lora-r", type=int, default=64)
        # quantization framework arguments
        parser.add_argument("--bnb", action="store_true")
        parser.add_argument("--use-4bit", action="store_true")
        parser.add_argument("--use-nested-quant", action="store_true")
        parser.add_argument("--bnb-4bit-compute-dtype", type=str, default="float16")
        parser.add_argument("--bnb-4bit-quant-type", type=str, default="nf4")
        # more direct training arguments
        parser.add_argument("--output-dir", type=str, default="./model")
        parser.add_argument("--bsz", type=int, default=4)
        parser.add_argument("--update-freq", type=int, default=1)
        parser.add_argument("--gradient-checkpointing", action="store_true")
        parser.add_argument("--optim", type=str, default="paged_adamw_32bit")
        parser.add_argument("--lr", type=float, default=2e-4)
        parser.add_argument("--lr-scheduler", type=str, default="constant")
        parser.add_argument("--weight-decay", type=float, default=0.0)
        parser.add_argument("--max-grad-norm", type=float, default=0.3)
        parser.add_argument("--warmup-ratio", type=float, default=0.03)
        parser.add_argument("--save-interval", type=int, default=1000)
        parser.add_argument("--log-interval", type=int, default=100)
        parser.add_argument("--eval-interval", type=int, default=1000)
        parser.add_argument("--max-updates", type=int, default=10000)
        parser.add_argument("--max-seq-length", type=int, default=1024)
        parser.add_argument("--source-lang", type=str, default="en")
        parser.add_argument("--target-lang", type=str, default="es")
        parser.add_argument("--save-strategy", type=str, default="steps")
        parser.add_argument("--eval-strategy", type=str, default="steps")
        parser.add_argument("--logging-strategy", type=str, default="steps")
        parser.add_argument("--num-train-epochs", type=int, default=1)
        parser.add_argument("--training-subset", type=str, required=False, default="",
            help="Path to the dataset subset you want to use; currently only works with datasets on the Hugging Face Hub.",
        )
        parser.add_argument("--user-dir", type=str, default=None, required=False,
            help="The directory containing personal project files.",
        )
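
    # Illustrative invocation of a script built on this parser (a sketch, not
    # a committed entry point: the script name "train.py" is a placeholder,
    # and wmt14/de-en is just one Hub dataset these flags could point at):
    #
    #   python train.py --model tiiuae/falcon-7b \
    #       --training-set wmt14 --training-subset de-en \
    #       --source-lang en --target-lang de \
    #       --peft --bnb --use-4bit --bsz 4 --lr 2e-4 --output-dir ./model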
    def setup_bnb_config(self, args):
        compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype)

        if compute_dtype == torch.float16 and args.use_4bit:
            major, _ = torch.cuda.get_device_capability()
            if major >= 8:
                PartialState().print("=" * 80)
                PartialState().print("Your GPU supports bfloat16; you can accelerate training with bf16")
                PartialState().print("=" * 80)

        # aligning compute_dtype with the storage dtype is necessary for FSDP to work
        self.bnb_config = BitsAndBytesConfig(
            load_in_4bit=args.use_4bit,
            bnb_4bit_quant_type=args.bnb_4bit_quant_type,
            bnb_4bit_compute_dtype=compute_dtype,
            bnb_4bit_quant_storage=compute_dtype,
            bnb_4bit_use_double_quant=args.use_nested_quant,
        )
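
    # A minimal sketch of how a child class might consume self.bnb_config in
    # its setup_model_and_tokenizer implementation (hypothetical; this base
    # class deliberately leaves the method unimplemented):
    #
    #   def setup_model_and_tokenizer(self, args):
    #       self.model = AutoModelForCausalLM.from_pretrained(
    #           self.model_name,
    #           quantization_config=self.bnb_config if self.bnb else None,
    #       )
    #       self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)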
    '''
    Changing the evaluation strategy is awkward; if an epoch-based strategy is
    desired, this function should be overridden in the child wrapper (see the
    sketch after this function).

    Gradient checkpointing is bugged for Falcon-based models. This function
    should be overridden if you want to enable it.
    '''
    def setup_training_arguments(self, args):
        self.training_arguments = TrainingArguments(
            output_dir=args.output_dir,
            per_device_train_batch_size=args.bsz,
            gradient_accumulation_steps=args.update_freq,
            optim=args.optim,
            save_steps=args.save_interval,
            save_strategy=args.save_strategy,
            eval_strategy=args.eval_strategy,
            eval_steps=args.eval_interval,
            logging_strategy=args.logging_strategy,
            logging_steps=args.log_interval,
            num_train_epochs=args.num_train_epochs,
            max_steps=args.max_updates,
            learning_rate=args.lr,
            max_grad_norm=args.max_grad_norm,
            warmup_ratio=args.warmup_ratio,
            lr_scheduler_type=args.lr_scheduler,
            weight_decay=args.weight_decay,
            group_by_length=True,
            fp16=(args.bnb_4bit_compute_dtype == "float16"),
            bf16=(args.bnb_4bit_compute_dtype == "bfloat16"),
            # gradient_checkpointing=args.gradient_checkpointing,  # bugged for Falcon; see note above
        )
        PartialState().print(
            f'fp16: {args.bnb_4bit_compute_dtype == "float16"}, '
            f'bf16: {args.bnb_4bit_compute_dtype == "bfloat16"}'
        )
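
    # A sketch of the epoch-based override suggested above (hypothetical; a
    # child wrapper would supply this, not the base class). Mutating the
    # TrainingArguments fields after construction is one option; rebuilding
    # the object with the desired strategies is another:
    #
    #   def setup_training_arguments(self, args):
    #       super().setup_training_arguments(args)
    #       self.training_arguments.eval_strategy = "epoch"
    #       self.training_arguments.save_strategy = "epoch"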
    def load_dataset(self):
        self.training = load_dataset(self.training_set, self.training_subset, split="train")
        self.validation = load_dataset(self.training_set, self.training_subset, split="validation")
    def setup_peft_config(self, args):
        raise NotImplementedError

    def setup_model_and_tokenizer(self, args):
        raise NotImplementedError

    def setup_trainer(self, args):
        raise NotImplementedError

    def formatting_func(self, example):
        raise NotImplementedError

    def train(self):
        raise NotImplementedError
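
# A minimal, illustrative subclass sketch showing how the abstract hooks fit
# together (setup_model_and_tokenizer is sketched earlier, after
# setup_bnb_config). Everything here is an assumption for demonstration: the
# class name, the prompt wording, and the dataset field names "source" and
# "target" are not defined anywhere in this file.
#
#   class ExampleSFTTrainerWrapper(LLMSimulSFTTrainerWrapper):
#       def setup_peft_config(self, args):
#           self.peft_config = LoraConfig(
#               lora_alpha=args.lora_alpha,
#               lora_dropout=args.lora_dropout,
#               r=args.lora_r,
#               bias="none",
#               task_type="CAUSAL_LM",
#           )
#
#       def setup_trainer(self, args):
#           self.load_dataset()
#           self.trainer = SFTTrainer(
#               model=self.model,
#               args=self.training_arguments,
#               train_dataset=self.training,
#               eval_dataset=self.validation,
#               peft_config=self.peft_config,
#               formatting_func=self.formatting_func,
#           )
#
#       def formatting_func(self, example):
#           return (
#               f"Translate from {self.source_lang} to {self.target_lang}: "
#               f"{example['source']} Translation: {example['target']}"
#           )
#
#       def train(self):
#           self.trainer.train()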