-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtrain.py
More file actions
88 lines (76 loc) · 2.59 KB
/
train.py
File metadata and controls
88 lines (76 loc) · 2.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import json
import os
from dataclasses import asdict
from data import (
build_concat_train_dataset,
build_eval_dataset_dict,
get_compute_metrics_dict,
get_data_collator,
)
from engine import (
StopEvaluationAfterOneStepCallback,
StopTrainingAfterOneStepCallback,
TrainerWithGenToEval,
)
from models import build_model_and_tokenizer, count_parameters, parse_args
def train():
    """Run the full training pipeline: build data and model, fit, save, evaluate.

    All configuration comes from the CLI via ``parse_args``. Side effects:
    writes the trained model and ``runs/config.json`` under
    ``args.output_dir`` and prints evaluation metrics to stdout.
    """
    args = parse_args()
    # Derive the run name from the output directory. normpath strips any
    # trailing separator so "runs/exp1/" still yields "exp1" (the original
    # split("/")[-1] returned "" in that case), and basename is portable
    # across path separators.
    args.run_name = os.path.basename(os.path.normpath(args.output_dir))
    args.logging_dir = os.path.join(args.output_dir, "runs")

    model, tokenizer = build_model_and_tokenizer(is_training=True, **asdict(args))
    _ = count_parameters(model, layers=False)  # report parameter counts

    # Multimodal models expose a vision_processor; text-only models do not,
    # in which case no image transform is applied.
    transform = getattr(model, "vision_processor", None)
    train_dataset = build_concat_train_dataset(
        tokenizer=tokenizer,
        transform=transform,
        **asdict(args),
    )
    eval_dataset_dict = build_eval_dataset_dict(
        tokenizer=tokenizer,
        transform=transform,
        **asdict(args),
    )
    data_collator = get_data_collator(tokenizer=tokenizer, **asdict(args))
    compute_metrics_dict = get_compute_metrics_dict(
        dataset_dict=eval_dataset_dict, tokenizer=tokenizer, **asdict(args)
    )

    # NOTE(review): non-reentrant gradient checkpointing is forced here;
    # presumably required by the torch version in use — confirm.
    args.gradient_checkpointing_kwargs = {"use_reentrant": False}
    trainer = TrainerWithGenToEval(
        model=model,
        tokenizer=tokenizer,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset_dict,
        data_collator=data_collator,
        # The trainer takes a single compute_metrics; seed it with the first
        # entry (it is swapped per-dataset in the evaluation loop below).
        # next(iter(...), None) avoids an IndexError on an empty dict.
        compute_metrics=(
            next(iter(compute_metrics_dict.values()), None)
            if compute_metrics_dict
            else None
        ),
    )

    save_config(args)
    trainer.train()
    trainer.save_model()
    print("Trained model saved...")
    print("Moving to Evaluation...")

    if eval_dataset_dict is not None:
        metrics = {}
        for eval_dataset_name, eval_dataset in eval_dataset_dict.items():
            # Each eval dataset has its own metric function; install it before
            # evaluating, and prefix keys so datasets do not collide.
            trainer.compute_metrics = compute_metrics_dict[eval_dataset_name]
            metrics.update(
                trainer.evaluate(
                    eval_dataset=eval_dataset,
                    metric_key_prefix=f"eval_{eval_dataset_name}",
                )
            )
        print(metrics)
def save_config(args):
    """Serialize the run configuration (a dataclass) to ``<logging_dir>/config.json``."""
    target_dir = args.logging_dir
    # Create the logging directory first; it may not exist yet at this point.
    os.makedirs(target_dir, exist_ok=True)
    destination = os.path.join(target_dir, "config.json")
    payload = json.dumps(asdict(args), indent=4)
    with open(destination, "w") as handle:
        handle.write(payload)
# Script entry point: launch the full training pipeline when run directly.
if __name__ == "__main__":
    train()