# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
from datetime import timedelta
import os
import shutil
import sys
import logging
import functools

from termcolor import colored
import torch
import torch.distributed as dist
from torch.distributed.elastic.multiprocessing.errors import record


@functools.lru_cache()
def create_logger(output_dir, dist_rank=0, name=''):
# create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
# create formatter
fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'
color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \
colored('(%(filename)s %(lineno)d)', 'yellow') + \
': %(levelname)s %(message)s'
    # create a console handler for the master process only
if dist_rank == 0:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(
logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(console_handler)
    # create a per-rank file handler
file_handler = logging.FileHandler(os.path.join(
output_dir, f'log_rank{dist_rank}.txt'), mode='a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(file_handler)
return logger
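
# Usage sketch (names are illustrative): each rank appends to its own
# log_rank{N}.txt in output_dir, and only rank 0 also echoes to stdout.
# `output_dir` must already exist.
#
#   logger = create_logger(output_dir='./logs', dist_rank=get_rank(), name='train')
#   logger.info('world size: %d', get_world_size())
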
def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


@record
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
ngpus_per_node = torch.cuda.device_count()
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.local_rank = int(os.environ['LOCAL_RANK'])
        args.batch_size = int(args.batch_size / ngpus_per_node)  # scale the per-node batch size down to a per-GPU batch size
else:
print('Not using distributed mode')
args.distributed = False
args.rank = args.local_rank = 0
return
args.distributed = True
torch.cuda.set_device(args.local_rank)
    print(f'| distributed init (rank {args.rank})', flush=True)
    # for `torch.distributed.launch`
    # dist.init_process_group(
    #     backend="nccl", init_method='env://', rank=args.rank, world_size=args.world_size, timeout=timedelta(seconds=5))
    # for `torchrun`
    # NOTE: the rendezvous address below is cluster-specific; replace it (or
    # switch to init_method='env://') for other environments.
    dist.init_process_group(
        backend="nccl", init_method='tcp://10.106.26.17:6667', rank=args.rank, world_size=args.world_size, timeout=timedelta(seconds=20))
dist.barrier()
setup_for_distributed(args.rank == 0)
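
# Launch sketch: `init_distributed_mode` reads RANK / WORLD_SIZE / LOCAL_RANK
# from the environment, which `torchrun` sets automatically (script name and
# flags are illustrative):
#
#   torchrun --nproc_per_node=8 main.py --batch_size 1024
#
# Without those variables it falls back to single-process mode.
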
def setup_for_distributed(is_master):
    """
    Disables printing on non-master processes, unless `force=True` is
    passed to print().
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
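
# Note: after `setup_for_distributed(False)`, plain `print()` calls on this
# rank are suppressed; pass `force=True` to print anyway:
#
#   print(f'rank {get_rank()} diagnostics', force=True)
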
def load_checkpoint(model, optimizer, ckpt_file, device_id: int):
    """
    Loads a local checkpoint, if one exists (e.g. from a shared folder
    when resuming an interrupted job).
    Returns: State
    """
    state = State(model, optimizer)
    if os.path.isfile(ckpt_file):
        print(f"=> loading checkpoint file: {ckpt_file}")
        state.load(ckpt_file, device_id)
        print(f"=> loaded checkpoint file: {ckpt_file}")
    return state


def save_checkpoint(state, is_best, filename: str):
checkpoint_dir = os.path.dirname(filename)
os.makedirs(checkpoint_dir, exist_ok=True)
# save to tmp, then commit by moving the file in case the job
# gets interrupted while writing the checkpoint
tmp_filename = filename + ".tmp"
torch.save(state.capture_snapshot(), tmp_filename)
os.rename(tmp_filename, filename)
print(f"=> saved checkpoint for epoch {state.epoch} at {filename}")
if is_best:
best = os.path.join(checkpoint_dir, "model_best.pth.tar")
print(f"=> best model found at epoch {state.epoch} saving to {best}")
shutil.copyfile(filename, best)
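
# Usage sketch (hypothetical training loop; `args`, `train_one_epoch` and
# `is_best` are assumptions, not part of this file):
#
#   ckpt_file = os.path.join(args.output_dir, 'checkpoint.pth.tar')
#   state = load_checkpoint(model, optimizer, ckpt_file, args.local_rank)
#   for epoch in range(state.epoch + 1, args.epochs):
#       train_one_epoch(...)
#       state.epoch = epoch
#       save_checkpoint(state, is_best, ckpt_file)
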
class State:
    """
    Container for objects that we want to checkpoint. Represents the
    current "state" of the worker. This object is mutable.
    """

    def __init__(self, model, optimizer):
        self.epoch = -1
        self.best_state = {'acc1': 0, 'loss': 1e3}
        self.model = model
        self.optimizer = optimizer

def capture_snapshot(self):
"""
Essentially a ``serialize()`` function, returns the state as an
object compatible with ``torch.save()``. The following should work
::
snapshot = state_0.capture_snapshot()
state_1.apply_snapshot(snapshot)
assert state_0 == state_1
"""
return {
"epoch": self.epoch,
"best_state": self.best_state,
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}

    def apply_snapshot(self, obj, device_id):
        """
        The complementary function of ``capture_snapshot()``. Applies the
        snapshot object that was returned by ``capture_snapshot()``.
        This function mutates this state object.
        """
        self.epoch = obj["epoch"]
        self.best_state = obj["best_state"]
        self.state_dict = obj["state_dict"]
        self.model.load_state_dict(obj["state_dict"])
        self.optimizer.load_state_dict(obj["optimizer"])

    def save(self, f):
        torch.save(self.capture_snapshot(), f)

    def load(self, f, device_id):
        # Map the loaded tensors onto the given single GPU.
        snapshot = torch.load(f, map_location=f"cuda:{device_id}")
        self.apply_snapshot(snapshot, device_id)
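
# Restore sketch: `load` maps every tensor onto a single GPU, so each rank
# should pass its own device index (`args.local_rank` as set by
# `init_distributed_mode` above):
#
#   state = State(model, optimizer)
#   state.load(ckpt_file, device_id=args.local_rank)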