-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
553 lines (430 loc) · 20.7 KB
/
utils.py
File metadata and controls
553 lines (430 loc) · 20.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
import os
import re
import torch
import numpy as np
import matplotlib.pyplot as plt
import sys
# from torchviz import make_dot
from torch.nn import functional as F
from torch.nn.utils import stateless
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import kurtosis, skew
from contextlib import contextmanager
def compute_forward_correlation(fc_layer):
    """
    Mean pairwise cosine similarity between the rows (output features) of a weight matrix.

    :param fc_layer: (torch.Tensor or torch.nn.Parameter) weight matrix of shape
        (out_features, in_features); only its ``.data`` is read.
    :return: (float) mean cosine similarity over all distinct row pairs.
    """
    weight = fc_layer.data
    out_features, in_features = weight.shape
    norm = weight.norm(dim=1, keepdim=True)
    norm_weight = weight / norm  # Normalize each row (output feature)
    correlation_matrix = torch.matmul(norm_weight, norm_weight.T)
    # BUG FIX: indexing with the raw (2, k) index tensor selected whole *rows* of the
    # correlation matrix; the two index rows must be unpacked to pick the (i, j) elements
    # of the strict upper triangle (each unordered pair once, diagonal excluded).
    rows, cols = torch.triu_indices(out_features, out_features, offset=1)
    forward_corr = correlation_matrix[rows, cols].mean().item()
    return forward_corr
def compute_backward_correlation(fc_layer):
    """
    Mean pairwise cosine similarity between the columns (input features) of a weight matrix.

    :param fc_layer: (torch.Tensor or torch.nn.Parameter) weight matrix of shape
        (out_features, in_features); only its ``.data`` is read.
    :return: (float) mean cosine similarity over all distinct column pairs.
    """
    weight = fc_layer.data
    out_features, in_features = weight.shape
    norm = weight.norm(dim=0, keepdim=True)
    norm_weight = weight / norm  # Normalize each column (input feature)
    correlation_matrix = torch.matmul(norm_weight.T, norm_weight)
    # BUG FIX: indexing with the raw (2, k) index tensor selected whole *rows*; unpack the
    # index rows to pick the (i, j) elements of the strict upper triangle.
    rows, cols = torch.triu_indices(in_features, in_features, offset=1)
    backward_corr = correlation_matrix[rows, cols].mean().item()
    return backward_corr
def log(data, filename, mode='a'):
    """Append *data* to *filename* as one line of space-separated %0.6f numbers."""
    values = np.array(data)
    with open(filename, mode) as out:
        np.savetxt(out, values, newline=' ', fmt='%0.6f')
        out.write('\n')
def normalize_vec(vector):
    """Return *vector* scaled to unit Euclidean norm."""
    magnitude = torch.linalg.norm(vector)
    return vector / magnitude
def measure_angle(v1, v2):
    """
    Return the angle between two vectors, in degrees.

    NaNs (e.g. from ``acos`` when round-off pushes the cosine past +/-1)
    are mapped to 0 by ``np.nan_to_num``.
    """
    u = normalize_vec(v1.squeeze())
    w = normalize_vec(v2.squeeze())
    cosine = torch.einsum('i, i -> ', u, w)
    degrees = torch.acos(cosine) * 180 / torch.pi
    return np.nan_to_num(degrees.cpu().numpy())
def graph_visualizer(logits, params, res_dir, name):
    """
    Visualize computation graph.
    This function visualizes the computation graph leading to the provided model
    output (logits) in terms of the provided model parameters, then exits.
    :param logits: (torch.Tensor) The logits produced by the model.
    :param params: (dict) The dictionary containing the model parameters.
    :param res_dir: (str) The path to the directory to save the computation graph.
    :param name: (str) The name of the computation graph.
    :return: None (the process exits via ``sys.exit``).
    """
    # -- BUG FIX: the module-level torchviz import is commented out, so `make_dot`
    #    was a NameError; import it locally to keep torchviz an optional dependency
    from torchviz import make_dot
    # -- render computation graph
    make_dot(logits, params=params).render(f'{res_dir}/comp_grph_{name}', format='pdf')
    # -- exit the program (this helper is a one-shot debugging aid)
    sys.exit()
@contextmanager
def my_profiler(function_name='.', quit_code=True):
    """
    Context manager that profiles the wrapped code region.

    Runs the region under ``torch.autograd.profiler.profile`` (with shapes,
    memory, and CUDA recording enabled), prints the per-op table sorted by
    CPU memory usage plus the overall averages, and then exits the
    interpreter unless *quit_code* is False.
    """
    print(f'\nProfiling {function_name} . . .\n')
    profiler = torch.autograd.profiler.profile(record_shapes=True, profile_memory=True, use_cuda=True)
    with profiler as prof:
        # -- hand control back to the caller; the wrapped code runs here
        yield
    summary = prof.total_average()
    print(prof.key_averages().table(sort_by="cpu_memory_usage", row_limit=20))
    print(summary)
    if quit_code:
        quit()
    else:
        print(f'\nFinished profiling {function_name} . . . . . . . . . . . . . .')
def e_y_func(x, label, model, params, W, Beta):
# -- compute activations
y, logits = stateless.functional_call(model, params, x)
# -- compute errors (BP)
e_sym = [F.softmax(logits) - F.one_hot(label, num_classes=logits.shape[1])]
for y_, i in zip(reversed(y), reversed(list(W))):
e_sym.insert(0, torch.matmul(e_sym[0], W[i]) * (1 - torch.exp(-Beta * y_)))
return e_sym, [*y, F.softmax(logits, dim=1)]
def compute_EVs(EVs, obs, i):
    """
    Compute the explained-variance spectrum for a set of observations.

    Builds the covariance matrix of *obs* (samples in rows, features in
    columns) and stores the eigenvalue fractions, largest first, under the
    key ``layer{i + 1}``.

    :param EVs: (dict) accumulator of explained variances per layer; mutated in place.
    :param obs: (torch.Tensor) observations, shape (samples, features).
    :param i: (int) zero-based layer index.
    :return: (dict) the same *EVs* dict, for chaining.
    """
    covariance = torch.cov(obs.T)
    # eigvalsh returns eigenvalues in ascending order; flip to get the
    # largest component first
    eigenvalues = torch.linalg.eigvalsh(covariance)
    total = sum(eigenvalues)
    EVs[f'layer{i + 1}'] = [(e / total).item() for e in eigenvalues.flip(0)]
    return EVs
def stats(test_loader, train_loader, model, loss_func, res_dir, params, Beta, bc_idx, Theta, vec, fbk, x, label,
          save_dist):
    """
    Compute test-set statistics for the model and log them to disk.

    For every batch in *test_loader*, computes prediction accuracy and loss,
    then (without tracking gradients) logs per-layer explained-variance
    spectra and the forward/backward weight correlations as text files under
    *res_dir*.

    :param test_loader: iterable of (input, label) test batches; must yield at
        least one batch (loss/acc come from the LAST batch).
    :param train_loader: unused in the active code path; kept for interface
        compatibility with existing callers.
    :param model: evaluated via a stateless functional call; must expose a
        ``device`` attribute and return ``(activations, logits)``.
    :param loss_func: criterion applied to ``(logits, label.ravel())``.
    :param res_dir: (str) directory receiving the *.txt log files.
    :param params: (dict) model parameters; keys containing 'fd' are treated
        as forward weight matrices.
    :param Beta: unused in the active code path; kept for interface compatibility.
    :param bc_idx: unused; kept for interface compatibility.
    :param Theta: unused; kept for interface compatibility.
    :param vec: unused; kept for interface compatibility.
    :param fbk: unused; kept for interface compatibility.
    :param x: shadowed by the loop variable; kept for interface compatibility.
    :param label: shadowed by the loop variable; kept for interface compatibility.
    :param save_dist: unused; kept for interface compatibility.
    :return: (loss, acc) of the last test batch.
    """
    # -- forward weight matrices (loop-invariant, so built once)
    W = {k: v for k, v in params.items() if 'fd' in k}
    for x, label in test_loader:
        x, label = x.to(model.device), label.to(model.device)
        # -- predict (BUG FIX: `_stateless` was a NameError; the module is imported as `stateless`)
        y, logits = stateless.functional_call(model, params, x)
        # -- accuracy
        pred = F.softmax(logits, dim=1).argmax(dim=1)
        acc = torch.eq(pred, label).sum().item() / len(label)
        # -- loss
        loss = loss_func(logits, label.ravel())
        with torch.no_grad():
            # -- compute activations, including the output softmax
            activation = [*y, F.softmax(logits, dim=1)]
            # -- compute explained variance (EV) for hidden activations
            EVs = {}
            for i, y_ in enumerate(activation[1:-1]):
                EVs = compute_EVs(EVs, y_, i)
            # -- store EVs
            for i, EV in enumerate(EVs.values()):
                log(EV, f'{res_dir}/EVs_{i + 1}.txt')
            # -- Compute Forward and Backward Correlations
            log([compute_forward_correlation(w) for w in W.values()], f'{res_dir}/forward_corr.txt')
            # BUG FIX: backward_corr.txt previously re-logged the *forward* correlations
            log([compute_backward_correlation(w) for w in W.values()], f'{res_dir}/backward_corr.txt')
    return loss, acc
class Plot:
    """
    Render diagnostic figures from the text logs produced during training.

    Each plotting method loads ``<res_dir>/<name>.txt`` and writes the figure
    next to it as ``<res_dir>/<name>``.
    """

    def __init__(self, res_dir):
        # -- directory holding the *.txt logs and receiving the figures
        self.res_dir = res_dir
        # NOTE(review): 'lightteal', 'lightorange', and 'lightviolet' are not valid
        # matplotlib named colors; `colors` is unused in this file, so this only
        # matters if a caller starts consuming it — verify before use.
        self.colors = ['lightcoral', 'lightsalmon', 'lightyellow', 'lightgreen', 'lightblue', 'lightteal', 'lightpink',
                       'lightorange', 'lightviolet', 'lightgray']
        self.edgecolors = ['red', 'orange', 'yellow', 'green', 'blue', 'teal', 'pink', 'gold', 'lime', 'navy', 'purple']

    @staticmethod
    def comp_moving_avg(vector, period):
        """Return the moving average of *vector* over a window of *period* samples."""
        ret = np.cumsum(vector, dtype=float)
        ret[period:] = ret[period:] - ret[:-period]
        return ret[period - 1:] / period

    def compute_yrange(self, z):
        """
        Heuristic y-axis range for noisy curves.

        Uses the 25th and 98th percentiles (note: 98th, not the classical 75th)
        and extends the top by 1.5x their spread; the lower bound is fixed at -0.1.
        """
        q_low, q_high = np.percentile(z, [25, 98])
        spread = q_high - q_low
        upper_bound = q_high + (1.5 * spread)
        lower_bound = -0.1
        return [lower_bound, upper_bound]

    def loss(self):
        """
        Plot the loss curve from loss.txt.
        """
        loss = np.loadtxt(f'{self.res_dir}/loss.txt')
        plt.plot(range(len(loss)), loss)
        plt.title('Loss')
        plt.savefig(f'{self.res_dir}/loss', bbox_inches='tight')
        plt.close()

    def cond_no(self):
        """
        Plot condition number of Hessian for layer W_{3,4}.

        Plots a moving average (window 11) of cond_no.txt, shifted so each
        point sits at the center of its averaging window.
        """
        data = np.loadtxt(f'{self.res_dir}/cond_no.txt')
        per = 11
        z = self.comp_moving_avg(data, period=per)
        plt.plot(np.array(range(len(z))) + int((per - 1) / 2), z)
        plt.ylim([0.2e9, 1.2e9])
        plt.title('Condition number')
        plt.savefig(f'{self.res_dir}/cond_no', bbox_inches='tight')
        plt.close()

    def accuracy(self):
        """
        Plot the accuracy curve from acc.txt.
        """
        acc = np.loadtxt(f'{self.res_dir}/acc.txt')
        plt.plot(range(len(acc)), acc)
        plt.ylim([0, 1])
        plt.title('Accuracy')
        plt.savefig(f'{self.res_dir}/acc', bbox_inches='tight')
        plt.close()

    def plot_stats(self, filename, label_func, title, cols=None, ylim='auto'):
        """
        Plot per-weight / per-layer statistics from ``<res_dir>/<filename>.txt``.

        BUG FIX: both the load and save paths previously contained a literal
        "(unknown)" placeholder instead of *filename*, so every call plotted
        (and overwrote) the same nonexistent file.

        :param filename: (str) stem of the log file and of the output figure.
        :param label_func: (callable) maps a column index to its legend label.
        :param title: (str) figure title.
        :param cols: (iterable or None) column indices to plot; all when None.
        :param ylim: 'auto' for a percentile-based range, None for matplotlib's
            default, or an explicit [lo, hi] pair.
        """
        z = np.loadtxt(f'{self.res_dir}/{filename}.txt')
        if cols is None:
            cols = range(len(z[0]))
        for i in cols:
            plt.plot(range(len(z)), z[:, i], color=self.edgecolors[i], label=label_func(i))
        plt.legend()
        # `==`, not `is`: identity comparison with a str literal is implementation-defined
        if ylim == 'auto':
            plt.ylim(self.compute_yrange(z))
        elif ylim is not None:
            plt.ylim(ylim)
        plt.title(title)
        plt.savefig(f'{self.res_dir}/{filename}', bbox_inches='tight')
        plt.close()

    def plot_dist(self, variable, idx, xlabel, xlim, label_func):
        """
        Plot per-row value distributions stored in ``<res_dir>/<variable>_<idx>.txt``.

        :param variable: (str) stem of the log file.
        :param idx: layer-index suffix of the log file.
        :param xlabel: (str) x-axis label.
        :param xlim: x-axis limits passed to ``plt.xlim``.
        :param label_func: (callable) maps (variable, row index) to a legend label.
        """
        # Read in the data from the file: one distribution per line
        with open(f'{self.res_dir}/{variable}_{idx}.txt', 'r') as f:
            data = [[float(num) for num in line.split()] for line in f]
        fig, ax = plt.subplots()
        # Plot each row as a normalized histogram: bin centers vs. density
        for i, row in enumerate(data):
            n, bins = np.histogram(row, bins=500, density=True)
            bin_centers = (bins[:-1] + bins[1:]) / 2
            ax.plot(bin_centers, n, color=self.edgecolors[i], label=label_func(variable, i))
        ax.set_xlabel(xlabel)
        ax.set_ylabel('Frequency')
        ax.legend()
        plt.xlim(xlim)
        plt.ylim([0, 50])
        plt.savefig(f'{self.res_dir}/dist_{variable}_{idx}', bbox_inches='tight')
        plt.close()

    def __call__(self, *args, **kwargs):
        # -- performance curves
        self.loss()
        self.accuracy()
def main(res_dir):
    """Generate all figures for a single results directory."""
    Plot(res_dir)()
if __name__ == '__main__':
    # -- root directory holding the per-run result folders
    res_dir = '/Users/nshervt/Private/Research/PostDoc/MetaPlasticity/Codes/MetaPlasticity/results/figures/results'
    # -- run folders to plot; currently empty (add entries like the examples below)
    tests = []
    # tests.append('Stiefel/2023-08-07_17-57-01_39')
    # tests.append('Stiefel/2023-08-07_17-57-07_7')
    for run in tests:
        main(f'{res_dir}/{run}')