forked from miguelalba96/tensorflow-facialexpr-recognition
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_landmarks.py
More file actions
129 lines (100 loc) · 5.18 KB
/
train_landmarks.py
File metadata and controls
129 lines (100 loc) · 5.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import os
import glob
import dlib
import numpy as np
import tensorflow as tf
from lib.data_loader import _CNN_Data_Loader
from lib.config import ConfigReader, TrainNetConfig, DataConfig
from lib.CNNS.land_marks import _facenet
#NUM_PARALLEL_EXEC_UNITS = 4
# p = '/Users/miguelangelalbaacosta/Downloads/shape_predictor_68_face_landmarks.dat'
# Path to dlib's pre-trained 68-point facial landmark model (cloud VM path;
# the commented-out line above is the corresponding local macOS path).
p = '/home/miguel_alba/data/shape_predictor_68_face_landmarks.dat'
# HOG-based frontal face detector. NOTE(review): appears unused below —
# landmark extraction uses the fixed rectangle instead; confirm before removing.
detector = dlib.get_frontal_face_detector()
# Loads the landmark model from disk at import time (module import will fail
# if the .dat file is missing).
predictor = dlib.shape_predictor(p)
# Single fixed face region spanning nearly the whole crop, so face detection
# is skipped — assumes inputs are pre-aligned 48x48 face crops; TODO confirm.
face_rects = [dlib.rectangle(left=1, top=1, right=47, bottom=47)]
def get_landmarks(image, rects):
    """Return the 68 dlib facial-landmark coordinates for the single face in *image*.

    Adapted from http://bit.ly/2cj7Fpq.

    Args:
        image: 2-D grayscale image array fed to the dlib shape predictor.
        rects: list of dlib.rectangle face regions; exactly one is expected.

    Returns:
        np.matrix of shape (68, 2) holding the (x, y) pixel coordinates of
        each predicted landmark.

    Raises:
        ValueError: if *rects* contains zero or more than one rectangle.
            (Was BaseException — ValueError is a subclass of it, so any
            existing ``except BaseException`` handler still catches this.)
    """
    if len(rects) > 1:
        raise ValueError("TooManyFaces")
    if not rects:
        raise ValueError("NoFaces")
    # Loop variable renamed to ``pt``: the original used ``p``, shadowing the
    # module-level predictor path ``p``.
    return np.matrix([[pt.x, pt.y] for pt in predictor(image, rects[0]).parts()])
def create_batch_landmarks(crops):
    """Extract facial landmarks for every image crop in a batch.

    The first channel of each crop is run through ``get_landmarks`` with the
    fixed module-level face rectangle; the per-image (68, 2) results are
    stacked into one float32 array of shape (batch, 68, 2).
    """
    per_image = [get_landmarks(crop[:, :, 0], face_rects) for crop in crops]
    return np.asarray(per_image, dtype=np.float32)
def train(conf_path):
    """Train the landmark-augmented facial-expression CNN from a YAML config.

    Builds the data loaders, network, and summary writers, then runs the TF1
    training loop. dlib landmarks are computed on the fly for every image
    batch and fed to the network alongside the images.

    Args:
        conf_path: path to the experiment YAML file read by ConfigReader.

    Returns:
        None.
    """
    # (Removed the original no-op ``conf_path = conf_path``.)
    config_reader = ConfigReader(conf_path)
    train_config = TrainNetConfig(config_reader.get_train_config())
    data_config = DataConfig(config_reader.get_train_config())

    out_dir = os.path.join(train_config.checkpoint_dir, 'models', train_config.name)
    train_log_dir = '{}/logs/train/'.format(out_dir)
    test_log_dir = '{}/logs/test/'.format(out_dir)
    # exist_ok avoids the race between the existence check and makedirs.
    os.makedirs(train_log_dir, exist_ok=True)
    os.makedirs(test_log_dir, exist_ok=True)

    net = _facenet(train_config)
    with tf.name_scope('input'):
        train_loader = _CNN_Data_Loader(data_config, name='train', training_mode=True, shuffle=True)
        train_image_batch, train_label_batch = train_loader._generate_batch()
        test_loader = _CNN_Data_Loader(data_config, name='test', training_mode=False, shuffle=False)  # default false
        test_image_batch, test_label_batch = test_loader._generate_batch()

    loss, accuracy = net.batch_model()
    train_op = net.optimize(loss)
    summary_op = tf.summary.merge_all()
    # BUGFIX: the original created a brand-new tf.summary.image op inside the
    # training loop every 150 steps, growing the graph (and memory) without
    # bound. Build one placeholder-fed image summary up front — *after*
    # merge_all, so it is not pulled into summary_op (matching the original,
    # where the image summary never reached the merged op either).
    val_image_ph = tf.placeholder(tf.float32, shape=test_image_batch.get_shape(),
                                  name='val_image_summary_in')
    plot_images = tf.summary.image('val_images', val_image_ph, 10)

    saver = tf.train.Saver(tf.global_variables())
    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)
    train_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(test_log_dir, sess.graph)

    try:
        for step in np.arange(train_config.max_step):
            train_image, train_label = sess.run([train_image_batch, train_label_batch])
            train_land_marks = create_batch_landmarks(train_image)
            # Sanity check: labels must be one-hot over the configured classes.
            assert train_label.shape[1] == data_config.n_classes
            _, train_loss, train_acc = sess.run(
                [train_op, loss, accuracy],
                feed_dict={net.x: train_image, net.y: train_label,
                           net.landmark: train_land_marks})
            if step % 50 == 0 or step + 1 == train_config.max_step:
                print('===TRAIN===: Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, train_loss, train_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={net.x: train_image, net.y: train_label,
                                                  net.landmark: train_land_marks})
                train_summary_writer.add_summary(summary_str, step)
            if step % 150 == 0 or step + 1 == train_config.max_step:
                val_image, val_label = sess.run([test_image_batch, test_label_batch])
                val_land_marks = create_batch_landmarks(val_image)
                val_loss, val_acc, plot_summary = sess.run(
                    [loss, accuracy, plot_images],
                    feed_dict={net.x: val_image, net.y: val_label,
                               net.landmark: val_land_marks,
                               val_image_ph: val_image})
                print('====VAL====: Step %d, val loss = %.4f, val accuracy = %.4f%%' % (step, val_loss, val_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={net.x: val_image, net.y: val_label,
                                                  net.landmark: val_land_marks})
                val_summary_writer.add_summary(summary_str, step)
                val_summary_writer.add_summary(plot_summary, step)
            if step % 2000 == 0 or step + 1 == train_config.max_step:
                # NOTE(review): checkpoints are written to the *train log* dir,
                # not out_dir/models — presumably intentional; confirm.
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('===INFO====: Training completed, reaching the maximum number of steps')
    sess.close()
    return None
def _train_201909_():
    """Run the September-2019 local experiment (hard-coded macOS config path)."""
    train('/Volumes/SSD_ML/facialexpr/lib/experiments/experiment_2.yml')
    return None
def train_cloud():
    """Run training with the cloud-VM experiment configuration."""
    train('/home/miguel_alba/facialexpr/lib/experiments/experiment_cloud.yml')
    return None
def main():
    """Entry point: launch the cloud training run."""
    # _train_201909_()  # local experiment, kept for reference
    train_cloud()


if __name__ == '__main__':
    main()