-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLearning.py
More file actions
140 lines (97 loc) · 6.58 KB
/
Learning.py
File metadata and controls
140 lines (97 loc) · 6.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import numpy as np
"""
----------------------------------------------------------------------------------------------------------------------------------------------------------
Notes for readers
Thank you for reading this note. This code is the learning phase of deep deducing: it generates the sets of weight matrices that will be randomly
selected in the deducing phase.
You may change or tune any of the following parameters or variables. However, it is recommended that you do so only if the following
note suggests so.
We hope you enjoy it.
----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
"""
----------------------------------------------------------------------------------------------------------------------------------------------------------
Part A. Functions for Generating Samples
In this part, we import the model for learning and create an object or instance from the imported class.
We define:
--- network_size:
The topology of the deep neural network. For example, if it is [36, 100, 100, 100, 6], it means the deep neural network
has one input layer with 36 neurons, three hidden layers each with 100 neurons, and an output layer with 6 neurons.
--- alpha:
The learning rate for the set of weight matrix and slope multiplier.
--- epoch_of_learning:
Learning epochs under which traditional SGD is performed in every epoch upon the set of weight matrix and slope multiplier.
--- Machine:
The name of the object or instance created from the class "Brain".
----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
def generate_state():
    """Return a random 3x3 board of binary cells.

    Each of the 9 cells is drawn independently as a fair coin flip
    (Bernoulli with p = 0.5), so every board is equally likely.
    """
    cells = np.random.binomial(1, 0.5, size=9)
    return cells.reshape((3, 3))
def return_state_value(state):
    """Map a 3x3 binary board to a length-9 target vector.

    The rule looks at the center cell and its 8 neighbors and fills the
    whole 9-vector with a single value:
      - exactly 2 live neighbors -> 1 everywhere;
      - exactly 3 live neighbors -> the center cell's value everywhere;
      - fewer than 2 or more than 3 -> the center cell flipped (1 - center).

    Returns a float numpy array of shape (9,).
    """
    flat = state.flatten()
    center = flat[4]                     # middle cell of the 3x3 board
    neighbors = flat.sum() - center      # the other 8 cells

    if neighbors == 2:
        fill = 1
    elif neighbors == 3:
        fill = center
    else:  # under-population (<2) or over-population (>3)
        fill = 1 - center

    return np.ones(9) * fill
# Number of (state, target) samples per training step. The original authors
# recommend experimenting with different batch sizes.
batch_size = 1
"""
----------------------------------------------------------------------------------------------------------------------------------------------------------
Part B. Initializing Set of Weight Matrix and Importing Model
----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
from Brain_for_learning import *
# Network topology: 9 input neurons (flattened 3x3 board), three hidden
# layers of 100 neurons each, and 9 output neurons (the target vector).
network_size = np.array([9, 100, 100, 100, 9])
# Initial slope for the activation/sigmoid functions in the hidden and
# output layers. (Exact use depends on the Brain class — defined elsewhere.)
slope = 30
# SGD learning rate applied to the weight matrices and slope multipliers.
alpha = 0.000001
# Total number of training iterations for the loop in Part C.
epoch_of_learning = 50000000
# Dropout rate used during the learning phase.
drop_rate = 0.015
# Momentum coefficient: how strongly the previous gradient carries into the
# next update during learning.
momentum_rate = 0.015
Machine = Brain(network_size, slope, alpha, epoch_of_learning, drop_rate, momentum_rate)
"""
----------------------------------------------------------------------------------------------------------------------------------------------------------
Part C. Generating Samples and Training by Model
----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
# this parameter decides whether the program will train weight matrix upon existing weight matrix. We recommend readers to try different numbers.
retrain = False
if retrain == True:
Machine.weight_list = np.load("self.Conway_1_100x100x100_30_0.000001_20m_[1]_weight_list.npy" , allow_pickle=True)
Machine.slope_list = np.load("self.Conway_1_100x100x100_30_0.000001_20m_[1]_slope_list.npy" , allow_pickle=True)
for i in range(epoch_of_learning):
print(i)
input_list = list()
output_list = list()
for j in range(batch_size):
state = generate_state()
state_value = return_state_value(state)
input_list .append(state.flatten())
output_list.append(state_value)
input_list = np.asarray(input_list)
output_list = np.asarray(output_list)
Machine.learn_batch(input_list, output_list)
"""
----------------------------------------------------------------------------------------------------------------------------------------------------------
Part D. Saving the Trained Set of Weight Matrix for MWM-SGD
----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
# these two lines save the trained set of weight matrix later to be used/selected in the dedcuing phase. We recommend readers to try different numbers.
np.save("self.Conway_1_100x100x100_30_0.000001_50m_[1]_weight_list" , Machine.weight_list )
np.save("self.Conway_1_100x100x100_30_0.000001_50m_[1]_slope_list" , Machine.slope_list )