-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdeepNet.v1.py
More file actions
109 lines (76 loc) · 2.46 KB
/
deepNet.v1.py
File metadata and controls
109 lines (76 loc) · 2.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import numpy as np
import sys
import matplotlib.pyplot as plt
from nn_utils import *
def initial_parameters(layers):
    """Build uniform-random weights and biases for a fully connected net.

    layers -- list of layer widths, e.g. [n_in, h1, ..., n_out]
    Returns a dict mapping 'W1', 'b1', 'W2', ... to numpy arrays, where
    W<l> has shape (layers[l], layers[l-1]) and b<l> has shape (layers[l], 1).
    """
    params = {}
    # Layer 0 is the input, so parameters start at index 1.
    for idx in range(1, len(layers)):
        params['W' + str(idx)] = np.random.rand(layers[idx], layers[idx - 1])
        params['b' + str(idx)] = np.random.rand(layers[idx], 1)
    return params
def single_layer_forward(A, l, parameters):
    """Propagate activations A through layer l.

    A -- activations from the previous layer, shape (layers[l-1], batch)
    l -- 1-based layer index used to look up 'W<l>' and 'b<l>'
    Returns (A_next, Z) where Z = W.A + b and A_next = sigmoid(Z).
    """
    weights = parameters['W' + str(l)]
    bias = parameters['b' + str(l)]
    # Affine transform followed by the sigmoid non-linearity
    # (sigmoid comes from nn_utils).
    pre_activation = np.dot(weights, A) + bias
    return sigmoid(pre_activation), pre_activation
def forward_pass(X, layers, parameters):
    """Run the input X through every layer of the network.

    X -- input batch, shape (layers[0], batch)
    Returns (A_L, caches): the final activation plus a list of per-layer
    caches. Index 0 holds a dummy entry so caches[l] belongs to layer l;
    each cache stores the layer's input activation, pre-activation Z and
    output activation under 'A<l-1>', 'Z<l>' and 'A<l>'.
    """
    caches = [[-1]]  # placeholder: layer indices start at 1
    A_prev = X
    for layer in range(1, len(layers)):
        A_next, Z = single_layer_forward(A_prev, layer, parameters)
        caches.append({
            'A' + str(layer - 1): A_prev,
            'Z' + str(layer): Z,
            'A' + str(layer): A_next,
        })
        A_prev = A_next
    return A_next, caches
def single_layer_backward(da, l, parameters, caches):
    """Backpropagate a gradient through sigmoid layer l.

    da -- gradient of the loss w.r.t. this layer's activation A_l,
          shape (layers[l], batch)
    l  -- 1-based layer index used to look up 'W<l>' and the caches
    parameters -- dict holding 'W<l>' (shape (layers[l], layers[l-1]))
    caches -- list where caches[l] stores 'A<l>' and 'A<l-1>'

    Returns (dA_{l-1}, dW_l, db_l).
    """
    Al = caches[l]['A' + str(l)]
    # Chain rule: dZ = dA * sigmoid'(Z) = da * A * (1 - A).
    # BUG FIX: the original computed Al*(1-Al) only and ignored `da`,
    # so the upstream gradient was never propagated through the network.
    dz_l = da * Al * (1 - Al)
    dA_l_1 = np.dot(parameters['W' + str(l)].T, dz_l)
    dw_l = np.dot(dz_l, caches[l]['A' + str(l - 1)].T)
    # Sum the per-sample gradients over the batch axis into a column vector.
    db_l = np.sum(dz_l, axis=1).reshape(-1, 1)
    return dA_l_1, dw_l, db_l
def backward_pass(dA_L, layers, parameters, caches, lr):
    """Backpropagate from the output layer and apply one gradient step.

    dA_L -- gradient of the loss w.r.t. the final activation
    lr   -- learning rate for the SGD update
    Mutates and returns `parameters` with each 'W<l>'/'b<l>' stepped
    against its gradient.
    """
    grad = dA_L
    # Walk the layers from the output back to layer 1.
    for layer in range(len(layers) - 1, 0, -1):
        grad_prev, dW, db = single_layer_backward(grad, layer, parameters, caches)
        parameters['W' + str(layer)] = parameters['W' + str(layer)] - lr * dW
        parameters['b' + str(layer)] = parameters['b' + str(layer)] - lr * db
        grad = grad_prev
    return parameters
#---------------------Main---------------------------
# Network: 5 inputs -> 10 -> 2 -> 1 output, sigmoid activations throughout.
layers = [5, 10, 2, 1]
parameters = initial_parameters(layers)

# 100 random 5-feature samples, one sample per column.
X = np.random.rand(5, 100)

# Target: sine of the second input feature, shaped (1, 100) to match the
# network's output.
targ = X[1, :].reshape([100, 1]).T
target = np.sin(1 * 2 * np.pi * targ)

previousError = float("inf")
epoch_error = []
AL = 0
target1 = []
lr = .01
for epoch in range(1, 100):
    AL, caches = forward_pass(X, layers, parameters)
    # The network's prediction is the final ACTIVATION. (The original read
    # the pre-activation caches[3]['Z3'] instead, which is not the output.)
    target1 = AL
    e = target1 - target
    errors = np.array(e)
    sse = np.sum(errors ** 2)
    # Gradient of the sum-of-squares loss w.r.t. the output activation.
    # BUG FIX: the original passed AL itself into backward_pass instead of
    # the loss gradient, and then overwrote AL with the returned parameter
    # dict, clobbering the activations.
    dA_L = 2 * e
    parameters = backward_pass(dA_L, layers, parameters, caches, lr)
    epoch_error.append(sse)
    # Report the current error (the original label misleadingly said
    # "Current epoch" while printing the error value).
    print('Current epoch error is:%s' % sse)
    # Early stop as soon as the error stops decreasing.
    if previousError > sse:
        previousError = sse
    else:
        break

plt.figure()
plt.plot(epoch_error)