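"""Log-scale feature quantizer (log_quan.py).

Fits per-feature bin boundaries, either as a fixed number of geometric bins or
by searching a log base that minimizes the normalized entropy of the bin
occupancy, then encodes every sample as a short letter signature (e.g.
'0A1C...'). The fitted quantizer is pickled for reuse.
"""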
import pickle
import sys
from collections import Counter

import numpy as np
from tqdm import tqdm

import global_
class log_Pattering:
    def __init__(self, ignore_idx=[0, 1, 2], n_log_=1.2, n_jobs=1, using_entropy=False):
        # Column indices to pass through untouched (treated as categorical codes).
        self.ignore_idx = ignore_idx
        # In entropy mode the log base is searched per feature in multi_fit;
        # otherwise n_log_ is the fixed number of geometric bins.
        if not using_entropy:
            self.bin = n_log_
        self.n_log = {}
        self.boundary_dict = {}
    def find_min_frequency_value(self, lst):
        # Count how often each value occurs in the list.
        frequencies = Counter(lst)
        # Find the lowest occurrence count.
        min_frequency = min(frequencies.values())
        # Collect every value that occurs with that minimum frequency.
        min_frequency_elements = [key for key, value in frequencies.items() if value == min_frequency]
        # Of those, return the smallest value.
        min_value = min(min_frequency_elements)
        return min_value
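    # e.g. find_min_frequency_value([1, 1, 2, 3, 3]) -> 2: only 2 occurs once,
    # so it is the (smallest) least-frequent value.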
    def make_countlist(self, data, ranges):
        counts = [0] * (len(ranges) + 1)
        # Assign each value to its range bucket via binary search and count it.
        for value in data:
            left, right = 0, len(ranges)
            while left < right:
                mid = (left + right) // 2
                if value < ranges[mid]:
                    right = mid
                else:
                    left = mid + 1
            # left is now the index of the bucket this value falls into.
            counts[left] += 1
        return counts
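    # e.g. make_countlist([-1, 5, 12], [0, 10]) -> [1, 1, 1]: one value below 0,
    # one in [0, 10), one at or above 10.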
    def entropy_f(self, count_list, total_count):
        # Shannon entropy of the bin occupancy, normalized by log2(n) so the
        # result lies in [0, 1] (1 == perfectly uniform bins).
        probabilities = [count / total_count for count in count_list]
        entropy = -sum(p * np.log2(p) for p in probabilities if p != 0)
        n = len(count_list)
        return entropy / np.log2(n)
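    # e.g. entropy_f([5, 5], 10) -> 1.0: a perfectly even two-bin split has
    # maximal normalized entropy, while skewed splits score closer to 0.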
    def make_tmp_boundary(self, original_list, feature_list):
        max_data = max(feature_list)
        len_boundary = len(original_list)
        real_boundary = []
        for idx, scale in enumerate(original_list):
            left = scale
            # The last candidate edge is capped at the observed maximum.
            if idx == (len_boundary - 1):
                right = max_data
            else:
                right = original_list[idx + 1]
            filtered_array = feature_list[(feature_list > left) & (feature_list <= right)]
            if len(filtered_array) == 0:
                # Empty interval: keep the raw right edge (except the first, 0-based one).
                if left != 0:
                    real_boundary.append(right)
            else:
                # Snap the edge to the least-frequent value inside the interval.
                min_freq_value = self.find_min_frequency_value(filtered_array)
                real_boundary.append(min_freq_value)
        # Prepend sentinels so np.digitize maps zeros/negatives to the first bins.
        real_boundary = [-1, 0] + real_boundary
        return real_boundary
    def make_n_bin_boundary(self, feature_list):
        max_data = max(feature_list)
        # Build bin_n geometric edges: N_bottom**1 .. N_bottom**bin_n == max_data.
        bin_n = int(self.bin)
        N_bottom = max_data ** (1 / bin_n)
        original_list = [N_bottom ** i for i in range(1, bin_n + 1)]
        original_list = [0] + original_list
        # Snap the candidate edges onto actual data values.
        real_boundary = self.make_tmp_boundary(original_list, feature_list)
        return real_boundary, N_bottom
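    # e.g. with max(feature_list) == 256 and self.bin == 4, N_bottom == 4 and
    # the raw edges (before snapping) are [0, 4, 16, 64, 256].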
    def make_boundary(self, logN_, feature_list):
        # logN_ is accepted for call compatibility; the base is searched below.
        max_data = max(feature_list)
        # Candidate log bases; the one yielding the most skewed (lowest
        # normalized entropy) bin occupancy wins.
        Nlog_list = [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
        # Alternative, coarser grid: [1.5, 2, 2.5, 3.0, 3.5, 4]
        min_entropy = sys.maxsize
        data_len = len(feature_list)
        # Fallback boundary in case no candidate improves on min_entropy.
        real_boundary = [2 ** i for i in range(1, 1000)]
        choice_N = 2
        for logN in Nlog_list:
            # Raw geometric edges logN**1, logN**2, ... up past max_data.
            original_list = [0]
            tmp = 1
            i = 1
            while tmp < max_data:
                tmp = logN ** i
                i += 1
                original_list.append(tmp)
            len_ = len(original_list)
            # Snap edges onto actual data values (sentinels -1 and 0 first).
            tmp_boundary = [-1, 0]
            for idx, scale in enumerate(original_list):
                left = scale
                if idx == (len_ - 1):
                    right = max_data * 1.5
                else:
                    right = original_list[idx + 1]
                filtered_array = feature_list[(feature_list > left) & (feature_list <= right)]
                if len(filtered_array) == 0:
                    # Empty interval: keep the raw right edge (except the 0-based one).
                    if left != 0:
                        tmp_boundary.append(right)
                    continue
                # Snap the edge to the least-frequent value inside the interval.
                min_freq_value = self.find_min_frequency_value(filtered_array)
                tmp_boundary.append(min_freq_value)
            count_list = self.make_countlist(feature_list, tmp_boundary)
            entropy = self.entropy_f(count_list, data_len)
            if min_entropy >= entropy:
                real_boundary = tmp_boundary
                min_entropy = entropy
                choice_N = logN
        return real_boundary, choice_N
    def multi_fit(self, data, use_entropy):
        # Fit one boundary list per feature column (rows are samples).
        boundary_dict = dict()
        array_ = np.array(data)
        arrayT = array_.T
        for idx, feature_data in tqdm(enumerate(arrayT), total=len(arrayT)):
            if idx in self.ignore_idx:
                continue
            if use_entropy:
                # Search the log base per feature by minimizing bin entropy.
                boundary_dict[idx], f_log_n = self.make_boundary(self.n_log, feature_data)
                self.n_log[idx] = f_log_n
            else:
                # Fixed number of geometric bins per feature.
                boundary_dict[idx], bin_n = self.make_n_bin_boundary(feature_data)
                self.n_log[idx] = bin_n
        self.boundary_dict = boundary_dict
    def multi_transform(self, data_list, is_tqdm=True):
        array_ = np.array(data_list)
        arrayT = array_.T
        iterator = enumerate(arrayT)
        if is_tqdm:
            iterator = tqdm(iterator, total=len(arrayT))
        for idx, data in iterator:
            if idx in self.ignore_idx:
                # Pass-through columns: encode each integer as a zero-padded
                # letter code, e.g. 0 -> '0A'.
                data = data.astype(int)
                if idx == 0:
                    signature_array = np.array([chr(x + 65).zfill(2) for x in data])
                    continue
                else:
                    result_array = np.array([chr(x + 65).zfill(2) for x in data])
            else:
                # Quantized columns: bin index -> two-character code such as '1C'.
                bins_ = np.digitize(data, self.boundary_dict[idx], right=True).astype(int)
                result_array = np.array([f'{x // 26}{chr(x % 26 + 65)}' for x in bins_])
            signature_array = np.vstack([signature_array, result_array])
        # Join the per-feature codes into one signature string per sample.
        quan_data = [''.join(col) for col in signature_array.T]
        return quan_data
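# Minimal usage sketch for log_Pattering (hypothetical data; rows are samples,
# columns 0-2 are pass-through integer codes):
#   lp = log_Pattering(ignore_idx=[0, 1, 2], n_log_=4)
#   lp.multi_fit(rows, use_entropy=False)
#   signatures = lp.multi_transform(rows)  # e.g. ['0A0B0C1D...', ...]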
def make_log_quan(train_raw, train_key, dataset_path, n_com, dp_log, use_entropy):
    # Select training rows by label: attack-only (1), benign-only (0), or all.
    train_attack = []
    if global_.attack == 1:
        for idx, key in enumerate(train_key):
            if key.split('+')[0].upper() != "BENIGN":
                train_attack.append(train_raw[idx])
    elif global_.attack == 0:
        for idx, key in enumerate(train_key):
            if key.split('+')[0].upper() == "BENIGN":
                train_attack.append(train_raw[idx])
    else:
        train_attack = train_raw
    # Fit the quantizer and persist it; use_entropy is forwarded rather than
    # hard-coded to False so the caller's choice actually takes effect.
    pattern_log = log_Pattering(ignore_idx=[0, 1, 2], n_log_=n_com, n_jobs=1, using_entropy=use_entropy)
    pattern_log.multi_fit(train_attack, use_entropy)
    with open(f"./preprocessing/{dataset_path}/LOG/{dp_log}", 'wb') as f:
        pickle.dump(pattern_log, f)
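# Hypothetical call (the dataset name and pickle file name are placeholders,
# not part of this module):
#   make_log_quan(train_raw, train_key, "CICIDS2017", n_com=4,
#                 dp_log="log_quan.pkl", use_entropy=False)
# This writes the fitted quantizer to ./preprocessing/CICIDS2017/LOG/log_quan.pkl.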