NN.py
from helpers import csv_to_df
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import Sequential
from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
from keras.callbacks import EarlyStopping
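
# Assumes training.csv and test.csv each provide an 'article_words' column of
# comma-separated tokens plus a 'topic' label column (this matches the
# split=',' tokenizer setting and the get_dummies calls below).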
if __name__ == "__main__":
    training_set = csv_to_df('training.csv')
    test_set = csv_to_df('test.csv')

    MAX_NB_WORDS = 50000        # vocabulary cap for the tokenizer
    MAX_SEQUENCE_LENGTH = 500   # articles are padded/truncated to this length
    EMBEDDING_DIM = 100         # size of the learned word vectors

    # article_words holds comma-separated tokens, hence split=','.
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~',
                          lower=True, split=',')
    tokenizer.fit_on_texts(training_set['article_words'].values)
    word_index = tokenizer.word_index

    # Convert articles to padded integer sequences and topics to one-hot rows.
    X_train = tokenizer.texts_to_sequences(training_set['article_words'].values)
    X_train = pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH)
    y_train = pd.get_dummies(training_set['topic']).values

    X_test = tokenizer.texts_to_sequences(test_set['article_words'].values)
    X_test = pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH)
    y_test = pd.get_dummies(test_set['topic']).values
    # Single-layer LSTM classifier over learned embeddings.
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
    model.add(SpatialDropout1D(0.2))  # drops whole embedding channels, not single cells
    model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
    # One output unit per topic; y_train.shape[1] avoids hard-coding the class count.
    model.add(Dense(y_train.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
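
    # Uncomment to inspect layer shapes and parameter counts:
    # model.summary()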
    epochs = 5
    batch_size = 64
    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.1,
        callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)]
    )
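    # EarlyStopping halts training once val_loss fails to improve by at least
    # min_delta for `patience` consecutive epochs, so fewer than `epochs`
    # rounds may actually run; `history.history` records per-epoch metrics.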
    accr = model.evaluate(X_test, y_test)
    print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0], accr[1]))

    # pd.get_dummies orders its columns alphabetically, so the prediction
    # index must be mapped through that same ordering (value_counts() would
    # give a frequency ordering and mislabel the argmax).
    labels = list(pd.get_dummies(training_set['topic']).columns)

    # Sanity check: classify the first test article.
    words = [test_set['article_words'][0]]
    seq = tokenizer.texts_to_sequences(words)
    padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
    pred = model.predict(padded)
    print(pred)
    print(labels[np.argmax(pred)])
    # Bulk variant: label every test article at once.
    # y_pred = model.predict(X_test)
    # print([labels[i] for i in np.argmax(y_pred, axis=1)])
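
# For reference, a minimal csv_to_df sketch, assuming the helper in helpers.py
# (not shown in this file) simply wraps pandas.read_csv; the real
# implementation may do additional cleaning:
#
#     def csv_to_df(filename):
#         return pd.read_csv(filename)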