-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathRDF_model.py
More file actions
77 lines (60 loc) · 2.53 KB
/
RDF_model.py
File metadata and controls
77 lines (60 loc) · 2.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import scipy.sparse as sp
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier # Random Forest Classifier
from sklearn.metrics import accuracy_score, classification_report
# Download the NLTK English stopword list (no-op if already cached locally).
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
# Load the dataset; the code below expects 'review' and 'sentiment' columns.
df = pd.read_csv("IMDB Dataset.csv")
# Preprocess the data:
# lower-case the review text and strip HTML tags / special characters.
import re
def preprocess_text(text):
    """Normalize one raw review string.

    Lower-cases the text, strips HTML tags, and removes every character
    that is neither a word character nor whitespace.
    """
    lowered = text.lower()
    # Drop HTML markup first so tag names are not kept as words.
    without_tags = re.sub(r'<.*?>', '', lowered)
    return re.sub(r'[^\w\s]', '', without_tags)
# Apply the normalization above to every review in the dataframe.
df['review']=df['review'].apply(preprocess_text)
# Function to remove stopwords
def remove_stopwords(text):
    """Return *text* with English stopwords removed.

    Tokenizes on word-character runs, filters against the module-level
    ``stop_words`` set (built from NLTK above), and rejoins with spaces.
    """
    tokens = re.findall(r'\w+', text.lower())  # word-character runs only
    kept = (tok for tok in tokens if tok not in stop_words)
    return ' '.join(kept)
# Remove English stopwords from every review.
df['review'] = df['review'].apply(remove_stopwords)
# Encode the sentiment labels as integers: positive -> 1, negative -> 0.
df['sentiment'] = df['sentiment'].map({'positive': 1, 'negative': 0})
# Apply TF-IDF vectorization with a limited feature size to reduce memory usage.
vectorizer = TfidfVectorizer(max_features=5000)
X = vectorizer.fit_transform(df['review'])  # sparse (n_reviews x <=5000) matrix
y = df['sentiment']
# Hold out 20% of the data for evaluation; fixed seed for reproducibility.
# (The redundant `x = X` alias was removed — X is used directly.)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train a Random Forest classifier on the TF-IDF features.
clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(x_train, y_train)
# Predict the sentiment of the held-out test set.
y_pred = clf.predict(x_test)
# Report overall accuracy plus the per-class precision/recall/F1 breakdown.
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: {:.2f}%".format(accuracy * 100))
# Classification report
print("\nClassification Report:\n", classification_report(y_test, y_pred))
# Persist both the trained model and the fitted vectorizer: inference code
# must reload the SAME vectorizer to reproduce the training feature mapping.
import pickle
with open("random_forest_sentiment.pkl", "wb") as model_file:
    pickle.dump(clf, model_file)
print("Model saved to random_forest_sentiment.pkl successfully!")
with open("tfidf_vectorizer.pkl", "wb") as vectorizer_file:
    pickle.dump(vectorizer, vectorizer_file)
print("Vectorizer saved to tfidf_vectorizer.pkl successfully!")