functions_helpers_eval.py
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# NOTE: the NLTK resources "punkt", "stopwords", and "wordnet" must be
# downloaded (e.g. via nltk.download) before these helpers are used.


def handle_negation(text):
    """Lowercases text, expands common negative contractions, and attaches
    "not" to the following word (e.g. "not good" -> "not_good")."""
    text = text.lower()
    # Expand contractions so the negation marker below can catch them
    text = re.sub(r"\b(can't)\b", "can not", text)
    text = re.sub(r"\b(don't)\b", "do not", text)
    text = re.sub(r"\b(doesn't)\b", "does not", text)
    text = re.sub(r"\b(won't)\b", "will not", text)
    text = re.sub(r"\b(isn't)\b", "is not", text)
    text = re.sub(r"\b(aren't)\b", "are not", text)
    text = re.sub(r"\b(hasn't)\b", "has not", text)
    text = re.sub(r"\b(haven't)\b", "have not", text)
    text = re.sub(r"\b(shouldn't)\b", "should not", text)
    # Join "not" to the word it negates so the negation survives tokenization
    text = re.sub(r"not\s+(\w+)", r"not_\1", text)
    return text


def remove_special_characters(text):
    """Removes everything except lowercase letters, whitespace, and underscores."""
    return re.sub(r"[^a-z\s_]", '', text)


def tokenize_text(text):
    """Tokenizes text into words."""
    return word_tokenize(text)


def remove_stopwords(tokens):
    """Removes stopwords from a tokenized list."""
    stop_words = set(stopwords.words("english"))
    return [word for word in tokens if word.lower() not in stop_words]


def lemmatize_tokens(tokens):
    """Lemmatizes each token (reduces it to its base form)."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token) for token in tokens]


def preprocess_LLM_words(issues):
    """Runs the full preprocessing pipeline over each issue's word list and
    returns the issues with normalized, de-duplicated words."""
    processed_issues = []
    for issue in issues:
        words = []
        for phrase in issue["words"]:
            phrase = handle_negation(phrase)              # Step 1: Handle negations and lowercase
            phrase = remove_special_characters(phrase)    # Step 2: Remove special characters
            tokens = tokenize_text(phrase)                # Step 3: Tokenize
            tokens = remove_stopwords(tokens)             # Step 4: Remove stopwords
            lemmatized_tokens = lemmatize_tokens(tokens)  # Step 5: Lemmatize
            words.extend(lemmatized_tokens)               # Flatten list
        unique_words = list(dict.fromkeys(words))         # De-duplicate, preserving order
        processed_issues.append({"name": issue["name"], "words": unique_words})
    return processed_issues
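
Below is a minimal usage sketch, not part of the original file, showing how preprocess_LLM_words might be called. It assumes the helpers above are importable as functions_helpers_eval, that the required NLTK corpora have been downloaded, and that the issue data shown here is purely illustrative.

# Hypothetical usage example (assumptions: functions_helpers_eval is on the
# path; sample issues are made up for illustration).
import nltk
from functions_helpers_eval import preprocess_LLM_words

# One-time setup of the NLTK resources the helpers rely on.
nltk.download("punkt")
nltk.download("stopwords")
nltk.download("wordnet")

issues = [
    {"name": "latency", "words": ["The response isn't fast enough", "Slow loading times!"]},
    {"name": "accuracy", "words": ["Answers don't match the documentation"]},
]

for issue in preprocess_LLM_words(issues):
    print(issue["name"], "->", issue["words"])
# The first issue yields tokens such as "response", "not_fast", "slow",
# "loading", "time"; exact output depends on the installed NLTK models
# and stopword list.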