-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathprepare.py
More file actions
40 lines (31 loc) · 1.03 KB
/
prepare.py
File metadata and controls
40 lines (31 loc) · 1.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
"""
Helpers to prepare the corpus for tokenization
----------------------------------------------
"""
import re
import string
from nlpkit.utils import space_split, space_join, flatten
def striphtml(data):
    """Return *data* with HTML/XML tags removed.

    Tags are matched non-greedily (``<.*?>``); ``.`` does not cross
    newlines, so a tag split over two lines is left untouched.
    """
    return re.sub(r'<.*?>', '', data)
def filter_printable(s):
    """Return *s* with every character outside ``string.printable`` dropped.

    ``string.printable`` covers ASCII letters, digits, punctuation and
    whitespace, so this effectively strips non-ASCII characters.
    """
    allowed = set(string.printable)
    return ''.join(ch for ch in s if ch in allowed)
def punctuation_cleaning(words):
    """Split each word on its punctuation characters.

    Every punctuation character (``string.punctuation``) is mapped to a
    space, then each word is split on spaces with empty fragments
    discarded.  Returns a list of lists: one (possibly empty) list of
    fragments per input word.
    """
    to_space = str.maketrans(dict.fromkeys(string.punctuation, ' '))
    return [
        [piece for piece in word.translate(to_space).split(' ') if piece]
        for word in words
    ]
def prepare_line(line):
    """Normalise one raw corpus line for tokenization.

    Pipeline: strip HTML tags, drop non-printable characters, lowercase,
    blank out lines that start with a URL, then split punctuation into
    separate tokens and re-join everything with single spaces.
    """
    text = filter_printable(striphtml(line)).lower()
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', text)
    tokens = flatten(punctuation_cleaning(space_split(text)))
    return space_join(tokens)