W2V2.py
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

nltk.download('stopwords')
def Sentence2Vec(filename, glovepath, embedding_dim=100, max_length=120):
    """Convert article text to padded GloVe embedding sequences.

    Returns X_train of shape (n_articles, max_length, embedding_dim)
    and y_train with the integer bias labels.
    """
    # Load the articles and keep only the text before the '---' separator.
    df_train = pd.read_excel(filename, engine='openpyxl')
    T1 = df_train['content_original'].str.split(' \n\n---\n\n').str[0]
    # Normalize: drop hyphens, punctuation, and newlines; lowercase everything.
    df_train['content_original'] = (
        T1.str.replace('-', ' ')
          .str.replace(r'[^\w\s]', '', regex=True)
          .str.replace('\n', ' ')
          .str.lower()
    )
    df = df_train[['content_original', 'source', 'bias_text', 'bias']]

    # Remove English stopwords.
    T = df['content_original']
    stop = set(stopwords.words('english'))
    T = T.apply(lambda words: ' '.join(
        word for word in words.split() if word not in stop))

    # Build the vocabulary from the cleaned text.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(T)
    word_index = tokenizer.word_index
    print("number of words in vocabulary:", len(word_index))

    # Replace each word with its integer ID, then pad/truncate to max_length.
    trunc_type = 'post'
    padding_type = 'post'
    text_sequence = tokenizer.texts_to_sequences(T)
    text_sequence = pad_sequences(text_sequence, maxlen=max_length,
                                  padding=padding_type, truncating=trunc_type)
    print("words in sentences replaced with word IDs:", text_sequence)

    size_of_vocabulary = len(word_index) + 1  # +1 for the padding index 0
    print("The size of vocabulary:", size_of_vocabulary)

    # Load the pre-trained GloVe vectors into a word -> vector dict.
    embeddings_index = {}
    with open(glovepath, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Loaded %s word vectors.' % len(embeddings_index))

    # Build the embedding matrix; rows for words missing from GloVe stay zero.
    embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector

    # Look up every word ID in the embedding matrix:
    # (n_articles, max_length) -> (n_articles, max_length, embedding_dim).
    X_train = embedding_matrix[text_sequence]
    print(X_train.shape)

    y_train = df['bias'].to_numpy()
    return X_train, y_train
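
# Minimal usage sketch. Assumptions not taken from this repo: 'articles.xlsx'
# stands in for an Excel file with the columns used above, and
# 'glove.6B.100d.txt' for a local copy of the 100-dimensional GloVe vectors;
# both paths are placeholders.
if __name__ == '__main__':
    X_train, y_train = Sentence2Vec('articles.xlsx',
                                    'glove.6B.100d.txt',
                                    embedding_dim=100,
                                    max_length=120)
    print(X_train.shape, y_train.shape)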