# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
from keras.layers import Dense,LSTM,Bidirectional,Embedding,Conv1D,MaxPooling1D,GlobalMaxPooling1D,Dropout,SpatialDropout1D,GRU
from keras.models import Sequential,Model
import keras.preprocessing as preprocessing
import keras.backend as K
from nltk.corpus import stopwords
from keras.utils.np_utils import to_categorical
import nltk
import matplotlib.pyplot as plt
import re
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from nltk import WordNetLemmatizer,word_tokenize
from keras import Input
stoplist = stopwords.words('english')
data_train = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/train.tsv',sep='\t')
data_test = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/test.tsv',sep='\t')
sub = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/sampleSubmission.csv')
data_train_X = data_train.Phrase.values
X_test = list(data_test.Phrase.values)
data_train_Y = list(data_train.Sentiment.values)
lemmat = WordNetLemmatizer()
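# The Attention layer below scores each time step t of the RNN output h_t as
# e_t = tanh(W . h_t + b), normalises the scores with a softmax
# a_t = exp(e_t) / sum_k exp(e_k), and returns the weighted sum over time, sum_t a_t * h_t.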
class Attention(Layer):
"""
Keras Layer that implements an Attention mechanism for temporal data.
Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
    Just put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The feature dimension is inferred from the output shape of the RNN; the number of
    time steps must be passed in explicitly as `step_dim`.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention(maxlen))
"""
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
self.features_dim = input_shape[-1]
if self.bias:
self.b = self.add_weight((input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input, input_mask=None):
return None
def call(self, x, mask=None):
features_dim = self.features_dim
step_dim = self.step_dim
eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], self.features_dim
def clean(data):
    data = [re.sub('[^a-zA-Z]', ' ', phrase) for phrase in data]  # strip non-alphabetic characters
    data_x = []
    for phrase in data:
        data_word = word_tokenize(phrase)  # tokenize
        # remove stopwords, but fall back to the original tokens if nothing would be left
        kept = [word for word in data_word if word not in stoplist]
        if kept:
            data_word = kept
        data_word1 = [lemmat.lemmatize(word.lower()) for word in data_word]  # lowercase and lemmatize
        data_x.append(' '.join(data_word1))
    return data_x
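# Rough example (exact output depends on the NLTK stopword list and lemmatizer):
# clean(["A series of escapades"]) -> ['a series escapade']; note the stopword check
# runs before lowercasing, so capitalised stopwords such as "A" are kept.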
data_train_X = clean(data_train_X)
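# one-hot encode the 0-4 sentiment labels into 5-dimensional vectors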
data_train_Y = to_categorical(data_train_Y)
X_train,X_val,Y_train,Y_val = train_test_split(data_train_X,data_train_Y,stratify=data_train_Y,test_size=0.2,random_state=123)  # stratified train/validation split
def count_word(data_word):
    word_set = set()
    for j in data_word:
        for k in word_tokenize(j):
            word_set.add(k)
    word_count = len(word_set)  # number of unique words
    return word_count
X_train_count = count_word(X_train)
def word_maxlen(data_x):  # length (in tokens) of the longest phrase
    len_list = []
    for i in data_x:
        i = word_tokenize(i)
        len_list.append(len(i))
    maxlen = max(len_list)
    return maxlen
X_train_maxlen = word_maxlen(X_train)
tokenizer = Tokenizer(num_words=X_train_count)  # build the tokenizer; vocabulary capped at the number of unique training words
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_train = preprocessing.sequence.pad_sequences(X_train,maxlen=X_train_maxlen)  # pad / truncate to a fixed length
X_val = tokenizer.texts_to_sequences(X_val)
X_val = preprocessing.sequence.pad_sequences(X_val,maxlen=X_train_maxlen)
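# pad_sequences pads/truncates at the start of each sequence by default ('pre'),
# so the shorter phrases above end up left-padded with zeros to length X_train_maxlen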
word_index = tokenizer.word_index  # word -> integer index mapping
glove_base = r'../input/mydata123'
glove_dir = os.path.join(glove_base,'glove.6B.100d.txt')  # pre-trained GloVe word vectors
emb_vec_dict = {}
with open(glove_dir,'r',encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        vec = np.asarray(values[1:], dtype='float32')
        emb_vec_dict[word] = vec
emd_dim = 100
emb_num = np.stack(emb_vec_dict.values())  # stack all GloVe vectors into one matrix
emb_mat = np.random.normal(emb_num.mean(),emb_num.std(),(X_train_count,emd_dim))  # initialise the embedding matrix from the GloVe mean/std
for word,i in word_index.items():
if i < X_train_count:
emb_vec = emb_vec_dict.get(word)
if emb_vec is not None:
emb_mat[i] = emb_vec
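# Words with no GloVe vector keep their random (GloVe mean/std) rows; the Tokenizer never
# assigns index 0 to a word, so row 0 effectively serves as the padding embedding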
X_test = clean(X_test)
X_test = tokenizer.texts_to_sequences(X_test)
X_test = preprocessing.sequence.pad_sequences(X_test,maxlen=X_train_maxlen)
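# The test set reuses the tokenizer fitted on the training data, so any test-only
# (out-of-vocabulary) words are simply dropped from the sequences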
input_words = Input(shape=(X_train_maxlen,))
embedding = Embedding(X_train_count,emd_dim,input_length=X_train_maxlen,weights=[emb_mat],trainable=True)(input_words)  # initialise with pre-trained GloVe vectors, fine-tuned during training
conv1d = Conv1D(127,7,padding='same')(embedding)
max_pool = MaxPooling1D(5)(conv1d)
drop_spa = SpatialDropout1D(0.5)(max_pool)
bi_gru = Bidirectional(GRU(128,return_sequences=True))(drop_spa)
bi_gru1 = Bidirectional(GRU(64,return_sequences=True))(bi_gru)
attention = Attention(X_train_maxlen // 5)(bi_gru1)  # step_dim must equal the sequence length after MaxPooling1D(5)
dense = Dense(5,activation='softmax')(attention)
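# Rough shape walkthrough (batch dimension omitted, maxlen = X_train_maxlen):
#   Embedding       -> (maxlen, 100)
#   Conv1D, same    -> (maxlen, 127)
#   MaxPooling1D(5) -> (maxlen // 5, 127)
#   BiGRU(128)      -> (maxlen // 5, 256)
#   BiGRU(64)       -> (maxlen // 5, 128)
#   Attention       -> (128,)
#   Dense softmax   -> (5,)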
model = Model(inputs=input_words, outputs=dense)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
model.fit(X_train,Y_train,batch_size=1024,epochs=10,shuffle=True,validation_data=(X_val,Y_val),verbose=1)
y_pred = model.predict(X_test, batch_size=1024)
y_pred = y_pred.argmax(axis=1).astype(int)
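# argmax over the 5-way softmax gives the predicted sentiment class (labels 0-4 in this competition)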
sub.Sentiment=y_pred
sub.to_csv('re10.csv',index=False)
# Any results you write to the current directory are saved as output.