Import various modules that we need for this notebook (now using Keras 1.0.0)
%pylab inline
import copy
import numpy as np
import pandas as pd
import sys
import os
import re
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras.layers.wrappers import TimeDistributed
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from gensim.models import word2vec
Let's work through a solution to the first part of problem set 8, where you applied various techniques to the STL-10 dataset.
dir_in = "../../../class_data/stl10/"
X_train = np.genfromtxt(dir_in + 'X_train_new.csv', delimiter=',')
Y_train = np.genfromtxt(dir_in + 'Y_train.csv', delimiter=',')
X_test = np.genfromtxt(dir_in + 'X_test_new.csv', delimiter=',')
Y_test = np.genfromtxt(dir_in + 'Y_test.csv', delimiter=',')
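As a quick sanity check, we can print the shapes of the loaded arrays before moving on (the exact dimensions depend on how the preprocessed files were produced):
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)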
Next, construct a flattened (integer class label) version of the response, for the linear models:
Y_train_flat = np.zeros(Y_train.shape[0])
Y_test_flat = np.zeros(Y_test.shape[0])
for i in range(10):
    Y_train_flat[Y_train[:,i] == 1] = i
    Y_test_flat[Y_test[:,i] == 1] = i
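Since each row of the response is a one-hot vector, an equivalent and more compact construction (assuming exactly one 1 per row) uses argmax:
Y_train_flat = Y_train.argmax(axis=1)
Y_test_flat = Y_test.argmax(axis=1)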
We now build and evaluate a neural network.
model = Sequential()
model.add(Dense(1024, input_shape = (X_train.shape[1],)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
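Before compiling, it can be helpful to print a summary of the architecture and its parameter counts:
model.summary()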
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms,
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=32, nb_epoch=5, verbose=1)
test_rate = model.evaluate(X_test, Y_test)[1]
print("Test classification rate %0.05f" % test_rate)
And now, a basic linear support vector machine.
svc_obj = SVC(kernel='linear', C=1)
svc_obj.fit(X_train, Y_train_flat)
pred = svc_obj.predict(X_test)
pd.crosstab(pred, Y_test_flat)
c_rate = sum(pred == Y_test_flat) / len(pred)
print("Test classification rate %0.05f" % c_rate)
And finally, an L1-penalized logistic regression:
lr = LogisticRegression(penalty = 'l1')
lr.fit(X_train, Y_train_flat)
pred = lr.predict(X_test)
pd.crosstab(pred, Y_test_flat)
c_rate = sum(pred == Y_test_flat) / len(pred)
print("Test classification rate %0.05f" % c_rate)
Now, let's read in the Chicago crime dataset and see how well we can get a neural network to perform on it.
dir_in = "../../../class_data/chi_python/"
X_train = np.genfromtxt(dir_in + 'chiCrimeMat_X_train.csv', delimiter=',')
Y_train = np.genfromtxt(dir_in + 'chiCrimeMat_Y_train.csv', delimiter=',')
X_test = np.genfromtxt(dir_in + 'chiCrimeMat_X_test.csv', delimiter=',')
Y_test = np.genfromtxt(dir_in + 'chiCrimeMat_Y_test.csv', delimiter=',')
Now, build a neural network for this dataset:
model = Sequential()
model.add(Dense(1024, input_shape = (434,)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(5))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms,
              metrics=['accuracy'])
# downsample, if need be:
num_sample = X_train.shape[0]
model.fit(X_train[:num_sample], Y_train[:num_sample], batch_size=32,
          nb_epoch=10, verbose=1)
test_rate = model.evaluate(X_test, Y_test)[1]
print("Test classification rate %0.05f" % test_rate)
Now, let's use the word2vec embeddings on the IMDB sentiment analysis corpus. This will allow us to use a significantly larger vocabulary of words. I'll start by reading in the IMDB corpus again from the raw text.
path = "../../../class_data/aclImdb/"
ff = [path + "train/pos/" + x for x in os.listdir(path + "train/pos")] + \
     [path + "train/neg/" + x for x in os.listdir(path + "train/neg")] + \
     [path + "test/pos/" + x for x in os.listdir(path + "test/pos")] + \
     [path + "test/neg/" + x for x in os.listdir(path + "test/neg")]
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
    return TAG_RE.sub('', text)
input_label = ([1] * 12500 + [0] * 12500) * 2
input_text = []
for f in ff:
    with open(f) as fin:
        input_text += [remove_tags(" ".join(fin.readlines()))]
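At this point there should be 50,000 reviews (25,000 train plus 25,000 test), matching the length of the label vector:
print(len(input_text), len(input_label))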
I'll use a significantly larger vocabulary this time, as the embeddings are essentially given to us.
num_words = 5000
max_len = 400
tok = Tokenizer(num_words)
tok.fit_on_texts(input_text[:25000])
X_train = tok.texts_to_sequences(input_text[:25000])
X_test = tok.texts_to_sequences(input_text[25000:])
y_train = input_label[:25000]
y_test = input_label[25000:]
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
# build the list of the num_words most common words, ordered by their tokenizer index
words = []
for i in range(num_words):
    words += [key for key, value in tok.word_index.items() if value == i+1]
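The loop above rescans the whole word index for each rank; a quicker way to build the same ordered list, assuming word_index maps words to 1-based frequency ranks as in Keras 1.0, is to sort the index once:
sorted_pairs = sorted(tok.word_index.items(), key=lambda kv: kv[1])
words = [w for w, idx in sorted_pairs[:num_words]]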
loc = "/Users/taylor/files/word2vec_python/GoogleNews-vectors-negative300.bin"
w2v = word2vec.Word2Vec.load_word2vec_format(loc, binary=True)
# look up each word's pre-trained vector; words missing from the word2vec
# vocabulary keep a zero vector
weights = np.zeros((num_words, 300))
for idx, w in enumerate(words):
    try:
        weights[idx,:] = w2v[w]
    except KeyError:
        pass
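As a rough check on coverage, we can count how many of the rows actually received a pre-trained vector (zero rows correspond to words missing from the word2vec vocabulary):
n_found = int(np.sum(np.any(weights != 0, axis=1)))
print("Found pre-trained vectors for %d of %d words" % (n_found, num_words))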
model = Sequential()
model.add(Embedding(num_words, 300, input_length=max_len))
model.add(Dropout(0.5))
model.add(GRU(16,activation='relu'))
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.layers[0].set_weights([weights])
model.layers[0].trainable = False
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=32, nb_epoch=5, verbose=1,
          validation_data=(X_test, y_test))
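Finally, as with the earlier models, we can report the classification rate on the held-out test set:
test_rate = model.evaluate(X_test, y_test)[1]
print("Test classification rate %0.05f" % test_rate)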