{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Recurrent neural net with embeddings\n", "Adapted from: https://github.com/fchollet/keras/blob/master/examples/imdb_lstm.py" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "'''Trains an LSTM model on the IMDB sentiment classification task.\n", "The dataset is actually too small for LSTM to be of any advantage\n", "compared to simpler, much faster methods such as TF-IDF + LogReg.\n", "# Notes\n", "- RNNs are tricky. Choice of batch size is important,\n", "choice of loss and optimizer is critical, etc.\n", "Some configurations won't converge.\n", "- LSTM loss decrease patterns during training can be quite different\n", "from what you see with CNNs/MLPs/etc.\n", "'''\n", "from keras.preprocessing import sequence\n", "from keras.models import Sequential\n", "from keras.layers import Dense, Embedding\n", "from keras.layers import LSTM\n", "from keras.datasets import imdb\n", "\n", "max_features = 5000\n", "maxlen = 80\n", "batch_size = 32\n", "embedding_dims = 50\n", "hidden_dims = 250\n", "epochs = 2" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print('Loading data...')\n", "(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\n", "print(len(x_train), 'train sequences')\n", "print(len(x_test), 'test sequences')\n", "\n", "print('Pad sequences (samples x time)')\n", "x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n", "x_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n", "print('x_train shape:', x_train.shape)\n", "print('x_test shape:', x_test.shape)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print('Build model...')\n", "model = Sequential()\n", "model.add(Embedding(max_features, embedding_dims))\n", "model.add(LSTM(hidden_dims, dropout=0.2, recurrent_dropout=0.2))\n", "model.add(Dense(1, activation='sigmoid'))\n", "\n", "# try using different optimizers and different optimizer configs\n", "model.compile(loss='binary_crossentropy',\n", " optimizer='adam',\n", " metrics=['accuracy'])\n", "\n", "model.summary()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print('Train...')\n", "model.fit(x_train, y_train,\n", " batch_size=batch_size,\n", " epochs=epochs,\n", " validation_data=(x_test, y_test))\n", "score, acc = model.evaluate(x_test, y_test,\n", " batch_size=batch_size)\n", "print('Test score:', score)\n", "print('Test accuracy:', acc)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Testing" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "predictions = model.predict(x_test)\n", "predictions" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "predictions.round()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "y_test" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "errors = y_test-predictions.round().flatten()\n", "errors" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "len(errors[errors==0])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "len(errors[errors==0])/len(errors)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": 
[], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (TA)", "language": "python", "name": "ta" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" } }, "nbformat": 4, "nbformat_minor": 2 }