Commit 7aeadacd by Paktalin

IMDB with simple lstm and try with sequence autoencoder

parent 49615313
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Dropout
from keras.datasets import imdb
from keras.preprocessing import sequence
import numpy as np
...@@ -37,4 +37,4 @@ model.fit(x_train, y_train, batch_size=batch_size, epochs=15, validation_data=(x
model.save('my_model.h5')
# print the model's score and accuracy (evaluate returns [loss, accuracy])
print('Score: %f\nAccuracy: %f' % tuple(model.evaluate(x_test, y_test, batch_size=batch_size)))
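The hunk above shows only the tail of the classifier script; its first ~37 lines are elided by the diff. For orientation, a minimal version of that elided portion might look like the following sketch. Every hyperparameter here (vocabulary size, sequence length, layer widths, dropout rate) is an assumption, not the commit's actual value.

# hypothetical reconstruction of the elided part of this script;
# all hyperparameters below are assumptions
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Dropout
from keras.datasets import imdb
from keras.preprocessing import sequence

max_features = 20000  # assumed vocabulary size
maxlen = 80           # assumed review length after padding/truncation
batch_size = 32       # assumed batch size

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128))
model.add(Dropout(0.2))  # the commit adds Dropout to the imports, so presumably a layer like this
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

The next file in the commit builds an LSTM sequence autoencoder on the same data: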
from keras.models import Sequential
from keras.layers import TimeDistributed, Embedding, RepeatVector, Dense, LSTM
from keras.datasets import imdb
from keras.preprocessing import sequence

max_features = 10000
maxlen = 9
batch_size = 128

(x_train, _), (x_test, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

# reshape input into [samples, timesteps, features]
timesteps = x_train.shape[1]
samples = x_train.shape[0]
x_train = x_train.reshape((samples, timesteps, 1))
x_test = x_test.reshape((x_test.shape[0], timesteps, 1))

# sequence autoencoder: compress each sequence into a single 512-dim state,
# repeat that state once per timestep, then decode it back into the sequence
model = Sequential()
# model.add(Embedding(max_features, 50, batch_input_shape=(batch_size, timesteps, 1)))
model.add(LSTM(512, activation='relu', input_shape=(timesteps, 1)))
model.add(RepeatVector(timesteps))
model.add(LSTM(512, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(1)))
model.compile(optimizer='adam', loss='mse', metrics=['acc'])

model.fit(x_train, x_train, batch_size=batch_size, epochs=300)
print(model.evaluate(x_test, x_test, batch_size=batch_size))
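Not part of the commit, but a common next step with a sequence autoencoder is to reuse the trained encoder half as a fixed-length feature extractor. A minimal sketch, assuming the Sequential model above has been fit:

from keras.models import Model

# the first LSTM layer emits the 512-dim encoding of each input sequence
encoder = Model(inputs=model.input, outputs=model.layers[0].output)
codes = encoder.predict(x_test)  # shape: (samples, 512)

The third file in the commit drops the recurrent layers and tries a plain dense autoencoder over whole padded reviews instead: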
import numpy as np
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.layers import Input, Dense
from keras.models import Model

max_features = 10000
maxlen = 400

(x_train, _), (x_test, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

# add a timestep axis and scale the word indices into [0, 1] so that a
# sigmoid reconstruction trained with binary cross-entropy is well-defined
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
x_train = x_train.astype('float32') / max_features

encoding_dim = 80
input_seq = Input(shape=(None, maxlen))
encoded = Dense(encoding_dim, activation='relu')(input_seq)
decoded = Dense(maxlen, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction through the 80-dim bottleneck
autoencoder = Model(input_seq, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=500, batch_size=128)
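Again as a sketch rather than part of the commit, the trained bottleneck can be exposed as a standalone encoder that maps each padded review to its 80-dimensional code; it shares the already-trained Dense layer, so no extra fitting is needed:

# standalone encoder reusing the trained weights of the first Dense layer
encoder = Model(input_seq, encoded)
codes = encoder.predict(x_train)  # shape: (samples, 1, encoding_dim)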