Dr. George Jen


Deep learning can be made much simpler with Keras as the front end API; in this example, CNTK is used as the back end. Training ran for 20 iterations (epochs) and finished with decent accuracy and loss:

 

loss: 0.0483 - acc: 0.9852 - val_loss: 0.0234 - val_acc: 0.9923
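
For reference, one common way to point Keras at the CNTK back end (assuming both Keras and CNTK are installed) is the KERAS_BACKEND environment variable, set before keras is imported; editing the "backend" field in ~/.keras/keras.json works as well. A minimal sketch:

import os
os.environ['KERAS_BACKEND'] = 'cntk'  # must be set before the first "import keras"
import keras
print(keras.backend.backend())        # should print: cntk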

 

The following code trains the model on the standard MNIST data set; at the end, the trained model is saved to disk so it can be reused later.

 

#Following is the Python code

import numpy as np

from keras.datasets import mnist

import keras

import gc

import os

 

from keras.models import Sequential, Model

from keras.layers import Input, Dense, Dropout, Flatten

from keras.layers import Conv2D, MaxPooling2D

 

#Load the MNIST data set; keras.datasets downloads it from its source URL on first use

 

(X_train, y_train), (X_test, y_test) = mnist.load_data()

print(X_train[0].shape)

print(y_train[0])

X_train = X_train.reshape(X_train.shape[0],28,28,1).astype('float32')

X_test = X_test.reshape(X_test.shape[0],28,28,1).astype('float32')

X_train /= 255

X_test /= 255
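
# Quick sanity check on the reshaped arrays (assuming the standard MNIST split
# of 60,000 training images and 10,000 test images):
print(X_train.shape)  # expected: (60000, 28, 28, 1)
print(X_test.shape)   # expected: (10000, 28, 28, 1)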

 

 

 

'''

Helper function for one hot encoding: each label 0-9 becomes a length-10 binary
vector with a 1 in the position of its category and 0 everywhere else.

'''

def tran_y(y):

    y_ohe = np.zeros(10)

    y_ohe[y] = 1

    return y_ohe

 

#Below creates the one hot encodings for the labels in y_train and y_test

 

y_train_ohe = np.array([tran_y(y_train[i]) for i in range(len(y_train))])

y_test_ohe = np.array([tran_y(y_test[i]) for i in range(len(y_test))])

y_train_ohe = y_train_ohe.astype('float32')

y_test_ohe = y_test_ohe.astype('float32')
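
# Note: Keras ships an equivalent helper, keras.utils.to_categorical; the two
# commented lines below would produce an equivalent one hot encoding to tran_y
# above (shown only as an alternative):
# y_train_ohe = keras.utils.to_categorical(y_train, num_classes=10)
# y_test_ohe = keras.utils.to_categorical(y_test, num_classes=10)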

 

 

 

# Define the Keras model

 

 

model = Sequential()

model.add(Conv2D(filters = 64, kernel_size = (3, 3), strides = (1, 1), padding = 'same', activation = 'relu',

          input_shape = (28,28,1)))

model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Dropout(0.5))

model.add(Conv2D(128, kernel_size = (3, 3), strides = (1, 1), padding = 'same', activation = 'relu'))

model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Dropout(0.5))

model.add(Conv2D(256, kernel_size = (3, 3), strides = (1, 1), padding = 'same', activation = 'relu'))

model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Dropout(0.5))

model.add(Flatten())

model.add(Dense(128, activation = 'relu'))

model.add(Dense(64, activation = 'relu'))

model.add(Dense(32, activation = 'relu'))

model.add(Dense(10, activation = 'softmax'))
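
# Optionally, print the layer-by-layer architecture and parameter counts to
# verify the network defined above:
model.summary()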

 

 

# Define the optimizer and compile the model

 

 

adamoptimizer = keras.optimizers.Adam(lr = 1e-4)

model.compile(loss = 'categorical_crossentropy',

              optimizer = adamoptimizer, metrics = ['accuracy'])

 

 

# Train the model

 

 

model.fit(X_train, y_train_ohe, validation_data = (X_test, y_test_ohe), epochs = 20, batch_size = 128)
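
# Optional: a ModelCheckpoint callback could save the best model (by validation
# loss) seen during training, instead of only the final one; a minimal sketch,
# with a hypothetical file name keras_mnist_best.h5:
# from keras.callbacks import ModelCheckpoint
# checkpoint = ModelCheckpoint('keras_mnist_best.h5', monitor='val_loss', save_best_only=True)
# model.fit(X_train, y_train_ohe, validation_data=(X_test, y_test_ohe),
#           epochs=20, batch_size=128, callbacks=[checkpoint])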

 

#Save the trained model to keras_mnist1.h5 so it is ready for later use

 

 

model.save("keras_mnist1.h5")

print("Saved keras_mnist1 to disk")

 

#####

 

Now that the model file "keras_mnist1.h5" has been saved to disk, it can be used by another Python script for prediction on MNIST images in JPG format, which can be downloaded from Kaggle.

 

Below is an example that shows how to load the trained model and use it immediately for prediction.

 

####

 

import numpy as np

from keras.datasets import mnist

import keras

import gc

import os

 

from keras.models import Sequential, Model

from keras.layers import Input, Dense, Dropout, Flatten

from keras.layers import Conv2D, MaxPooling2D

from keras.models import model_from_json

from keras.models import load_model

import cv2

 

 

#Load the trained model. Note: make sure your Keras version is not lower than the Keras version used to save the model

 

 

loaded_model=load_model('keras_mnist1.h5')

print("Loaded keras_mnist1 from disk")

 

 

#Load an MNIST image in JPG format; an MNIST data set in JPG can be downloaded from https://www.kaggle.com/scolianni/mnistasjpg

#The Kaggle samples load as 28x28 grey scale arrays (a colour image of shape 28x28x3 would first need to be collapsed to a single grey channel; see the commented sketch after the shape check below). Following is the code:

 

from skimage import transform,io

grey = io.imread("/home/megan/ml/keras/new/new/testSample/img_1.jpg")

small_grey = transform.resize(grey, (28,28), mode='symmetric', preserve_range=True)

im=small_grey

height, width  = im.shape

print(height)

print(width)
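
# If a JPG loaded with 3 colour channels (shape 28x28x3), it would need to be
# collapsed to a single grey channel before reshaping; a minimal sketch using
# skimage (the Kaggle samples already load as 28x28, so this is commented out):
# from skimage.color import rgb2gray
# if im.ndim == 3:
#     im = rgb2gray(im)
# For consistency with the /255 scaling used during training, the pixel values
# could also be normalized:
# im = im / 255.0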

 

#Predict which digit the JPG image shows

 

pr = loaded_model.predict_classes(im.reshape(1, height, width,1))

print(pr)
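
# Note: predict_classes is available on Sequential models in older Keras
# releases; an equivalent that also works where predict_classes has been
# removed would be:
# pr = np.argmax(loaded_model.predict(im.reshape(1, height, width, 1)), axis=-1)
# print(pr)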

 

####

 

Running it correctly identifies "img_1.jpg", an image of the digit 2, as a 2:

 

/home/megan/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.

  from ._conv import register_converters as _register_converters

Using CNTK backend

Selected GPU[0] GeForce GT 710 as the process wide default device.

Loaded keras_mnist1 from disk

28

28

/home/megan/anaconda3/lib/python3.6/site-packages/cntk/core.py:361: UserWarning: your data is of type "float64", but your input variable (uid "Input4") expects "". Please convert your data beforehand to speed up training.

  (sample.dtype, var.uid, str(var.dtype)))

[2]