Recognizing Handwritten Digits with a Neural Network

We train a neural network on the MNIST dataset. The training data consists of 28×28 images of handwritten digits, so the input layer contains 784 = 28×28 neurons. Each input pixel is a grayscale value: 0.0 represents white, 1.0 represents black, and values in between represent progressively darker shades of gray.
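As a quick sanity check of this encoding, the snippet below (a minimal sketch, assuming the data file at data/mnist.pkl.gz and the load_data helper from mnist_loader.py further down) loads one training image and confirms it is a 784-dimensional vector with values in [0, 1]:

import mnist_loader  # the loader module defined below

training_data, _, _ = mnist_loader.load_data()
x = training_data[0][0]  # first image, a flattened 28*28 vector
y = training_data[1][0]  # its label
print(x.shape)           # (784,)
print(x.min(), x.max())  # pixel intensities lie in [0.0, 1.0]
print(y)                 # the digit this image depicts, 0-9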

[Figure: intro.png]

Algorithm

[Figure: algorithm.png]
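The figure itself is not reproduced here. As a stand-in, these are the standard backpropagation equations for a sigmoid network with quadratic cost, which correspond to cost_deviation and backprop in network.py below (z for weighted inputs, a for activations, L for the output layer, m for the mini-batch size):

\delta^L = (a^L - y) \odot \sigma'(z^L)                      % error at the output layer (cost_deviation)
\delta^l = \left((w^{l+1})^T \delta^{l+1}\right) \odot \sigma'(z^l)  % error propagated to hidden layers (backprop)
\partial C / \partial b^l = \delta^l                          % bias gradients
\partial C / \partial w^l = \delta^l \, (a^{l-1})^T           % weight gradients
w \leftarrow w - \frac{\alpha}{m} \sum_{x} \partial C_x / \partial w  % mini-batch gradient descent update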

A quick introduction to neural networks

Code

mnist_loader.py: load the data

import numpy as np
import pickle
import gzip

def load_data():
    """Return (training_data, validation_data, test_data).

    Each element is a pair (images, labels): images is an ndarray of
    784-dimensional vectors, labels is an ndarray of digits 0-9.
    """
    f = gzip.open('data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Reshape the raw data into the format the network expects:
    inputs as (784, 1) column vectors; training labels as (10, 1)
    one-hot vectors; validation/test labels as plain digits."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a (10, 1) one-hot column vector with 1.0 in position j."""
    v = np.zeros((10, 1))
    v[j] = 1.0
    return v
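A quick usage check (a sketch; note that the returned zip objects are single-use iterators and must be materialized with list() before being indexed or reused, as gradient_descent below does):

import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)  # materialize the zip
x, y = training_data[0]
print(len(training_data))  # 50,000 training examples in this split
print(x.shape, y.shape)    # (784, 1) input, (10, 1) one-hot label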

network.py: the learning algorithm, including mini-batch gradient descent and backpropagation

import numpy as np
import random

class Network(object):

    def __init__(self, sizes):
        """Initialize weights and biases.

        :param sizes: list with the number of neurons in each layer

        Weights and biases are drawn from a standard normal distribution.
        They are stored as plain lists of ndarrays (one entry per layer),
        since the per-layer matrices have different shapes and cannot be
        stacked into a single rectangular array.
        """

        self.sizes = sizes
        self.num_layers = len(sizes)
        self.weights = [np.random.randn(x, y) for x, y in zip(sizes[1:], sizes[:-1])]
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        
    
    def feedforward(self, a):
        """Return the network's output for input a, propagating it through every layer."""

        for w, b in zip(self.weights, self.biases):
            a = sigmoid(np.dot(w, a) + b)
        return a
        
    
    def gradient_descent(self, training_data, epochs, mini_batch_size, alpha, test_data=None):
        """Mini-batch gradient descent: update the parameters once per mini-batch.

        :param training_data: training examples (x, y), as a zip/iterable
        :param epochs: number of passes over the training data
        :param mini_batch_size: number of examples per mini-batch
        :param alpha: learning rate
        :param test_data: optional test examples; if given, accuracy is printed after each epoch
        """

        training_data = list(training_data)
        n = len(training_data)
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        for i in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k+mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                ws_derivative_sum = [np.zeros(w.shape) for w in self.weights]
                bs_derivative_sum = [np.zeros(b.shape) for b in self.biases]
                for x, y in mini_batch:
                    activations, zs = self.forwardprop(x)  # forward pass
                    delta = self.cost_deviation(activations[-1], zs[-1], y)  # error at the output layer
                    ws_derivative, bs_derivative = self.backprop(activations, zs, delta)  # backward pass: dC/dw, dC/db
                    ws_derivative_sum = [acc + dw for acc, dw in zip(ws_derivative_sum, ws_derivative)]
                    bs_derivative_sum = [acc + db for acc, db in zip(bs_derivative_sum, bs_derivative)]
                self.weights = [w - (alpha / len(mini_batch)) * dw for w, dw in zip(self.weights, ws_derivative_sum)]
                self.biases = [b - (alpha / len(mini_batch)) * db for b, db in zip(self.biases, bs_derivative_sum)]
            if test_data:
                print("Epoch {} : {} / {}".format(i, self.evaluate(test_data), n_test))  # correct predictions / test set size
            else:
                print("Epoch {} complete".format(i))


    def forwardprop(self, x):
        """Forward pass: return the per-layer activations and weighted inputs."""

        activation = x
        activations = [x]  # activations, layer by layer
        zs = []            # weighted inputs z = w*a + b, layer by layer
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        return (activations, zs)


    def cost_deviation(self, output, z, y):
        """Error at the output layer for the quadratic cost: (a - y) * sigmoid'(z)."""

        return (output - y) * sigmoid_derivative(z)
    
    
    def backprop(self, activations, zs, delta):
        """Backward pass: propagate the error and return (dC/dw, dC/db) per layer."""

        ws_derivative = [np.zeros(w.shape) for w in self.weights]
        bs_derivative = [np.zeros(b.shape) for b in self.biases]
        ws_derivative[-1] = np.dot(delta, activations[-2].transpose())
        bs_derivative[-1] = delta

        for l in range(2, self.num_layers):
            z = zs[-l]
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sigmoid_derivative(z)
            ws_derivative[-l] = np.dot(delta, activations[-l-1].transpose())
            bs_derivative[-l] = delta

        return (ws_derivative, bs_derivative)
     
        
    def evaluate(self, test_data):
        """Return the number of test examples for which the predicted digit is correct."""

        test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data]
        return sum(int(output == y) for (output, y) in test_results)
    

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_derivative(z):
    """Derivative of the sigmoid function."""

    return sigmoid(z) * (1 - sigmoid(z))
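A quick shape check (a minimal sketch, not part of the original code) confirms the forward-pass wiring before any training:

import numpy as np
import network

net = network.Network([784, 30, 10])
x = np.random.rand(784, 1)  # a fake "image" as a column vector
out = net.feedforward(x)
print(out.shape)            # (10, 1): one sigmoid activation per digit class
print(np.argmax(out))       # index of the most activated output neuron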

run.py: train a three-layer network (one input layer, one hidden layer, one output layer)

import mnist_loader
import network

if __name__ == '__main__':
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network.Network([784, 30, 10])  # 784 = 28*28 inputs, 30 hidden neurons, 10 outputs
    net.gradient_descent(training_data, 30, 10, 3.0, test_data=test_data)  # 30 epochs, mini-batch size 10, learning rate 3.0
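After training, an individual digit can be classified by taking the arg-max of the output activations. A sketch (self-contained, mirroring run.py; test_data is materialized with list() since the loader returns single-use zips):

import numpy as np
import mnist_loader
import network

training_data, _, test_data = mnist_loader.load_data_wrapper()
test_data = list(test_data)  # materialize so it can be indexed

net = network.Network([784, 30, 10])
net.gradient_descent(training_data, 30, 10, 3.0)

x, y = test_data[0]
prediction = np.argmax(net.feedforward(x))
print("predicted:", prediction, "actual:", y)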

Download the MNIST dataset and source code


More content: my GitHub blog
Note: this article was originally published at https://cnyangkui.github.io/2018/10/07/ML-NeuralNetwork/
