深度學(xué)習(xí) 第一課 KNN以及交叉驗(yàn)證

1 python numpy 常用函數(shù)

1.1 cPickle

Python 2 標(biāo)準(zhǔn)庫提供 pickle 和 cPickle 兩個(gè)模塊用于序列化(cPickle 是 C 實(shí)現(xiàn),速度更快;Python 3 中已并入 pickle)。pickle 模塊中的兩個(gè)主要函數(shù)是 dump() 和 load()。

1.2 numpy
numpy.concatenate 連接兩個(gè)矩陣 可以是按行連接或按列連接

numpy.tile(arr, (x, y)) 用于擴(kuò)充數(shù)組:如果 arr 是一維數(shù)組,則按給定倍數(shù)重復(fù);如果是矩陣,則沿行方向復(fù)制 x 份、沿列方向復(fù)制 y 份

numpy.bincount(a) 返回每個(gè)非負(fù)整數(shù)值在 a 中出現(xiàn)的次數(shù),返回?cái)?shù)組的長度比 a 中的最大值大 1

2 KNN 詳細(xì)實(shí)現(xiàn)以及交叉驗(yàn)證

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cPickle as pickle 
import numpy as np 
import os
import random
import matplotlib.pyplot as plt

def load_CIFAR_batch(filename):
    """Load one pickled CIFAR-10 batch file.

    Returns:
        X: float array of shape (N, 32, 32, 3) -- N images in HWC layout.
        Y: int array of shape (N,) -- the corresponding labels.
    """
    # BUG FIX: pickle data is binary; opening in text mode ('r') corrupts
    # the stream on some platforms. Open in 'rb' as the old comment hinted.
    with open(filename, 'rb') as f:
        datadict = pickle.load(f)

        X = datadict['data']
        Y = datadict['labels']
        # Each raw row is 3072 values: 3 channels x 32 x 32 pixels (CHW).
        # Infer the batch size with -1 instead of hard-coding 10000 so
        # smaller batches load too; transpose CHW -> HWC for plotting.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype(float)
        Y = np.array(Y)  # convert the label list to an ndarray

        # X is an N * 32 * 32 * 3 array after the transpose
        return X, Y

def load_CIFAR1O(ROOT):
    """Load the full CIFAR-10 dataset from directory ROOT.

    Reads the five training batches plus the test batch and returns
    (Xtr, Ytr, Xte, Yte): concatenated training images/labels followed
    by the test images/labels.
    """
    print('start load_CIFAR1O')

    batch_images = []
    batch_labels = []
    for batch_id in (1, 2, 3, 4, 5):
        batch_path = os.path.join(ROOT, 'data_batch_%d' % (batch_id,))
        X, Y = load_CIFAR_batch(batch_path)
        batch_images.append(X)
        batch_labels.append(Y)
    Xtr = np.concatenate(batch_images)
    Ytr = np.concatenate(batch_labels)
    del X, Y

    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte

class KNearestNeighbor:
    """k-nearest-neighbor classifier using squared Euclidean distance.

    Squared distances are sufficient here because only the *ordering* of
    distances matters for picking the k nearest neighbors.
    """

    def __init__(self):
        pass

    def train(self, X, y):
        """Memorize the training data (KNN has no real training phase).

        X: (num_train, D) array of training samples, one row per sample.
        y: (num_train,) array of non-negative integer labels.
        """
        self.X_train = X  # dataset
        self.y_train = y  # labels

    def predict(self, X, k=1):
        """Predict a label for each row of X from its k nearest neighbors."""
        # BUG FIX: the original called self.compute_distances_two_loops,
        # which is not defined anywhere in this class, so predict() always
        # raised AttributeError.
        dists = self.compute_distances_no_loops(X)
        return self.predict_labels(dists, k=k)

    def compute_distances_no_loops(self, X):
        """Return the (num_test, num_train) matrix of squared distances.

        Fully vectorized via the identity (x - y)^2 = x^2 + y^2 - 2xy:
        1. np.sum(self.X_train**2, axis=1) gives each training sample's
           squared norm (the y^2 term).
        2. Tiling it to [num_test, 1] broadcasts y^2 across test rows.
        3. The test norms x^2 are tiled to [num_train, 1] and transposed
           so each row holds one test sample's norm.
        4. X.dot(self.X_train.T) computes all xy inner products at once.
        5. Summing x^2 + y^2 - 2xy yields every pairwise squared distance.
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]

        train_2 = np.tile(np.sum(self.X_train ** 2, axis=1), [num_test, 1])
        test_2 = np.tile(np.sum(X ** 2, axis=1), [num_train, 1]).T
        test_train = X.dot(self.X_train.T)

        dists = train_2 + test_2 - 2 * test_train
        return dists

    def predict_labels(self, dists, k=1):
        """Return the predicted label for each row of the distance matrix.

        For each test sample, take the k training samples with the
        smallest distance and predict the most frequent label among them.
        """
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        # range instead of the Python-2-only xrange (same iteration order).
        for i in range(num_test):
            # Indices of the k nearest training samples ([:k] of argsort),
            # then look up their labels.
            closest_idx = np.argsort(dists[i, :])[:k].tolist()
            closest_y = self.y_train[closest_idx]

            # bincount tallies how often each label occurs among the k
            # neighbors; argmax picks the most common (majority vote).
            counts = np.bincount(closest_y)
            y_pred[i] = np.argmax(counts)
        return y_pred

# Location of the CIFAR-10 python-format batches on disk.
cifar10_dir = 'datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR1O(cifar10_dir)

# The full training set holds 50k images; keep only the first 5k so the
# distance computations stay affordable.
num_training = 5000
X_train = X_train[:num_training]
y_train = y_train[:num_training]

# Likewise keep only the first 500 test images.
num_test = 500
X_test = X_test[:num_test]
y_test = y_test[:num_test]

# Flatten each 32x32x3 image into a single 3072-dimensional row vector.
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)

# Run the classifier once on the held-out test split.
def test_classifier():
    """Train on X_train/y_train, predict X_test with k=5, print accuracy."""
    classifier = KNearestNeighbor()
    classifier.train(X_train, y_train)  # KNN just memorizes the data
    # BUG FIX: predict() requires the test samples as its first argument;
    # the original called classifier.predict(k=5), a guaranteed TypeError.
    y_test_pred = classifier.predict(X_test, k=5)

    # Fraction of test samples predicted correctly.
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = float(num_correct) / num_test
    print('%d個(gè)預(yù)測對了,總共%d個(gè)測試樣本 => 準(zhǔn)確率: %f' % (num_correct, num_test, accuracy))

# Cross-validation to select k.
def cost_validation():
    """K-fold cross-validation over candidate values of k for KNN.

    Splits the training data into num_folds folds; for each candidate k,
    each fold in turn serves as the validation set while the remaining
    folds form the training set. Prints every (k, fold-accuracy) pair and
    returns the k with the best mean accuracy across folds.
    """
    num_folds = 5  # each k is scored as the average over 5 folds
    k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

    X_train_folds = []
    y_train_folds = []

    # Split the training indices into num_folds (nearly) equal parts.
    idxes = range(num_training)
    idx_folds = np.array_split(idxes, num_folds)
    for idx in idx_folds:
        X_train_folds.append(X_train[idx])
        y_train_folds.append(y_train[idx])

    k_to_accuracies = {}

    import sys
    classifier = KNearestNeighbor()
    Verbose = False
    for k in k_choices:
        if Verbose:
            print("processing k=%f" % k)
        else:
            sys.stdout.write('.')  # progress dot per candidate k

        k_to_accuracies[k] = list()
        for num in range(num_folds):  # range, not the Python-2-only xrange
            if Verbose:
                print("processing fold#%i/%i" % (num, num_folds))

            # Training data = every fold except the one held out this round.
            X_cv_train = np.vstack([X_train_folds[x] for x in range(num_folds) if x != num])
            y_cv_train = np.hstack([y_train_folds[x].T for x in range(num_folds) if x != num])

            # Never touch the real test set during cross-validation; the
            # held-out fold of the TRAINING data plays the test-set role.
            X_cv_test = X_train_folds[num]
            y_cv_test = y_train_folds[num]

            classifier.train(X_cv_train, y_cv_train)
            dists = classifier.compute_distances_no_loops(X_cv_test)
            y_cv_test_pred = classifier.predict_labels(dists, k=k)
            num_correct = np.sum(y_cv_test_pred == y_cv_test)
            k_to_accuracies[k].append(float(num_correct) / y_cv_test.shape[0])

    # Report every fold accuracy for every k.
    for k in sorted(k_to_accuracies):
        for accuracy in k_to_accuracies[k]:
            print('k = %d, accuracy = %f' % (k, accuracy))

    # BUG FIX: the original hard-coded best_k = 6 (a value not even in
    # k_choices) and then discarded it. Select the k with the best mean
    # accuracy from the measured results and return it to the caller.
    best_k = max(k_to_accuracies,
                 key=lambda kk: sum(k_to_accuracies[kk]) / float(len(k_to_accuracies[kk])))
    return best_k

3 為什么 KNN 沒法用于實(shí)際生產(chǎn)中?

1.準(zhǔn)確度不高

2.需要大量的實(shí)時(shí)計(jì)算和耗費(fèi)空間 KNN 在訓(xùn)練期間實(shí)際上沒做什么事情

4 做N折交叉驗(yàn)證

N折交叉驗(yàn)證

1.選取 n 個(gè)不同的 k 
2.把數(shù)據(jù)分成 m 份 
3.對于每個(gè)k 都可以做m次訓(xùn)練 每次訓(xùn)練的時(shí)候 
從這m中選一個(gè)當(dāng)測試集而不能把預(yù)先的測試集拿來用 
從 [1, m] 依次選取當(dāng)測試集 其他的當(dāng)數(shù)據(jù)集來預(yù)測
每一個(gè) k 在這 m 分?jǐn)?shù)據(jù)上做預(yù)測 取平均值即為在這個(gè)k對應(yīng)的準(zhǔn)確值
然后再選取最優(yōu)的K來得到最好的預(yù)測結(jié)果

交叉驗(yàn)證


交叉驗(yàn)證的結(jié)果


最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請結(jié)合常識與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡書系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容