統(tǒng)計(jì)學(xué)習(xí)方法 李航 最近鄰模型 python sklearn 實(shí)現(xiàn) 及課后習(xí)題

  • 算法特點(diǎn)
    簡單地說,k-近鄰算法采用測(cè)量不同特征值之間的距離方法進(jìn)行分類。
    優(yōu)點(diǎn):精度高、對(duì)異常值不敏感、無數(shù)據(jù)輸入假定
    缺點(diǎn):計(jì)算復(fù)雜度高、空間復(fù)雜度高
    適用數(shù)據(jù)范圍:數(shù)值型和標(biāo)稱型

  • 李航:
    K近鄰法(k-nearest neighbor, k-NN)是一種基本的分類與回歸的方法,1968年由Cover和Hart提出。k近鄰的輸入為實(shí)例的特征向量, 對(duì)應(yīng)于特征空間的點(diǎn),輸出為實(shí)例的類別,可以取多類。 k近鄰假設(shè)給定一個(gè)訓(xùn)練數(shù)據(jù)集,其中的實(shí)例類別已定。分類時(shí),對(duì)新的實(shí)例,根據(jù)其k個(gè)最近鄰的訓(xùn)練實(shí)例的 類別,通過多數(shù)表決等方式進(jìn)行預(yù)測(cè)。因此,k近鄰不具有顯式的學(xué)習(xí)過程。k近鄰法實(shí)際上利用訓(xùn)練數(shù)據(jù)集對(duì)特征向量空間的劃分,作為其分類的模型。 k值的選擇,距離的度量以及分類決策規(guī)則是K近鄰的三個(gè)基本要素。
    python代碼所用數(shù)據(jù)為kaggle中mnist數(shù)據(jù),將特征PCA至六維

# -*- coding: utf-8 -*-
"""
使用python實(shí)現(xiàn)的KNN算法進(jìn)行分類的一個(gè)實(shí)例,
使用數(shù)據(jù)集是Kaggle數(shù)字手寫體數(shù)據(jù)庫
"""
import pandas as pd
import numpy as np
import math
import operator
from sklearn.decomposition import PCA

# 加載數(shù)據(jù)集
def load_data(filename, n, mode):
    """Load a Kaggle MNIST-style CSV and reduce its features to n dims with PCA.

    Parameters
    ----------
    filename : str
        Path to the CSV file. Non-test files are expected to carry the label
        in column 0 and pixel features in the remaining columns; test files
        contain features only.
    n : int
        Number of PCA components to keep.
    mode : str
        'test' for an unlabeled test file, anything else for a labeled file.

    Returns
    -------
    tuple
        (reduced features, label column) for training data, or
        (reduced features, 1) for test data (1 is a placeholder).
    """
    data = np.asarray(pd.read_csv(filename))
    pca = PCA(n_components=n)
    # NOTE(review): PCA is re-fit independently on every file, so the train
    # and test projections live in different subspaces and predictions on
    # the test set are not meaningful. The PCA fitted on the training data
    # should be reused to transform the test data -- confirm and refactor
    # the callers accordingly.
    if mode != 'test':
        dataset = pca.fit_transform(data[:, 1:])
        return dataset, data[:, 0]
    dataset = pca.fit_transform(data)
    return dataset, 1

# 計(jì)算距離
def euclideanDistance(instance1, instance2, length):
    """Return the Euclidean distance over the first `length` coordinates.

    Parameters
    ----------
    instance1, instance2 : indexable numeric sequences
    length : int
        Number of leading dimensions to compare.
    """
    # Accumulate the squared differences. The original assigned instead of
    # adding, so only the final dimension contributed to the distance.
    return math.sqrt(sum((instance1[i] - instance2[i]) ** 2 for i in range(length)))


# 返回K個(gè)最近鄰
# 返回K個(gè)最近鄰
def getNeighbors(trainingSet, train_label, testInstance, k):
    """Return the k training points nearest to testInstance.

    Parameters
    ----------
    trainingSet : array-like, shape (n_samples, n_features)
    train_label : indexable of labels, length n_samples
    testInstance : array-like, shape (n_features,)
    k : int

    Returns
    -------
    list[tuple[int, label]]
        (training-set index, label) pairs of the k nearest neighbors,
        ordered from closest to farthest.
    """
    train = np.asarray(trainingSet, dtype=float)
    query = np.asarray(testInstance, dtype=float)
    # Vectorized Euclidean distances over ALL features. The original code
    # compared only len(testInstance) - 1 dimensions -- a leftover from
    # datasets where the label is appended to each row -- which silently
    # ignored the last PCA feature here.
    distances = np.linalg.norm(train - query, axis=1)
    nearest = np.argsort(distances)[:k]
    return [(int(i), train_label[i]) for i in nearest]


# 對(duì)k個(gè)近鄰進(jìn)行合并,返回value最大的key
# 對(duì)k個(gè)近鄰進(jìn)行合并,返回value最大的key
def getResponse(neighbors):
    """Majority vote over (index, label) pairs.

    Returns the most frequent label; ties go to the label whose first
    vote appeared earliest, matching a stable descending sort.
    """
    votes = {}
    for neighbor in neighbors:
        label = neighbor[-1]
        votes[label] = votes.get(label, 0) + 1
    # max() yields the first label that reaches the top count, which is the
    # same winner a stable reverse sort by count would produce.
    return max(votes, key=votes.get)

def main(train_data_path, test_data_path, top_k, n_dim):
    """Run the hand-written KNN over PCA-reduced Kaggle MNIST data.

    Loads the training and test CSVs, classifies every test row by majority
    vote among its top_k nearest training rows, and writes the predictions
    to 'mm.csv' in Kaggle submission format (1-based ImageId).
    """
    train_data, train_label = load_data(train_data_path, n_dim, 'train')
    print("Train set :" + repr(len(train_data)))
    test_data, _ = load_data(test_data_path, n_dim, 'test')
    print("Test set :" + repr(len(test_data)))

    predictions = []
    # ImageId is 1-based in the Kaggle submission format.
    for image_id, sample in enumerate(test_data, start=1):
        nearest = getNeighbors(train_data, train_label, sample, top_k)
        label = getResponse(nearest)
        predictions.append([image_id, label])
        print(">Index : %s, predicted = %s" % (image_id, label))

    submission = pd.DataFrame(columns=['ImageId', 'Label'], data=predictions)
    submission.to_csv('mm.csv', index=False, encoding="utf-8")

if __name__ == "__main__":
    train_data_path = 'train.csv'
    test_data_path = 'test.csv'
    top_k = 5
    n_dim = 6
    main(train_data_path, test_data_path, top_k, n_dim)

sklearn代碼所用數(shù)據(jù)為kaggle中mnist數(shù)據(jù),將特征PCA至六維

# -*- coding: utf-8 -*-
"""
使用sklearn實(shí)現(xiàn)的KNN算法進(jìn)行分類的一個(gè)實(shí)例,
使用數(shù)據(jù)集是Kaggle數(shù)字手寫體數(shù)據(jù)庫
"""

import pandas as pd
import numpy as np
from sklearn import neighbors
from sklearn.decomposition import PCA
import sklearn

# 加載數(shù)據(jù)集
# 加載數(shù)據(jù)集
def load_data(filename, n, mode):
    """Read a Kaggle MNIST-style CSV and project its features to n PCA dims.

    For any mode other than 'test' the first column is treated as the label
    and returned alongside the reduced features; for 'test' mode every
    column is a feature and the placeholder 1 fills the label slot.
    """
    raw = np.asarray(pd.read_csv(filename))
    reducer = PCA(n_components=n)
    if mode == 'test':
        return reducer.fit_transform(raw), 1
    return reducer.fit_transform(raw[:, 1:]), raw[:, 0]

def main(train_data_path, test_data_path, n_dim):
    """Classify PCA-reduced Kaggle MNIST test data with sklearn's KNN.

    Fits a KNeighborsClassifier on the training CSV, reports the training
    accuracy, predicts a label for every test row, and writes the results
    to 'm.csv' in Kaggle submission format.
    """
    train_data, train_label = load_data(train_data_path, n_dim, 'train')
    print("Train set :" + repr(len(train_data)))
    test_data, _ = load_data(test_data_path, n_dim, 'test')
    print("Test set :" + repr(len(test_data)))

    knn = neighbors.KNeighborsClassifier()
    # 訓(xùn)練數(shù)據(jù)集 / fit on the training data
    knn.fit(train_data, train_label)
    # Accuracy measured on the training set itself -- an optimistic estimate.
    score = knn.score(train_data, train_label)
    print(">Training accuracy = " + repr(score))

    # Predict the whole test set in one vectorized call; the original made
    # one predict() call per sample plus an unused predict_proba() call.
    results = knn.predict(test_data)
    predictions = []
    for index, result in enumerate(results, start=1):
        predictions.append([index, result])
        print(">Index : %s, predicted = %s" % (index, result))

    columns = ['ImageId', 'Label']
    save_file = pd.DataFrame(columns=columns, data=predictions)
    save_file.to_csv('m.csv', index=False, encoding="utf-8")

if __name__ == "__main__":
    train_data_path = 'train.csv'
    test_data_path = 'test.csv'
    n_dim = 6
    main(train_data_path, test_data_path, n_dim)

課后習(xí)題

喜歡的關(guān)注點(diǎn)贊哈

最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡書系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容