# 2018-12-25 Python course project (python大作業(yè))

# -*- coding: utf-8 -*-

"""
Created on Sun Dec 23 01:10:07 2018

@author: NickyChu
"""

import numpy as np

import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.linear_model import LogisticRegression as LR
from sklearn.linear_model import RandomizedLogisticRegression as RLR

# In[121]:

# Load the training and test sets; header=None numbers the columns 0..n-1.
df = pd.read_csv('Train.csv',header=None)#header=None makes the column names a 0-based integer sequence
df_new = pd.read_csv('Test.csv',header=None)
print(df.duplicated())#show which rows are duplicates
df = df.drop_duplicates()#remove duplicate rows
df

# In[163]:

# Separate the cleaned frame into a feature matrix (all columns but the
# last) and the label vector (the last column).
my_matrix = df
data_x = my_matrix.iloc[:, :-1]
data_y = my_matrix.iloc[:, -1]

# In[123]:

# NOTE(review): RandomizedLogisticRegression was deprecated in scikit-learn
# 0.19 and removed in 0.21 — this cell only runs on an old scikit-learn.
rlr = RLR() #build a randomized logistic regression model to screen variables
rlr.fit(data_x, data_y) #fit on the full training features/labels
array1 = rlr.get_support() #boolean mask of selected features (per-feature scores also available via .scores_)
score1 = rlr.scores_  # per-feature stability scores
score1

# In[161]:

# Report which features the randomized screening kept.  The original paste
# lost the loop-body indentation and used the `== True` anti-idiom; this
# reconstructs the obvious intent with enumerate (1-based feature index).
for i, selected in enumerate(array1, start=1):
    if selected:
        print("第{0}個(gè)特征值為有用特征值".format(i))

# In[162]:

# Filter the feature values

# Relabel the columns with the RLR scores so unselected features (score 0)
# can be dropped by label from both the training and test frames.
data_x.columns = score1
df_new.columns = score1
todrop = [0]  # drop columns whose screening score is 0
try:
    data_x.drop(todrop, inplace=True, axis=1)
    df_new.drop(todrop, inplace=True, axis=1)
except KeyError:  # was a bare except; only a missing column label is expected here
    print("No need to drop")
data_x
df_new

# In[]:

# Split into train/validation groups

# sklearn.cross_validation and sklearn.grid_search were removed in
# scikit-learn 0.20; the same utilities now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc
from sklearn.model_selection import GridSearchCV

# Hold out a third of the training data for validation.
X1_train, X1_test, y1_train, y1_test = train_test_split(data_x,data_y,test_size=0.33)

# In-sample prediction (組內(nèi)預(yù)測(cè))

# Logistic-regression model.  NOTE(review): penalty='l1' requires a solver
# that supports L1 (liblinear/saga); the modern default lbfgs raises on l1,
# so the solver is pinned explicitly.
lr = LR(C=1.5, penalty='l1', solver='liblinear', class_weight='balanced')
lr.fit(X1_train, y1_train)  # fit on the in-sample (train) fold
print('邏輯回歸模型訓(xùn)練結(jié)束')
print('模型的平均正確率:%s' % lr.score(X1_train, y1_train))  # training accuracy (optimistic)
print(lr.get_params())  # was print(lr.get_params) — printed the bound method, not the params
y1_pre_val = lr.predict(X1_test)  # predictions on the held-out fold

# Classification report on the held-out fold
print("Classification report (training):\n {0}".format(classification_report(y1_test,y1_pre_val,target_names=["0","1"])))

# Predict on the external test set
y_val_old = lr.predict(df_new)
print(lr.get_params())

# In[]:

# Parameter tuning

# Use exhaustive grid search

# GridSearchCV moved from the removed sklearn.grid_search module to
# sklearn.model_selection (scikit-learn >= 0.20).
from sklearn.model_selection import GridSearchCV

lr_clf_tuned = LR()

# l1 is only valid with liblinear; lbfgs supports l2 (optionally multinomial).
tuned_parameters = [
    {'penalty': ['l1', 'l2'],
     'C': [0.01, 0.05, 0.1, 0.5, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 10],
     'solver': ['liblinear'],
     'multi_class': ['ovr']},
    {'penalty': ['l2'],
     'C': [0.01, 0.05, 0.1, 0.5, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 10],
     'solver': ['lbfgs'],
     'multi_class': ['ovr', 'multinomial']},
]
# (removed the unused lr_clf_params dict — tuned_parameters is what is searched)
lr_clf_cv = GridSearchCV(lr_clf_tuned, tuned_parameters, cv=5)
lr_clf_cv.fit(X1_train, y1_train)
print(lr_clf_cv.best_params_)

# In[]:

from sklearn.ensemble import RandomForestClassifier  # used in the comparison section below

# Refit with the grid-search winner.  NOTE(review): the original combined
# penalty='l1' with solver='lbfgs', which is invalid — lbfgs supports only
# l2.  liblinear is the solver that accepts an l1 penalty.
lr = LR(C=10, multi_class='ovr', penalty='l1', solver='liblinear')
lr.fit(X1_train, y1_train)
print('邏輯回歸模型訓(xùn)練結(jié)束')
print('模型的平均正確率:%s' % lr.score(X1_train, y1_train))
print(lr.get_params())  # was print(lr.get_params) — printed the bound method
y1_pre_val = lr.predict(X1_test)  # predictions on the held-out fold

# Classification report
print("Classification report (training):\n {0}".format(classification_report(y1_test,y1_pre_val,target_names=["0","1"])))

# Predict the external test set
y_val_new = lr.predict(df_new)
print(lr.get_params())

# In[]:

# Search for the class-weight / C combination with the best validation ROC AUC.

from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc
from sklearn.metrics import roc_auc_score


def FindtheC():
    """Grid-search the class-0 weight (`play`) and the C value, tracking
    ROC AUC on the held-out fold, and print the best AUC found.

    Reconstructed from a mangled paste: the original had a stray `i = 64`
    that broke the outer loop (the author's note of the best i), a prose
    line recording that j = 67 reached AUC ~0.699, an unused `score` list,
    and `print(max(aucarr))` outside the function where `aucarr` (a local)
    would raise NameError.
    """
    aucarr = []
    Conclu = []
    for i in range(1, 100):
        play = i / 1000  # weight assigned to class 0; class 1 gets 1 - play
        for j in range(1, 100):
            Canshu = j  # candidate C (inverse regularization strength)
            lr = LR(C=Canshu, multi_class='ovr', penalty='l2',
                    solver='liblinear', class_weight={0: play, 1: 1 - play})
            lr.fit(X1_train, y1_train)
            print('邏輯回歸模型訓(xùn)練結(jié)束')
            print('模型的平均正確率:%s' % lr.score(X1_train, y1_train))
            print(lr.get_params())
            y1_pre_test1 = lr.predict(X1_test)
            auc1 = "ROC是{0}".format(roc_auc_score(y1_test, y1_pre_test1))
            print(auc1)
            aucarr.append(roc_auc_score(y1_test, y1_pre_test1))
            Conclu.append([auc1, Canshu])
    # aucarr is local, so report the best AUC here inside the function
    print(max(aucarr))

# In[]:

# Dump the per-class log-probabilities of the last fitted model on the
# full feature matrix (column 1 = positive class, column 0 = negative).
pre_probs = lr.predict_log_proba(data_x)
print(pre_probs[:,1])
print(pre_probs[:,0])

# In[]:

from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression

# Parameter tuning (參數(shù)調(diào)整): alias the earlier split to the shorter names
# used by the comparison section below.
x_train = X1_train
y_train = y1_train
x_test = X1_test
y_test = y1_test

def show_accuracy(a, b, tip):
acc = a.ravel() == b.ravel()
acc_rate = 100 * float(acc.sum()) / a.size
return acc_rate

# Main code

# Logistic-regression baseline on the renamed split.  Note this rebinds the
# module-level `lr`, which the ROC section below reads.
lr = LogisticRegression(penalty='l2')
lr.fit(x_train, y_train)
y_hat = lr.predict(x_test)
lr_rate = show_accuracy(y_hat, y_test, 'Logistic回歸 ')  # accuracy %; tip arg is unused by show_accuracy
print(roc_auc_score(y_test, y_hat))

# Random forest — n_estimators is the tree count: more is generally better but slower; use at least 100

# Random-forest comparison model with 100 trees.
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(x_train, y_train)
y_hat = rfc.predict(x_test)
rfc_rate = show_accuracy(y_hat, y_test, '隨機(jī)森林 ')  # accuracy %
print(roc_auc_score(y_test, y_hat))

# In[]:

# XGBoost

import xgboost as xgb

# Wrap the splits in XGBoost's DMatrix format; watch_list reports eval/train
# metrics each boosting round.
data_train = xgb.DMatrix(x_train, label=y_train)
data_test = xgb.DMatrix(x_test, label=y_test)
watch_list = [(data_test, 'eval'), (data_train, 'train')]
# eta (learning rate) 0.8 is aggressive.  NOTE(review): 'silent' was removed
# in xgboost >= 1.0 ('verbosity' replaces it) — confirm the installed version.
param = {'max_depth': 6, 'eta': 0.8, 'silent': 1, 'objective': 'binary:logistic'}
bst = xgb.train(param, data_train, num_boost_round=100, evals=watch_list)
y_hat = bst.predict(data_test)  # positive-class probabilities
# Threshold at 0.5 to get hard 0/1 labels for the accuracy helper.
y_hat[y_hat > 0.5] = 1
y_hat[~(y_hat > 0.5)] = 0
xgb_rate = show_accuracy(y_hat, y_test, 'XGBoost ')
print(roc_auc_score(y_test, y_hat))

# In[]:

# Inputs needed for the ROC plot

# ROC inputs: predicted class probabilities of the last fitted `lr` on the
# full training matrix.  predict_proba is deterministic for a fitted model,
# so compute it once instead of twice as the original did.
y1 = lr.predict_proba(data_x)
y1_valid_score_lr1 = y1  # same probabilities; avoids a redundant predict_proba call
fpr_lr1, tpr_lr1, thresholds_lr1 = roc_curve(data_y, y1_valid_score_lr1[:, 1])
roc_auc_lr1 = auc(fpr_lr1, tpr_lr1)

# Draw the ROC curve
plt.plot(fpr_lr1, tpr_lr1, lw=2, alpha=.6)
plt.plot([0, 1], [0, 1], lw=2, linestyle="--")  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1.05])
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("ROC curve")

# --- Scraped blog footer, kept as comments so the file stays runnable ---
# Copyright belongs to the author; contact the author for reprinting or content collaboration.
# Community notice: some community content may be AI-assisted; verify against common sense and multiple sources.
# Platform statement: the content (including any images/videos) was uploaded by the author and represents
# only the author's views; Jianshu is an information-publishing platform providing storage services only.