Part 1: Theoretical Background
Methods that combine different classifiers are called "ensemble methods" or "meta-algorithms". An ensemble can take many forms: a combination of different algorithms, of the same algorithm under different settings, or of classifiers trained on different portions of the dataset.
Bootstrap aggregating (bagging) draws S new datasets from the original dataset by sampling with replacement, applies the same learning algorithm to each of them, and thereby obtains S classifiers. To classify a new sample, all S classifiers make a prediction and vote, and the class with the most votes is the final result; a minimal sketch of this procedure follows.
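The sketch below is not from the original post; it only illustrates the bootstrap-sample-and-vote idea. trainClassifier(data, labels) and classify(model, x) are hypothetical placeholders for any base learner, for example the decision stumps built later in this post.

from numpy import *

# Minimal bagging sketch (illustration only).
def bagging(dataArr, classLabels, trainClassifier, classify, S=10):
    dataMat = array(dataArr)
    labels = array(classLabels)
    m = dataMat.shape[0]
    models = []
    for _ in range(S):
        idx = random.randint(0, m, m)      # bootstrap sample: draw m rows with replacement
        models.append(trainClassifier(dataMat[idx], labels[idx]))
    def vote(x):
        votes = [classify(model, x) for model in models]   # each model gets one vote
        return 1.0 if sum(votes) >= 0 else -1.0            # majority vote for +1/-1 labels
    return vote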
Boosting, by contrast, obtains each new classifier by concentrating on the data that the previous classifiers misclassified. Its final prediction is a weighted sum over all classifiers, and the weights are not equal: each classifier's weight reflects how well it performed in the round in which it was trained. (Boosting comes in several versions; this post only covers AdaBoost.)
The basic steps of adaptive boosting (AdaBoost) are:
step 1: initialize the sample-weight vector D (uniformly);
step 2: train a weak classifier on the weighted data and compute its weighted error rate;
step 3: compute the classifier's weight alpha from that error rate;
step 4: update the sample weights D, increasing the weights of misclassified samples and decreasing the weights of correctly classified ones;
step 5: compute the error rate of the combined (weighted-vote) classifier;
step 6: if that error rate meets the requirement (e.g. drops to 0), stop; otherwise return to step 2.
The relevant quantities are computed as follows. The weighted error rate of a weak classifier $h_t$ is the total weight of the samples it misclassifies,

$$\varepsilon = \sum_{i:\,h_t(x_i)\neq y_i} D_i^{(t)}$$

(with the initial uniform weights this is simply the number of misclassified samples divided by the total number of samples). The classifier's weight is

$$\alpha = \frac{1}{2}\ln\!\left(\frac{1-\varepsilon}{\varepsilon}\right),$$

and the sample weights are then updated: for a correctly classified sample

$$D_i^{(t+1)} = \frac{D_i^{(t)}\,e^{-\alpha}}{\operatorname{Sum}(D)},$$

and for a misclassified sample

$$D_i^{(t+1)} = \frac{D_i^{(t)}\,e^{\alpha}}{\operatorname{Sum}(D)},$$

where $\operatorname{Sum}(D)$ renormalizes the weights so they again sum to 1. Both cases can be written at once as $D_i^{(t+1)} \propto D_i^{(t)}\,e^{-\alpha\,y_i\,h_t(x_i)}$, which is exactly the exponent computed in the training code below.
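As a quick worked example of these formulas (numbers chosen for illustration; they correspond to a round in which one of five equally weighted samples is misclassified): with all weights initially $0.2$ and one sample wrong,

$$\varepsilon = \tfrac{1}{5} = 0.2, \qquad \alpha = \tfrac{1}{2}\ln\!\frac{0.8}{0.2} \approx 0.693.$$

The misclassified sample's weight becomes $0.2\,e^{0.693}\approx 0.4$, each correct sample's becomes $0.2\,e^{-0.693}\approx 0.1$, and after dividing by the new sum $0.4 + 4\times 0.1 = 0.8$ the updated weights are $0.5$ for the misclassified sample and $0.125$ for each of the others.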

Part 2: Implementation
# 0
# Simple toy dataset
from numpy import *   # the code below uses NumPy's mat, ones, zeros, log, exp, sign, etc.

def loadSimpData():
    datMat = matrix([[1. , 2.1],
                     [2. , 1.1],
                     [1.3, 1. ],
                     [1. , 1. ],
                     [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels
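The stump-building function in the next block calls a helper, stumpClassify(), that is not listed in this post. Below is a minimal sketch reconstructed from how it is called (a feature index, a threshold, and an 'lt'/'gt' flag); it assumes 'lt' means samples whose feature value is at or below the threshold are predicted as -1, and 'gt' the reverse.

# Helper for buildStump(): classify all samples on one feature against a threshold
# (sketch; 'lt' -> values <= threshVal get label -1, 'gt' -> values > threshVal get -1)
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    retArray = ones((shape(dataMatrix)[0], 1))        # default prediction: +1
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0
    return retArray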
# 1
# Build the best decision stump (one-level decision tree) for the given sample weights D
def buildStump(dataArr, classLabels, D):
    dataMatrix = mat(dataArr)
    labelMat = mat(classLabels).T
    m, n = shape(dataMatrix)
    numSteps = 10.0
    bestStump = {}
    bestClasEst = mat(zeros((m, 1)))
    minError = inf
    for i in range(n):                           # outer loop: find the value range of feature i to get the step size
        rangeMin = dataMatrix[:, i].min()
        rangeMax = dataMatrix[:, i].max()
        stepSize = (rangeMax - rangeMin) / numSteps
        for j in range(-1, int(numSteps) + 1):   # step through the candidate thresholds
            for inequal in ['lt', 'gt']:         # try both inequality directions
                threshVal = rangeMin + float(j) * stepSize                 # threshold for this step
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)
                errArr = mat(ones((m, 1)))                # start by marking every sample as misclassified
                errArr[predictedVals == labelMat] = 0     # then clear the ones that were classified correctly
                weightedError = D.T * errArr              # weighted error rate
                print "split: dim %d, thresh %.2f, inequal: %s, weighted error: %.3f" % \
                      (i, threshVal, inequal, float(weightedError))
                if weightedError < minError:              # better than the best stump found so far?
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst
# Test
D = mat(ones((5,1))/5)
datMat, classLabels = loadSimpData()
bestStump, minError, bestClasEst = buildStump(datMat, classLabels, D)
print bestStump,'\n', minError,'\n', bestClasEst
Running this prints every candidate split that buildStump() tries, followed by the best stump, its weighted error, and its class estimates on the toy data.

# 2
# AdaBoost training based on decision stumps
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)          # start with equal sample weights
    aggClassEst = mat(zeros((m, 1)))   # running weighted vote of all stumps
    for i in range(numIt):
        print ("=" * 40)
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)
        print "sample weights D this round:", D.T
        print "this stump's predictions classEst:", classEst.T
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))   # classifier weight
        print "classifier weight alpha from error:", alpha
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)
        expon = multiply(-1 * alpha * mat(classLabels).T, classEst)   # exponent -alpha*y_i*h(x_i)
        print "exponent used to update D:", expon.T
        D = multiply(D, exp(expon))
        D = D / D.sum()                # renormalize so the weights sum to 1
        print "updated sample weights D:", D.T
        aggClassEst += alpha * classEst
        print "aggregate class estimate aggClassEst:", aggClassEst.T
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))
        print "aggregate prediction sign(aggClassEst):", sign(aggClassEst).T
        print "error indicator vector aggErrors:", aggErrors.T
        errorRate = aggErrors.sum() / m
        print "total error:", errorRate, "\n"
        if errorRate == 0.0:           # stop early once the training error reaches 0
            break
    return weakClassArr, aggClassEst   # aggClassEst is also returned for the ROC plot in section 5; callers that only need the classifiers unpack and ignore it
# Test
datMat, classLabels = loadSimpData()
classifierArray, aggClassEst = adaBoostTrainDS(datMat, classLabels, 9)
print classifierArray
Running this prints, for each boosting round, the current sample weights D, the stump's predictions, alpha, the updated weights, the running aggregate aggClassEst, and the total error of the combined classifier; on this toy dataset the error reaches 0 within a few rounds, so training stops before the 9 requested iterations.

# 3
# AdaBoost classification function
def adaClassify(datToClass, classifierArr):
    # datToClass: the data to classify -- a single sample or a set of samples
    # classifierArr: the array of trained weak classifiers
    dataMatrix = mat(datToClass)
    m = shape(dataMatrix)[0]
    aggClassEst = mat(zeros((m, 1)))
    for i in range(len(classifierArr)):          # run every weak classifier on the data
        classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'],
                                 classifierArr[i]['thresh'],
                                 classifierArr[i]['ineq'])
        aggClassEst += classifierArr[i]['alpha'] * classEst   # accumulate the weighted votes
        print 'aggClassEst after %d weak classifiers:\n' % (i + 1), aggClassEst
    return sign(aggClassEst)                     # final prediction
# Test
datMat, labelArr = loadSimpData()
classifierArr, aggClassEst = adaBoostTrainDS(datMat, labelArr, 30)
print "trained weak classifiers:", classifierArr
print ('=' * 40)
print 'end of weak-classifier training\n'
result = adaClassify([0, 0], classifierArr)
print 'final classification result:', result
Running this first prints the training rounds, then the aggregate estimate aggClassEst after each weak classifier is applied to the point [0, 0], and finally its predicted class.

# 4
# Adaptive data-loading function: works for any number of tab-separated features
def loadDataSet(fileName):
    numFeat = len(open(fileName).readline().split('\t'))   # count the columns from the first line
    dataMat = []
    labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = []
        curLine = line.strip().split('\t')
        for i in range(numFeat - 1):         # all columns except the last are features
            lineArr.append(float(curLine[i]))
        dataMat.append(lineArr)
        labelMat.append(float(curLine[-1]))  # the last column is the class label (+1/-1)
    return dataMat, labelMat
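loadDataSet() can be exercised on any tab-separated file whose last column is the class label; a throwaway example (the file name and values below are made up purely for illustration):

# Write a tiny tab-separated file and load it back (illustration only)
with open('tiny.txt', 'w') as f:
    f.write('1.0\t2.1\t1.0\n')
    f.write('2.0\t1.1\t-1.0\n')
dataMat, labelMat = loadDataSet('tiny.txt')
print dataMat    # [[1.0, 2.1], [2.0, 1.1]]
print labelMat   # [1.0, -1.0]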
# Test
datArr, labelArr = loadDataSet('horseColicTraining2.txt')
classifierArray, aggClassEst = adaBoostTrainDS(datArr, labelArr, 10)
testArr, testLabelArr = loadDataSet('horseColicTest2.txt')
prediction10 = adaClassify(testArr, classifierArray)
# Count the misclassified test samples (the test set has 67 examples)
errArr = mat(ones((67, 1)))
errNum = errArr[prediction10 != mat(testLabelArr).T].sum()
print 'number of misclassified samples:', errNum
Running this prints the training rounds, the per-classifier aggregates for the test set, and finally the number of misclassified samples among the 67 test examples.

# 5
# Plot the ROC curve and compute the AUC
def plotROC(predStrengths, classLabels):
    import matplotlib.pyplot as plt
    cur = (1.0, 1.0)                 # plotting cursor, starting at the top-right corner
    ySum = 0.0                       # accumulates curve heights for the AUC calculation
    numPosClas = sum(array(classLabels) == 1.0)
    yStep = 1 / float(numPosClas)                      # step size along the true-positive axis
    xStep = 1 / float(len(classLabels) - numPosClas)   # step size along the false-positive axis
    sortedIndicies = predStrengths.argsort()           # indices sorted by prediction strength
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sortedIndicies.tolist()[0]:
        if classLabels[index] == 1.0:    # positive example: step down along the y axis
            delX = 0
            delY = yStep
        else:                            # negative example: step left along the x axis
            delX = xStep
            delY = 0
            ySum += cur[1]               # add the current height for the AUC
        ax.plot([cur[0], cur[0] - delX], [cur[1], cur[1] - delY], c='b')
        cur = (cur[0] - delX, cur[1] - delY)
    ax.plot([0, 1], [0, 1], 'b--')       # diagonal of a random classifier
    plt.xlabel('false positive rate')
    plt.ylabel('true positive rate')
    plt.title('ROC curve for AdaBoost Horse Colic Detection System')
    ax.axis([0, 1, 0, 1])
    plt.show()
    print 'the area under the curve is:', ySum * xStep
# Test
datArr, labelArr = loadDataSet('horseColicTraining2.txt')
classifierArray,aggClassEst = adaBoostTrainDS(datArr, labelArr, 10)
plotROC(aggClassEst.T,labelArr)
Running this shows the ROC curve and prints the AUC. The AUC here is approximated by summing, for every negative example, a rectangle of width xStep whose height is the true-positive rate at that point, which is exactly what ySum * xStep computes.
