《机器学习实战》第六章----支持向量机

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/qq_28773183/article/details/81017913

支持向量机

SVM(Support Vector Machine)实际上是应用于二分类的一个分类器,其基本模型定义为特征空间上的间隔最大的线性分类器,其学习策略便是间隔最大化,最终可转化为一个凸二次规划问题的求解。这里不对整个过程进行推导,因为看了很多博客,有几篇大佬的博客写的非常清晰,这里只贴出代码和自己写的代码注释.

  1. 希尔伯特空间(Hilbert Space)
  2. 支持向量机通俗导论(理解SVM的三层境界)
  3. 机器学习经典算法详解及Python实现–基于SMO的SVM分类器
  4. 机器学习算法实践-SVM中的SMO算法

代码实现

(1)简化版本的SMO算法,随机选取内层循环变量:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    :  //
# @Author  : FC
# @Site    : [email protected]
# @license : BSD
from numpy import *
def loadDataSet(fileName):
    """Load a tab-separated dataset: two feature columns and one label column.

    Args:
        fileName: path to a text file where each line is "x1\tx2\tlabel".

    Returns:
        (dataMat, labelMat): list of [x1, x2] float pairs and list of float labels.
    """
    dataMat = []; labelMat = []
    # 'with' guarantees the handle is closed (the original never closed it)
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat

# Pick a random second alpha index different from the first one.
def selectJrand(i, m):
    """Return a uniformly random j in range(m) with j != i."""
    j = int(random.uniform(0, m))
    while j == i:
        j = int(random.uniform(0, m))
    return j

# Clamp a Lagrange multiplier into its feasible box [L, H].
def clipAlpha(aj, H, L):
    """Return aj limited to [L, H]; the upper bound is applied first,
    matching the original statement order."""
    aj = min(aj, H)
    aj = max(aj, L)
    return aj


# Simplified SMO: the second alpha of each pair is picked at random (no heuristic).
# INPUT : dataMatIn   - training samples
#         classLabels - class labels (+1 / -1)
#         C           - box constraint, 0 <= alpha <= C
#         toler       - tolerance on the prediction error Ei
#         maxIter     - consecutive no-change passes required before stopping
# OUTPUT: b, alphas
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
    b = 0; m, n = shape(dataMatrix)
    alphas = mat(zeros((m, 1)))
    numIter = 0  # was 'iter'; renamed to stop shadowing the builtin
    while numIter < maxIter:
        alphaPairsChanged = 0  # pairs optimized during this pass
        for i in range(m):
            # g(x_i): decision value of sample i under the current model
            fXi = float(multiply(alphas, labelMat).T *
                        (dataMatrix * dataMatrix[i, :].T)) + b
            Ei = fXi - float(labelMat[i])  # prediction error E_i
            # optimize only alphas violating the KKT conditions beyond toler
            if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or \
                    ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m)  # second alpha chosen at random
                fXj = float(multiply(alphas, labelMat).T *
                            (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy()  # alpha_1_old
                alphaJold = alphas[j].copy()  # alpha_2_old
                # clipping bounds [L, H] for the new alpha_j
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H: print("L==H"); continue
                # eta = 2*K12 - K11 - K22 (sign flipped vs. the textbook form)
                eta = 2.0*dataMatrix[i, :]*dataMatrix[j, :].T - \
                    dataMatrix[i, :]*dataMatrix[i, :].T - \
                    dataMatrix[j, :]*dataMatrix[j, :].T
                if eta >= 0: print("eta>=0"); continue
                # minus sign because eta above is the negation of the book's eta
                alphas[j] -= labelMat[j]*(Ei - Ej)/eta
                alphas[j] = clipAlpha(alphas[j], H, L)  # alpha_2_new
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print("j not moving enough"); continue
                # move alpha_i by the complementary amount (alpha_1_new)
                alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])
                b1 = b - Ei - labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[i, :].T - \
                    labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i, :]*dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[j, :].T - \
                    labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j, :]*dataMatrix[j, :].T
                # prefer the b derived from a non-boundary alpha; else average
                if (0 < alphas[i]) and (C > alphas[i]): b = b1
                elif (0 < alphas[j]) and (C > alphas[j]): b = b2
                else: b = (b1 + b2)/2.0
                alphaPairsChanged += 1
                # fixed message typo: "paris" -> "pairs"
                print("iter:%d i:%d,pairs changed %d" % (numIter, i, alphaPairsChanged))
        # a pass with no changes counts toward convergence; any change resets it
        if alphaPairsChanged == 0: numIter += 1
        else: numIter = 0
        print("iteration number:%d" % numIter)
    return b, alphas





# Run the demo only when executed as a script, not on import.
# (The original `if '__main__':` tested a non-empty string, which is always true.)
if __name__ == '__main__':
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)


(2)Platt的完整版的SMO算法,由于用例样本较小,外层变量的选取并没有检测KKT条件

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    :  //
# @Author  : FC
# @Site    : [email protected]
# @license : BSD
from numpy import *
import SMO
# In Platt SMO: the outer loop picks alpha_i, the inner heuristic picks alpha_j.
#
# Shared state for the Platt SMO routines:
#   X        : training samples (matrix)
#   labelMat : column vector of class labels
#   C        : box constraint on the alphas
#   tol      : KKT tolerance
#   kTup     : kernel spec, e.g. ('lin', 0) or ('rbf', sigma)
#   eCache   : m x 2 error cache; column 0 is the "valid" flag, column 1 is E
#   K        : precomputed m x m kernel matrix
class optStruct:
    def __init__(self, dataMatIn, classLabels, C, toler, kTup):
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]  # number of training samples
        self.alphas = mat(zeros((self.m, 1)))
        self.b = 0
        self.eCache = mat(zeros((self.m, 2)))
        # precompute every kernel column once, up front
        self.K = mat(zeros((self.m, self.m)))
        for col in range(self.m):
            self.K[:, col] = kernelTrans(self.X, self.X[col, :], kTup)

def calcEk(oS, k):
    """Error of sample k: E_k = g(x_k) - y_k, where g is computed from the
    cached kernel column K[:, k], the current alphas, and the bias b."""
    gXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
    return gXk - float(oS.labelMat[k])

# Choose the second alpha j: random on the first pass, afterwards the cached
# index that maximizes |Ei - Ej| (the step-size heuristic from Platt's SMO).
def selectJ(i, oS, Ei):
    """Return (j, Ej) for the second alpha of the pair.

    Fix: maxDeltaE starts at -1 instead of 0. With the original strict
    `deltaE > 0` start, a pass where every cached |Ei - Ek| was exactly 0
    returned maxK = -1, silently indexing alphas[-1] in the caller.
    """
    maxK = -1; maxDeltaE = -1; Ej = 0
    oS.eCache[i] = [1, Ei]  # record Ei in the cache and mark it valid
    # row indices whose error-cache "valid" flag (column 0) is nonzero;
    # .A converts the matrix to an array so nonzero yields row indices
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
    if len(validEcacheList) > 1:
        for k in validEcacheList:
            if k == i: continue
            Ek = calcEk(oS, k)  # only the index k is needed to compute Ek
            deltaE = abs(Ei - Ek)
            if deltaE > maxDeltaE:
                maxK = k; maxDeltaE = deltaE; Ej = Ek
        return maxK, Ej
    else:  # cache still cold: fall back to a random second index
        j = SMO.selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej

# Refresh sample k's entry in the error cache, setting its valid flag to 1.
def updateEk(oS, k):
    """Recompute E_k and store it in oS.eCache with the valid flag raised."""
    oS.eCache[k] = [1, calcEk(oS, k)]

# Inner loop of Platt SMO: try to optimize the pair (alpha_i, alpha_j).
# Returns 1 if the pair was changed, 0 otherwise.
def innerL(i,oS):
    """Attempt one SMO pair update with i as the first index.

    Picks j via the max-|Ei-Ej| heuristic, clips the new alpha_j into its
    feasible box [L, H], moves alpha_i by the complementary amount, then
    refreshes the threshold b from whichever alpha is non-boundary.
    """
    Ei=calcEk(oS,i)
    # proceed only if alpha_i violates the KKT conditions by more than tol
    if((oS.labelMat[i]*Ei<-oS.tol) and (oS.alphas[i]<oS.C)) or \
            ((oS.labelMat[i]*Ei>oS.tol) and (oS.alphas[i]>0)):
        j,Ej = selectJ(i,oS,Ei)# choose the second alpha
        alphaIold = oS.alphas[i].copy();alphaJold = oS.alphas[j].copy()
        # feasible interval [L, H] for the new alpha_j
        if(oS.labelMat[i] != oS.labelMat[j]):
            L = max(0,oS.alphas[j]-oS.alphas[i])
            H = min(oS.C,oS.C+oS.alphas[j]-oS.alphas[i])
        else:
            L=max(0,oS.alphas[j]+oS.alphas[i]-oS.C)
            H=min(oS.C,oS.alphas[j]+oS.alphas[i])
        if L==H:print("L==H");return 0
        # explicit linear-kernel form kept for reference:
        #eta = 2.0*oS.X[i,:]*oS.X[j,:].T-oS.X[i,:]*oS.X[i,:].T- oS.X[j,:]*oS.X[j,:].T
        eta = 2.0*oS.K[i,j]-oS.K[i,i]-oS.K[j,j]# eta = 2*K_ij - K_ii - K_jj (sign flipped vs. the textbook)
        if eta >=0:print("eta>=0");return 0
        oS.alphas[j] -= oS.labelMat[j]*(Ei-Ej)/eta
        oS.alphas[j] = SMO.clipAlpha(oS.alphas[j],H,L)# clip alpha_j into [L, H]
        updateEk(oS,j)
        if(abs(oS.alphas[j]-alphaJold)<0.00001):
            print("j not moving enough!");return 0
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold-oS.alphas[j])# move alpha_i by the complementary amount
        updateEk(oS,i)
        b1 = oS.b-Ei-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,i]-oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[i,j]
        b2 = oS.b-Ej-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,j]-oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[j,j]
        # explicit linear-kernel forms kept for reference:
        # b1=oS.b-Ei-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T-oS.labelMat[j]*\
        #    (oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
        # b2=oS.b-Ej-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T-oS.labelMat[j]*\
        #    (oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
        # prefer the b from a non-boundary alpha; otherwise take the average
        if(0<oS.alphas[i]) and (oS.C>oS.alphas[i]):oS.b=b1
        elif (0<oS.alphas[j]) and (oS.C>oS.alphas[j]):oS.b=b2
        else:oS.b=(b1+b2)/2.0
        return 1
    else:return 0



# Full Platt SMO outer loop: alternate between sweeping every sample and
# sweeping only the non-bound alphas (0 < alpha < C) until nothing changes.
#   kTup: kernel spec forwarded to optStruct, default linear.
# Returns (b, alphas).
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup)
    numIter = 0  # was 'iter'; renamed to stop shadowing the builtin
    entireSet = True; alphaPairsChanged = 0
    while (numIter < maxIter) and ((alphaPairsChanged > 0) or entireSet):
        alphaPairsChanged = 0
        if entireSet:
            # full pass: attempt an update from every sample index
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
            print("fullSet,iter: %d i:%d,pairs changed %d" % (numIter, i, alphaPairsChanged))
            numIter += 1
        else:
            # follow-up passes: only alphas strictly inside (0, C)
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound,iter:%d i:%d,pairs changed %d" % (numIter, i, alphaPairsChanged))
            numIter += 1
        # alternate: after a full pass switch to non-bound passes, and switch
        # back once a non-bound pass changes nothing
        if entireSet: entireSet = False
        elif alphaPairsChanged == 0: entireSet = True
        print("iteration number:%d" % numIter)
    return oS.b, oS.alphas

def calcWs(alphas, dataArr, classLabels):
    """Recover the weight vector w = sum_i alpha_i * y_i * x_i
    (meaningful for the linear kernel). Returns an (n, 1) array."""
    X = mat(dataArr)
    labelMat = mat(classLabels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    # accumulate each support vector's weighted contribution
    for idx in range(m):
        w += multiply(alphas[idx] * labelMat[idx], X[idx, :].T)
    return w

# Evaluate the kernel between sample A and every row of X.
#   'lin' -> plain inner products; 'rbf' -> Gaussian with width kTup[1].
# Returns an m x 1 column matrix of kernel values.
def kernelTrans(X, A, kTup):
    m, n = shape(X)
    K = mat(zeros((m, 1)))
    kind = kTup[0]
    if kind == 'lin':
        K = X * A.T
    elif kind == 'rbf':
        for row in range(m):
            diff = X[row, :] - A
            K[row] = diff * diff.T  # squared Euclidean distance
        # elementwise exp(-||x - A||^2 / sigma^2)
        K = exp(K / (-1 * kTup[1]**2))
    else:
        raise NameError('HOUSTON We Have a Problem -- That Kernel is not recognized')
    return K

def testRbf(k1=1.3):
    """Train an RBF-kernel SVM on 'testSetRBF.txt' and print the training
    error rate, then the error rate on the held-out set 'testSetRBF2.txt'.

    k1: Gaussian kernel width (sigma) forwarded to kernelTrans.
    """
    dataArr,labelArr = SMO.loadDataSet('testSetRBF.txt')
    b,alphas = smoP(dataArr,labelArr,200,0.0001,10000,('rbf',k1))
    dataMat = mat(dataArr);labelMat = mat(labelArr).transpose()
    svInd=nonzero(alphas.A>0)[0]  # indices of the support vectors (alpha > 0)
    sVs = dataMat[svInd]          # keep only the support vectors for prediction
    labelLSV = labelMat[svInd]
    print("there are %d Support Vectors"% shape(sVs)[0])
    m,n=shape(dataMat)
    errorCount = 0
    for i in range(m):
        # prediction uses only the support vectors, not the whole training set
        kernelEval = kernelTrans(sVs,dataMat[i,:],('rbf',k1))
        predict = kernelEval.T*multiply(labelLSV,alphas[svInd])+b
        if sign(predict)!=sign(labelArr[i]):errorCount +=1
    print("the training error rate is: %f"% (float(errorCount)/m))
    # evaluate the same model on the independent test file
    dataArr,labelArr = SMO.loadDataSet('testSetRBF2.txt')
    errorCount = 0
    dataMat = mat(dataArr);labelMat = mat(labelArr).transpose()
    m,n = shape(dataMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs,dataMat[i,:],('rbf',k1))
        predict = kernelEval.T*multiply(labelLSV,alphas[svInd])+b
        if sign(predict)!= sign(labelArr[i]):errorCount +=1
    print("the test error rate is: %f"% (float(errorCount)/m))



# Run the demo only when executed as a script, not on import.
# (The original `if '__main__':` tested a non-empty string, which is always true.)
if __name__ == '__main__':
    testRbf()

(3)handwriting示例

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    :  //
# @Author  : FC
# @Site    : [email protected]
# @license : BSD
from os import listdir
from numpy import *
import Platt_SMO

######
# Flatten one 32x32 text image ('0'/'1' characters) into a 1x1024 vector.
# Input : filename - path of the image file (32 lines of 32 digit chars)
# Output: 1x1024 numpy array of 0/1 values
######
def img2vector(filename):
    returnVect = zeros((1, 1024))
    # 'with' guarantees the handle is closed (the original never closed it)
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32*i + j] = int(lineStr[j])
    return returnVect
# Read every image file in dirName into an m x 1024 matrix; digit 9 is
# labelled -1 and every other digit +1 (binary problem: "9 vs the rest").
def loadImages(dirName):
    hwLabels = []
    fileList = listdir(dirName)
    m = len(fileList)
    trainingMat = zeros((m, 1024))
    for idx, fileNameStr in enumerate(fileList):
        # file names look like "<digit>_<sample>.txt"
        classNumStr = int(fileNameStr.split('.')[0].split('_')[0])
        hwLabels.append(-1 if classNumStr == 9 else 1)
        trainingMat[idx, :] = img2vector('%s/%s' % (dirName, fileNameStr))
    return trainingMat, hwLabels

def testDigits(kTup=('rbf',10)):
    """Train an SVM on 'digits/trainingDigits' (9 vs the rest) and print the
    training error rate, then the error rate on 'digits/testDigits'.

    kTup: kernel spec forwarded to smoP/kernelTrans, default ('rbf', 10).
    """
    dataArr,labelArr = loadImages('digits/trainingDigits')
    b,alphas = Platt_SMO.smoP(dataArr,labelArr,200,0.001,10000,kTup)
    dataMat = mat(dataArr);labelMat = mat(labelArr).transpose()
    svInd = nonzero(alphas.A>0)[0]  # indices of the support vectors (alpha > 0)
    sVs = dataMat[svInd]            # keep only the support vectors for prediction
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    m,n = shape(dataMat)
    errorCount = 0
    for i in range(m):
        # prediction uses only the support vectors, not the whole training set
        kernelEval = Platt_SMO.kernelTrans(sVs,dataMat[i,:],kTup)
        predict = kernelEval.T*multiply(labelSV,alphas[svInd])+b
        if sign(predict)!= sign(labelArr[i]):errorCount +=1
    print("the training error rate is: %f" % (float(errorCount)/m))
    # evaluate the same model on the independent test directory
    dataArr,labelArr = loadImages('digits/testDigits')
    errorCount =0
    dataMat=mat(dataArr);labelMat=mat(labelArr).transpose()
    m,n=shape(dataMat)
    for i in range(m):
        kernelEval = Platt_SMO.kernelTrans(sVs,dataMat[i,:],kTup)
        predict = kernelEval.T*multiply(labelSV,alphas[svInd])+b
        if sign(predict) != sign(labelArr[i]):errorCount +=1
    print("the test error rate is: %f" % (float(errorCount)/m))

# Run the demo only when executed as a script, not on import.
# (The original `if '__main__':` tested a non-empty string, which is always true.)
if __name__ == '__main__':
    testDigits()

(4)运行结果:

fullSet,iter: 2 i:401,pairs changed 0
iteration number:3
there are 125 Support Vectors
the training error rate is: 0.000000
the test error rate is: 0.010753

完整代码和数据集

完整代码地址:SVM

猜你喜欢

转载自blog.csdn.net/qq_28773183/article/details/81017913
今日推荐