SMO: From Idea to Practice

1. The Idea Behind SMO

Training an SVM means solving a quadratic program over the Lagrange multipliers alpha_1, ..., alpha_m, subject to the box constraint 0 <= alpha_i <= C and the equality constraint sum_i alpha_i * y_i = 0. SMO (Sequential Minimal Optimization) attacks this problem by optimizing just two multipliers at a time: because of the equality constraint, a single multiplier cannot move on its own, and a two-variable subproblem has a closed-form solution. Each step therefore (1) picks a multiplier alpha_i that violates the KKT conditions, (2) picks a partner alpha_j, (3) solves for the pair analytically and clips the result back into the feasible box, and (4) updates the bias b. The procedure stops once a full pass over the training set changes no pair.
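For the selected pair, write K_{kl} = x_k^T x_l for the inner product (the kernel in the linear case) and E_k = f(x_k) - y_k for the prediction error. In the usual notation, the closed-form update that the code below implements is

    \eta = K_{11} + K_{22} - 2K_{12}, \qquad
    \alpha_2^{\text{new,unc}} = \alpha_2^{\text{old}} + \frac{y_2 (E_1 - E_2)}{\eta}

    \alpha_2^{\text{new}} = \operatorname{clip}\!\left(\alpha_2^{\text{new,unc}},\, [L, H]\right), \qquad
    \alpha_1^{\text{new}} = \alpha_1^{\text{old}} + y_1 y_2 \left(\alpha_2^{\text{old}} - \alpha_2^{\text{new}}\right)

where [L, H] is the interval for alpha_2 implied by the box and equality constraints. Note that the listing defines eta with the opposite sign (2K_{12} - K_{11} - K_{22}) and therefore subtracts the correction rather than adding it.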
2. SMO in Python (code from Machine Learning in Action)
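The listing calls two small helpers, selectJrand and clipAlpha, that the book defines alongside smoSimple. A minimal sketch of them is given here so that the listing is self-contained; the bodies are reconstructed from the book's code and should be treated as an approximation.

import random

def selectJrand(i, m):
    # pick a random index j in [0, m) that is different from i
    j = i
    while j == i:
        j = int(random.uniform(0, m))
    return j

def clipAlpha(aj, H, L):
    # clip alpha_j so that it stays inside the box [L, H]
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj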

from numpy import mat, shape, zeros, multiply

def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    # mat() turns the inputs into matrices; transpose() makes the labels an m x 1 column vector
    dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
    b = 0; m, n = shape(dataMatrix)       # shape() returns (number of rows, number of columns)
    alphas = mat(zeros((m, 1)))           # zeros() initializes all multipliers to 0
    iter = 0
    while iter < maxIter:
        alphaPairsChanged = 0
        for i in range(m):                # dataMatrix[i,:] is the i-th sample as a row vector
            # evaluate the current decision function at sample i:
            # f(x_i) = sum_k alpha_k * y_k * <x_k, x_i> + b   (multiply() is element-wise)
            fXi = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[i, :].T)) + b
            Ei = fXi - float(labelMat[i])  # error between the prediction and the true label y_i
            # optimize this multiplier only if it violates the KKT conditions (within tolerance toler)
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or \
               ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m)     # pick the second index j at random among the m samples
                fXj = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])   # error for sample j
                alphaIold = alphas[i].copy(); alphaJold = alphas[j].copy()
                # the box constraint gives bounds L, H on alpha_j that depend on
                # whether y_i and y_j have the same sign
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H: print("L==H"); continue
                # eta is the negative of K11 + K22 - 2*K12, the curvature along the constraint direction
                eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, :].T \
                      - dataMatrix[i, :] * dataMatrix[i, :].T \
                      - dataMatrix[j, :] * dataMatrix[j, :].T
                if eta >= 0: print("eta>=0"); continue
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta   # unclipped solution along the constraint direction
                alphas[j] = clipAlpha(alphas[j], H, L)       # clip alpha_j into [L, H]
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print("j not moving enough"); continue
                # update alpha_i by the same amount as alpha_j, in the opposite direction:
                # alpha1 += y1*y2*(alpha2_old - alpha2_new)
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                # update b:  b1 = b - E1 - y1*K11*(alpha1_new - alpha1_old) - y2*K21*(alpha2_new - alpha2_old)
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[i, :].T \
                            - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[i, :] * dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[j, :].T \
                            - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[j, :] * dataMatrix[j, :].T
                if (0 < alphas[i]) and (C > alphas[i]): b = b1
                elif (0 < alphas[j]) and (C > alphas[j]): b = b2
                else: b = (b1 + b2) / 2.0
                alphaPairsChanged += 1
                print("iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
        if alphaPairsChanged == 0: iter += 1
        else: iter = 0
        print("iteration number: %d" % iter)
    return b, alphas
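A minimal usage sketch. The toy arrays dataArr and labelArr below are made up for illustration, and the hyperparameters C=0.6, toler=0.001, maxIter=40 are plausible defaults rather than tuned values; the weight-vector recovery at the end is the standard w = sum_i alpha_i * y_i * x_i.

from numpy import mat, shape, zeros, multiply

# hypothetical toy data: two linearly separable clusters in 2-D
dataArr = [[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [7.0, 8.0], [8.5, 6.9], [7.8, 9.1]]
labelArr = [-1, -1, -1, 1, 1, 1]

b, alphas = smoSimple(dataArr, labelArr, C=0.6, toler=0.001, maxIter=40)

# recover the primal weight vector: w = sum_i alpha_i * y_i * x_i
X = mat(dataArr); y = mat(labelArr).transpose()
m, n = shape(X)
w = zeros((n, 1))
for i in range(m):
    w += multiply(alphas[i] * y[i], X[i, :].T)

# classify a new point with sign(w^T x + b); expected to be positive here
print(float(mat([8.0, 8.0]) * mat(w) + b))

Only samples with alphas[i] > 0 (the support vectors) contribute to w, so the trained model is fully described by b and the nonzero multipliers.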





 
