Preface
Recently I have been studying some basic machine learning algorithms, working through Peter Harrington's "Machine Learning in Action" and Li Hang's "Statistical Learning Methods" together with notes from others online, and I wrote down the study process below.
Code environment: PyCharm / Python 3.7
Some of the content follows the references and some of it is my own thinking; since my understanding is limited, the article surely contains mistakes, and I would be grateful for any corrections.
As I understand it, regression means discovering the relationships between variables, that is, solving for the regression coefficients; regression is often used to predict a target value. Regression and classification both belong to supervised learning; the difference is that the target variable of regression must be a continuous numeric value.
1. Simplified SMO: source code for solving the separating hyperplane
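Before the code, it helps to collect in one place the formulas that the inline comments cite from "Statistical Learning Methods" (pp. 106, 126, 127, 130). The summary below is my own restatement, written with K_{ij} = x_i^T x_j; the subscripts 1, 2 correspond to the indices i, j in the code.

Decision function and prediction error (p. 106):
f(x_i) = \sum_{j=1}^{m} \alpha_j y_j K_{ij} + b, \qquad E_i = f(x_i) - y_i
Since y_i E_i = y_i f(x_i) - 1, example i violates the KKT conditions (within tolerance) when y_i E_i < -toler with \alpha_i < C, or y_i E_i > toler with \alpha_i > 0; that is exactly the if-test in the loop below.

Bounds on the new \alpha_2 (p. 126):
y_1 \ne y_2: \quad L = \max(0, \alpha_2 - \alpha_1), \quad H = \min(C, C + \alpha_2 - \alpha_1)
y_1 = y_2: \quad L = \max(0, \alpha_2 + \alpha_1 - C), \quad H = \min(C, \alpha_2 + \alpha_1)

Unclipped update (p. 127), with \eta = K_{11} + K_{22} - 2K_{12}; the code computes -\eta, which is why it checks eta >= 0 instead of eta <= 0:
\alpha_2^{new} = \alpha_2^{old} + \frac{y_2 (E_1 - E_2)}{\eta}, \qquad \alpha_1^{new} = \alpha_1^{old} + y_1 y_2 (\alpha_2^{old} - \alpha_2^{new})

Threshold recomputed after every pair update (p. 130):
b_1 = b - E_1 - y_1 (\alpha_1^{new} - \alpha_1^{old}) K_{11} - y_2 (\alpha_2^{new} - \alpha_2^{old}) K_{12}
b_2 = b - E_2 - y_1 (\alpha_1^{new} - \alpha_1^{old}) K_{12} - y_2 (\alpha_2^{new} - \alpha_2^{old}) K_{22}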
import random
import numpy
# ----------------- data loading and formatting -------------------
def loadDataSet():
    dataMat = []
    labelMat = []
    # note that read() and readlines() behave differently
    txt = open("testSet.txt", "r", encoding="utf-8").readlines()  # open the file
    for line in txt:
        # strip() removes the given characters (whitespace/newlines by default)
        # from the two ends of a string only; it cannot remove characters from the middle
        lines = line.strip('\n')
        lineArray = lines.split()  # lineArray is a list of three elements
        # the first two columns are the features, the third is the class label (+1/-1)
        dataMat.append([float(lineArray[0]), float(lineArray[1])])  # append() adds the list as a single element, unlike extend()
        labelMat.append(int(lineArray[2]))
    return dataMat, labelMat
# The book's code did not match its own description!
# The book says: i is the index of the first alpha, m is the total number of alphas,
# and the function keeps drawing random values until it gets one different from i.
# I rewrote it in the following form (a recursive version that discards its
# return value does not work, so a loop is used instead):
def selectJrand(i, m):
    j = i
    while j == i:
        j = int(random.uniform(0, m))
    return j
# ------------ clip a number into the range [L, H] ------------
def clipAlpha(aj, H, L):
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
# Arguments, in order: the dataset, the class labels, the constant C,
# the tolerance, and the maximum number of iterations before exiting.
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    dataMatrix = numpy.mat(dataMatIn)
    labelMat = numpy.mat(classLabels).transpose()
    b = 0
    m, n = numpy.shape(dataMatrix)
    alphas = numpy.mat(numpy.zeros((m, 1)))
    iter = 0
    while iter < maxIter:  # outer loop
        alphaPairsChanged = 0  # records whether any alpha pair was optimized this pass
        for i in range(m):  # inner loop
            # Predicted value for example i. numpy.multiply is element-wise, not
            # matrix multiplication; .T is the transpose; dataMatrix[i, :] takes row i.
            # fXi is the decision function of "Statistical Learning Methods", p. 106.
            fXi = float(numpy.multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[i, :].T)) + b
            # Prediction error; the if below uses it to check whether example i violates the KKT conditions.
            Ei = fXi - float(labelMat[i])
            # Decide whether this alpha needs optimizing.
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or \
                    ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m)  # choose the second alpha at random (the "simplified" part of simplified SMO)
                fXj = float(numpy.multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])  # error for j
                # .copy() makes independent copies, so the old values survive the updates below.
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                # Keep alpha_j between 0 and C, "Statistical Learning Methods", p. 126:
                # the new alpha_2 must satisfy the inequality constraint (between 0 and C),
                # so L <= alpha_2 <= H, where L and H are the endpoints of the diagonal
                # segment that alpha_2 lies on.
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print("L==H")
                    continue
                # eta determines the optimal change of alpha[j].
                # "Statistical Learning Methods", p. 127, computes eta = K11 + K22 - 2*K12,
                # the opposite sign of the value below; that is why the code checks
                # eta >= 0 rather than eta <= 0.
                eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, :].T \
                    - dataMatrix[i, :] * dataMatrix[i, :].T \
                    - dataMatrix[j, :] * dataMatrix[j, :].T
                if eta >= 0:
                    print("eta>=0")
                    continue  # condition not met; skip this pair and pick new alphas
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta  # update alpha_j, p. 127
                alphas[j] = clipAlpha(alphas[j], H, L)  # then clip it into [L, H], p. 126
                # If alpha_j barely moved, treat the pair as unchanged and move on.
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print("j not moving enough")
                    continue
                # alpha_i changes by the same amount as alpha_j, in the opposite direction.
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                # After each pair of alphas is optimized, recompute the threshold b
                # ("Statistical Learning Methods", p. 130).
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[i, :].T \
                    - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[i, :] * dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[j, :].T \
                    - labelMat[j] * (alphas[j] - alphaJold) * dataMatrix[j, :] * dataMatrix[j, :].T
                if (0 < alphas[i]) and (C > alphas[i]):
                    b = b1
                elif (0 < alphas[j]) and (C > alphas[j]):
                    b = b2
                else:
                    b = (b1 + b2) / 2.0
                alphaPairsChanged += 1
                print("iter: {} i:{}, pairs changed {}".format(iter, i, alphaPairsChanged))
        if alphaPairsChanged == 0:
            iter += 1
        else:
            iter = 0
        print("iteration number: {}".format(iter))
    return b, alphas

def main():
    dataMat, labelMat = loadDataSet()
    # print(dataMat, labelMat, sep='\n')
    b, alphas = smoSimple(dataMat, labelMat, 0.6, 0.001, 40)
    print(b, alphas[alphas > 0], sep='\n')

if __name__ == '__main__':
    main()
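The simplified trainer only returns b and the alphas. To turn those into a separating hyperplane you still need the weight vector w = sum_i alpha_i * y_i * x_i, after which a new point x is classified by the sign of w^T x + b. "Machine Learning in Action" provides a helper of roughly this shape (calcWs); the version below is my own reconstruction against the loadDataSet format above, so treat it as a sketch rather than the book's exact code:

def calcWs(alphas, dataArr, classLabels):
    # w = sum_i alpha_i * y_i * x_i; terms with alpha_i == 0 drop out,
    # so only the support vectors contribute
    X = numpy.mat(dataArr)
    labelMat = numpy.mat(classLabels).transpose()
    m, n = numpy.shape(X)
    w = numpy.zeros((n, 1))
    for i in range(m):
        w += numpy.multiply(alphas[i] * labelMat[i], X[i, :].T)
    return w

# Usage, once main() above has produced b and alphas:
# ws = calcWs(alphas, dataMat, labelMat)
# print(numpy.mat(dataMat)[0] * ws + b)  # > 0 predicts class +1, < 0 predicts class -1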
2. Full Platt SMO
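Two things change relative to the simplified version: the second alpha is no longer random but is chosen by the heuristic that maximizes |Ei - Ej| over a cache of error values (selectJ and eCache below), and the raw inner products are replaced by a precomputed kernel matrix K so that non-linear kernels can be plugged in. For the radial basis function kernel handled by kernelTrans, the code evaluates (in my notation, with sigma given as kTup[1])

K(x, z) = \exp\left(-\frac{\lVert x - z \rVert^2}{\sigma^2}\right)

Note that the book divides by \sigma^2 rather than the more common 2\sigma^2.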
# ------- support functions for the full Platt SMO ----------------
import numpy
import random
# ----------------- data loading and formatting -------------------
def loadDataSet():
    dataMat = []
    labelMat = []
    # note that read() and readlines() behave differently
    txt = open("testSet.txt", "r", encoding="utf-8").readlines()  # open the file
    for line in txt:
        # strip() removes the given characters (whitespace/newlines by default)
        # from the two ends of a string only; it cannot remove characters from the middle
        lines = line.strip('\n')
        lineArray = lines.split()  # lineArray is a list of three elements
        # the first two columns are the features, the third is the class label (+1/-1)
        dataMat.append([float(lineArray[0]), float(lineArray[1])])  # append() adds the list as a single element, unlike extend()
        labelMat.append(int(lineArray[2]))
    return dataMat, labelMat
# The book's code did not match its own description!
# The book says: i is the index of the first alpha, m is the total number of alphas,
# and the function keeps drawing random values until it gets one different from i.
# I rewrote it in the following form:
def selectJrand(i, m):
    j = i
    while j == i:
        j = int(random.uniform(0, m))
    return j

# ------------ clip a number into the range [L, H] ------------
def clipAlpha(aj, H, L):
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
def kernelTrans(X, A, kTup):  # calc the kernel or transform data to a higher-dimensional space
    m, n = numpy.shape(X)
    K = numpy.mat(numpy.zeros((m, 1)))
    if kTup[0] == 'lin':
        K = X * A.T  # linear kernel
    elif kTup[0] == 'rbf':
        for j in range(m):
            deltaRow = X[j, :] - A
            K[j] = deltaRow * deltaRow.T
        # numpy.exp (not math.exp, which cannot take a matrix) so the whole column
        # is transformed element-wise; divide in NumPy is element-wise, not matrix-wise like Matlab
        K = numpy.exp(K / (-1 * kTup[1] ** 2))
    else:
        raise NameError('Houston We Have a Problem -- That Kernel is not recognized')
    return K
class optStruct:
    def __init__(self, dataMatIn, classLabels, C, toler, kTup):  # initialize the structure with the parameters
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = numpy.shape(dataMatIn)[0]
        self.alphas = numpy.mat(numpy.zeros((self.m, 1)))
        self.b = 0
        self.eCache = numpy.mat(numpy.zeros((self.m, 2)))  # first column is a valid flag
        self.K = numpy.mat(numpy.zeros((self.m, self.m)))
        for i in range(self.m):
            self.K[:, i] = kernelTrans(self.X, self.X[i, :], kTup)

def calcEk(oS, k):
    # Use the precomputed kernel column oS.K[:, k] rather than the raw inner
    # products oS.X * oS.X[k, :].T, so that non-linear kernels also work
    # (for the 'lin' kernel the two are identical).
    fxk = float(numpy.multiply(oS.alphas, oS.labelMat).T * oS.K[:, k]) + oS.b
    Ek = fxk - float(oS.labelMat[k])
    return Ek
# choose the second alpha (for the inner loop)
def selectJ(i, oS, Ei):  # this is the second-choice heuristic; it also calcs Ej
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    oS.eCache[i] = [1, Ei]  # set valid; choose the alpha that gives the maximum delta E
    validEcacheList = numpy.nonzero(oS.eCache[:, 0].A)[0]
    if len(validEcacheList) > 1:
        for k in validEcacheList:  # loop through valid Ecache values and find the one that maximizes delta E
            if k == i:
                continue  # don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if deltaE > maxDeltaE:
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:  # in this case (first time around) we don't have any valid eCache values
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
        return j, Ej

def updateEk(oS, k):  # after any alpha has changed, update the new value in the cache
    Ek = calcEk(oS, k)
    oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    Ei = calcEk(oS, i)
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or \
            ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei)  # this has been changed from selectJrand
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        if oS.labelMat[i] != oS.labelMat[j]:
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]  # changed for kernels
        if eta >= 0:
            print("eta>=0")
            return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEk(oS, j)  # added for the Ecache
        if abs(oS.alphas[j] - alphaJold) < 0.00001:
            print("j not moving enough")
            return 0
        # update i by the same amount as j; the update is in the opposite direction
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
        updateEk(oS, i)  # added for the Ecache
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, i] \
            - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i, j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, j] \
            - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j, j]
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):  # full Platt SMO
    oS = optStruct(numpy.mat(dataMatIn), numpy.mat(classLabels).transpose(), C, toler, kTup)
    iter = 0
    entireSet = True
    alphaPairsChanged = 0
    # alternate between full passes over the data and passes over only the
    # non-bound alphas (0 < alpha < C), until no pair changes on a full pass
    while (iter < maxIter) and ((alphaPairsChanged > 0) or entireSet):
        alphaPairsChanged = 0
        if entireSet:  # go over all examples
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
                print("fullSet, iter: {} i:{}, pairs changed {}".format(iter, i, alphaPairsChanged))
            iter += 1
        else:  # go over the non-bound alphas only
            nonBoundIs = numpy.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: {} i:{}, pairs changed {}".format(iter, i, alphaPairsChanged))
            iter += 1
        if entireSet:
            entireSet = False  # toggle the entire-set loop
        elif alphaPairsChanged == 0:
            entireSet = True
        print("iteration number: {}".format(iter))
    return oS.b, oS.alphas

def main():
    dataMat, labelMat = loadDataSet()
    # print(dataMat, labelMat, sep='\n')
    b, alphas = smoP(dataMat, labelMat, 0.6, 0.001, 40)
    print(b, alphas[alphas > 0], sep='\n')

if __name__ == '__main__':
    main()
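With a non-linear kernel you can no longer collapse the model into a single weight vector w; the decision function has to be evaluated through the kernel against the support vectors. The sketch below is my own (the name kernelClassify and the ('rbf', 1.3) setting are illustrative, modeled on the book's testRbf); kTup must match whatever was passed to smoP during training:

def kernelClassify(dataArr, labelArr, b, alphas, kTup=('rbf', 1.3)):
    X = numpy.mat(dataArr)
    labelMat = numpy.mat(labelArr).transpose()
    svInd = numpy.nonzero(alphas.A > 0)[0]  # indices of the support vectors
    sVs = X[svInd]                          # only support vectors enter f(x)
    errorCount = 0
    m = numpy.shape(X)[0]
    for i in range(m):
        # f(x_i) = sum over support vectors of alpha * y * K(sv, x_i) + b
        kernelEval = kernelTrans(sVs, X[i, :], kTup)
        predict = kernelEval.T * numpy.multiply(labelMat[svInd], alphas[svInd]) + b
        if numpy.sign(predict) != numpy.sign(labelArr[i]):
            errorCount += 1
    print("error rate: {}".format(float(errorCount) / m))

# Example, assuming an RBF-trained model:
# b, alphas = smoP(dataMat, labelMat, 200, 0.0001, 10000, ('rbf', 1.3))
# kernelClassify(dataMat, labelMat, b, alphas, ('rbf', 1.3))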
References:
[1] Peter Harrington, "Machine Learning in Action", Chapter 6.
[2] Li Hang, "Statistical Learning Methods".