This is my implementation of the Apriori association-rule algorithm, written after studying how the algorithm works and following Machine Learning in Action (《机器学习实战》). The code has two parts: the first builds the frequent itemsets, the second mines the association rules. Note that I changed the test data in loadDataSet() so that the second part is easier to follow. I also added a lot of debug print statements that helped me understand what is going on; I have left them in because they should help you understand the code as well.
from numpy import *
def loadDataSet():
    return [[1, 3, 4, 6, 7], [2, 3, 4, 5, 6, 7], [1, 2, 3, 5, 7], [2, 4, 5, 6], [3, 4, 5, 6, 7]]
def createC1(dataSet):
    # Build the candidate 1-itemsets C1: every distinct item, wrapped in a list.
    C1 = []
    for transaction in dataSet:
        for item in transaction:
            if not [item] in C1:
                C1.append([item])
    C1.sort()
    # Use frozenset so the itemsets can be used as keys in a dict later on.
    return list(map(frozenset, C1))
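As a quick sanity check, here is a minimal usage sketch of my own (not part of the book code) showing what createC1 produces for the sample transactions above:
dataSet = loadDataSet()
C1 = createC1(dataSet)
print(C1)
# With the sample data, C1 holds the seven 1-item candidates
# frozenset({1}) ... frozenset({7}), sorted by item value.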
def scanD(D, Ck, minSupport):
    # Count how many transactions in D contain each candidate in Ck,
    # then keep only the candidates whose support >= minSupport.
    ssCnt = {}
    for tid in D:
        for can in Ck:
            if can.issubset(tid):
                if can not in ssCnt:
                    ssCnt[can] = 1
                else:
                    ssCnt[can] += 1
    numItems = float(len(D))
    retList = []
    supportData = {}
    for key in ssCnt:
        support = ssCnt[key] / numItems
        if support >= minSupport:
            retList.insert(0, key)
        supportData[key] = support
    return retList, supportData
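To see scanD on its own, a minimal sketch (my own example, assuming the functions above have been defined) that filters C1 down to the frequent 1-itemsets L1:
dataSet = loadDataSet()
D = list(map(set, dataSet))   # transactions as sets
L1, supportData = scanD(D, createC1(dataSet), minSupport=0.5)
print(L1)           # candidates whose support >= 0.5
print(supportData)  # support of every counted candidate, frequent or not
# With the sample data, every single item except 1 clears the 0.5 threshold
# (item 1 appears in only 2 of the 5 transactions).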
def aprioriGen(Lk, k):  # creates Ck
    print('Lk:', Lk)
    retList = []
    lenLk = len(Lk)
    for i in range(lenLk):
        for j in range(i + 1, lenLk):
            # Join step (see p.161 of "Data Mining: Concepts and Techniques"
            # 《数据挖掘概念与技术》): two (k-1)-itemsets are joined only if
            # their first k-2 items are identical.
            L1 = list(Lk[i])[:k - 2]
            L2 = list(Lk[j])[:k - 2]
            L1.sort()
            L2.sort()
            print('L1,L2', L1, L2)
            # When building C2 from the frequent 1-itemsets, [:k-2] is [:0] = [],
            # so L1 == L2 always holds and every pair is joined into a 2-item candidate.
            if L1 == L2:
                retList.append(Lk[i] | Lk[j])  # set union
    return retList
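The join step in the comments above can be seen directly with a small illustration of my own (not from the book): for k=2 the [:k-2] prefixes are empty, so every pair of 1-itemsets is joined, while for k=3 only itemsets that agree on their first item are joined.
L1_demo = [frozenset({2}), frozenset({3}), frozenset({5})]
print(aprioriGen(L1_demo, 2))
# joins every pair: {2,3}, {2,5}, {3,5}

L2_demo = [frozenset({2, 3}), frozenset({2, 5}), frozenset({3, 5})]
print(aprioriGen(L2_demo, 3))
# only {2,3} and {2,5} share their first k-2 = 1 items, so the result is [{2,3,5}]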
def apriori(dataSet, minSupport=0.5):
    C1 = createC1(dataSet)
    D = list(map(set, dataSet))
    L1, supportData = scanD(D, C1, minSupport)
    L = [L1]
    k = 2
    while len(L[k - 2]) > 0:
        Ck = aprioriGen(L[k - 2], k)
        Lk, supK = scanD(D, Ck, minSupport)  # scan DB to get Lk
        supportData.update(supK)
        L.append(Lk)
        k += 1
    return L, supportData
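A minimal driver for this first part (my own usage sketch; the 0.5 threshold is just the default used above):
dataSet = loadDataSet()
L, supportData = apriori(dataSet, minSupport=0.5)
for k, Lk in enumerate(L, start=1):
    print('frequent %d-itemsets:' % k, Lk)
# The last entry of L is an empty list, because the loop appends the empty
# result before the while condition fails.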
The main thing to pay attention to is how the rulesFromConseq() function works; the output from the test print statements I added to the code should help you follow it.
def generateRules(L, supportData, minConf=0.7):  # supportData is a dict coming from scanD
    bigRuleList = []
    for i in range(1, len(L)):  # only use the itemsets with two or more items
        print('i:', i)
        for freqSet in L[i]:
            print('L[i]:', L[i])
            print('freqSet:', freqSet)
            # H1: the single-item consequents drawn from this frequent itemset.
            H1 = [frozenset([item]) for item in freqSet]
            print('H1:', H1)
            if i > 1:
                rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
            else:
                calcConf(freqSet, H1, supportData, bigRuleList, minConf)
    return bigRuleList
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
    prunedH = []  # consequents that pass the confidence threshold
    for conseq in H:
        print('conseq:', conseq)
        print('freqSet-conseq:', freqSet - conseq)
        # confidence(antecedent -> conseq) = support(freqSet) / support(antecedent)
        conf = supportData[freqSet] / supportData[freqSet - conseq]
        if conf >= minConf:
            print(freqSet - conseq, '-->', conseq, 'conf:', conf)
            brl.append((freqSet - conseq, conseq, conf))
            prunedH.append(conseq)
    # print('prunedH:', prunedH)
    return prunedH
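calcConf scores each candidate consequent with the usual confidence formula, conf(X --> Y) = support(X ∪ Y) / support(X). A minimal sketch of that single computation in isolation, using made-up support values purely for illustration (not taken from the sample data):
support = {frozenset({2, 3, 5}): 0.4, frozenset({2, 3}): 0.6}  # hypothetical values
freqSet = frozenset({2, 3, 5})
conseq = frozenset({5})
conf = support[freqSet] / support[freqSet - conseq]   # 0.4 / 0.6 ≈ 0.67
print(conf)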
# rulesFromConseq generates the right-hand sides (consequents) for one frequent
# itemset; a single frequent itemset can yield many rules.
# if (len(freqSet) > (m + 1)) is the stopping condition for the recursion: it stops
# once the consequent size plus one reaches the size of the frequent itemset.
# if (len(Hmp1) > 1): merging needs at least two frozensets, so the recursion only
# continues while Hmp1 still contains more than one surviving consequent.
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
    m = len(H[0])
    print('m:', m)
    print('len(freqSet):', len(freqSet))
    if len(freqSet) > (m + 1):  # try further merging
        Hmp1 = aprioriGen(H, m + 1)  # create the (m+1)-item candidate consequents
        print('Hmp1:', Hmp1)
        Hmp1 = calcConf(freqSet, Hmp1, supportData, brl, minConf)
        print('Hmp2:', Hmp1)
        if len(Hmp1) > 1:  # need at least two sets to merge
            print('---------')
            rulesFromConseq(freqSet, Hmp1, supportData, brl, minConf)
        print('*********')
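Putting both parts together, a minimal usage sketch of my own (minConf=0.7 is just the default from the code above):
dataSet = loadDataSet()
L, supportData = apriori(dataSet, minSupport=0.5)
rules = generateRules(L, supportData, minConf=0.7)
for antecedent, consequent, conf in rules:
    print(antecedent, '-->', consequent, 'conf:', conf)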