Machine Learning Lab: The Naive Bayes Algorithm


The task is as follows:
Based on the provided script naivebayes.py, implement:
1. Replace the data in the dataset file naivebayes_data.csv with the 14-day play-ball/weather data;
2. Predict whether the game is played for the sample {Outlook=Sunny, Temp=Cool, Humidity=High, Windy=Strong}.
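The post does not reproduce naivebayes_data-ball.csv itself. Based on the training data printed in step 2, its contents would look roughly like the sketch below; the header names Outlook,Temp,Humidity,Windy,Play are my assumption (the code only requires that some header row is present for pandas to parse):

    Outlook,Temp,Humidity,Windy,Play
    Sunny,Hot,High,Weak,No
    Sunny,Hot,High,Strong,No
    Overcast,Hot,High,Weak,Yes
    Rain,Mild,High,Weak,Yes
    Rain,Cool,Normal,Weak,Yes
    Rain,Cool,Normal,Strong,No
    Overcast,Cool,Normal,Strong,Yes
    Sunny,Mild,High,Weak,No
    Sunny,Cool,Normal,Weak,Yes
    Rain,Mild,Normal,Weak,Yes
    Sunny,Mild,Normal,Strong,Yes
    Overcast,Mild,High,Strong,Yes
    Overcast,Hot,Normal,Weak,Yes
    Rain,Mild,High,Strong,No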

Code implementation:

  1. Import packages

    from collections import Counter
    import pandas as pd
    import numpy as np
    
  2. Load the dataset

    def getTrainSet():
        dataSet = pd.read_csv('naivebayes_data-ball.csv')
        dataSetNP = np.array(dataSet)  # convert the DataFrame to a NumPy array
        trainData = dataSetNP[:, 0:dataSetNP.shape[1]-1]  # feature columns x1..x4 (Outlook, Temp, Humidity, Windy)
        labels = dataSetNP[:, dataSetNP.shape[1]-1]       # class column Y (whether the game is played)
        return trainData, labels
    

    Print the dataset:

    trainData, labels = getTrainSet()
    print(trainData)    # training features
    print(labels)       # class labels
    # [['Sunny' 'Hot' 'High' 'Weak']
    #  ['Sunny' 'Hot' 'High' 'Strong']
    #  ['Overcast' 'Hot' 'High' 'Weak']
    #  ['Rain' 'Mild' 'High' 'Weak']
    #  ['Rain' 'Cool' 'Normal' 'Weak']
    #  ['Rain' 'Cool' 'Normal' 'Strong']
    #  ['Overcast' 'Cool' 'Normal' 'Strong']
    #  ['Sunny' 'Mild' 'High' 'Weak']
    #  ['Sunny' 'Cool' 'Normal' 'Weak']
    #  ['Rain' 'Mild' 'Normal' 'Weak']
    #  ['Sunny' 'Mild' 'Normal' 'Strong']
    #  ['Overcast' 'Mild' 'High' 'Strong']
    #  ['Overcast' 'Hot' 'Normal' 'Weak']
    #  ['Rain' 'Mild' 'High' 'Strong']]
    # ['No' 'No' 'Yes' 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes' 'Yes' 'Yes' 'Yes' 'Yes' 'No']
    

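    As a quick sanity check (my addition, not part of the original lab), the Counter imported in step 1 can tally the class distribution before training:

    from collections import Counter

    trainData, labels = getTrainSet()
    print(Counter(labels))   # e.g. Counter({'Yes': 9, 'No': 5})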

  3. Implement the Naive Bayes algorithm

    def classify(trainData, labels, features):
        # prior probability of each label in labels
        labels = list(labels)    # convert to a list
        P_y = {}                 # prior probability of each label
        for label in labels:
            P_y[label] = labels.count(label) / float(len(labels))   # p = count(y) / count(Y)
        print('Prior probabilities:', P_y)
        # joint probability of each (feature value, label) pair
        P_xy = {}
        for y in P_y.keys():
            y_index = [i for i, label in enumerate(labels) if label == y]  # indices of samples with label y
            for j in range(len(features)):      # indices of samples where column j equals features[j]
                x_index = [i for i, feature in enumerate(trainData[:, j]) if feature == features[j]]
                xy_count = len(set(x_index) & set(y_index))   # samples where both the feature value and the label match
                pkey = str(features[j]) + '*' + str(y)        # e.g. Sunny*No
                P_xy[pkey] = xy_count / float(len(labels))    # P(x, y)
                # e.g. for y = No and j = 0 (x = Sunny): x_index holds the rows where Outlook is Sunny,
                # xy_count is how many of those rows are also labelled No, and P_xy[pkey] is P(x, y)
        print(P_xy)
        # conditional (likelihood) probabilities
        P = {}
        for y in P_y.keys():
            for x in features:
                pkey = str(x) + '|' + str(y)
                P[pkey] = P_xy[str(x) + '*' + str(y)] / float(P_y[y])    # P(x|y) = P(x, y) / P(y)
        print('Likelihoods:', P)
        # classify ['Sunny', 'Cool', 'High', 'Strong']
        F = {}   # score of ['Sunny', 'Cool', 'High', 'Strong'] under each class
        for y in P_y:
            F[y] = P_y[y]
            for x in features:
                F[y] = F[y] * P[str(x) + '|' + str(y)]
                # P(y|X) = P(X|y)*P(y)/P(X); P(X) is the same for every class, so comparing
                # F = P(y)*P(x1|y)*...*P(x4|y) is enough
        print('Posteriors (unnormalized):', F)
        features_label = max(F, key=F.get)  # class with the highest score
        return features_label
    
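    One caveat about classify() as written: if a feature value never co-occurs with a class in the training data, the corresponding likelihood is 0 and that single factor zeroes out the whole class score. A minimal sketch of add-one (Laplace) smoothing for the likelihood step is shown below; smoothed_likelihood is a hypothetical helper of mine, not part of the original lab, and it reuses the variable names from classify():

    def smoothed_likelihood(trainData, features, y, y_index):
        # add-one (Laplace) smoothed estimate of P(x|y) for each queried feature value
        P = {}
        for j, x in enumerate(features):
            xy_count = sum(1 for i in y_index if trainData[i, j] == x)  # rows with feature j == x and label == y
            n_values = len(set(trainData[:, j]))                        # number of distinct values of feature j
            P[str(x) + '|' + str(y)] = (xy_count + 1) / float(len(y_index) + n_values)
        return P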
  4. Train on the samples and predict

    
    # load the data
    trainData, labels = getTrainSet()
    # sample to classify
    features = ['Sunny','Cool','High','Strong']
    
    # run the classifier
    result = classify(trainData, labels, features)
    print(features, 'belongs to', result)
    # Prior probabilities: {'No': 0.35714285714285715, 'Yes': 0.6428571428571429}
    # {'Sunny*No': 0.21428571428571427, 'Cool*No': 0.07142857142857142, 'High*No': 0.2857142857142857, 'Strong*No': 0.21428571428571427, 'Sunny*Yes': 0.14285714285714285, 'Cool*Yes': 0.21428571428571427, 'High*Yes': 0.21428571428571427, 'Strong*Yes': 0.21428571428571427}
    # Likelihoods: {'Sunny|No': 0.6, 'Cool|No': 0.19999999999999998, 'High|No': 0.7999999999999999, 'Strong|No': 0.6, 'Sunny|Yes': 0.22222222222222218, 'Cool|Yes': 0.3333333333333333, 'High|Yes': 0.3333333333333333, 'Strong|Yes': 0.3333333333333333}
    # Posteriors (unnormalized): {'No': 0.020571428571428567, 'Yes': 0.005291005291005291}
    # ['Sunny', 'Cool', 'High', 'Strong'] belongs to No
    
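The scores can be checked by hand: F[No] = 5/14 × 3/5 × 1/5 × 4/5 × 3/5 ≈ 0.0206 and F[Yes] = 9/14 × 2/9 × 3/9 × 3/9 × 3/9 ≈ 0.0053, so the sample is classified as No. If scikit-learn happens to be available (it is not used in the original lab), CategoricalNB offers an independent cross-check; the encoding step and the alpha value below are my own choices:

    from sklearn.naive_bayes import CategoricalNB
    from sklearn.preprocessing import OrdinalEncoder

    trainData, labels = getTrainSet()
    enc = OrdinalEncoder()                      # map the string categories to integer codes
    X = enc.fit_transform(trainData)
    clf = CategoricalNB(alpha=1.0)              # alpha=1.0 applies Laplace smoothing
    clf.fit(X, labels)
    sample = enc.transform([['Sunny', 'Cool', 'High', 'Strong']])
    print(clf.predict(sample))                  # also predicts ['No'] on this dataset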
