KMeans is an unsupervised learning algorithm: for a dataset D, no labels need to be provided.
import pandas as pd
import numpy as np
data = pd.read_csv('data.csv')
data.head()
X = data.drop(['labels'], axis=1)
y = data.loc[:, 'labels']
from sklearn.cluster import KMeans
KM = KMeans(n_clusters=3,random_state=0)
KM.fit(X)
# cluster centers
centers = KM.cluster_centers_
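If the features are two-dimensional (the later KNN.predict([[80, 60]]) call suggests two columns), a quick scatter plot of the points and the fitted centers helps sanity-check the clustering. This is only a sketch; the plotting choices are assumptions, not part of the original code.
import matplotlib.pyplot as plt
plt.scatter(X.iloc[:, 0], X.iloc[:, 1], c=KM.labels_, s=10)          # points colored by assigned cluster
plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='x', s=100) # fitted cluster centers
plt.xlabel(X.columns[0])
plt.ylabel(X.columns[1])
plt.show()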
# the raw accuracy may be low, because KMeans assigns cluster indices arbitrarily
from sklearn.metrics import accuracy_score
y_predict = KM.predict(X)
accuracy = accuracy_score(y, y_predict)
print(accuracy)
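Since the cluster indices are an arbitrary permutation of the true classes, raw accuracy is misleading here. A permutation-invariant score such as the adjusted Rand index evaluates the clustering directly; a minimal sketch using sklearn's metrics:
from sklearn.metrics import adjusted_rand_score
# 1.0 means the clustering matches the true labels perfectly, values near 0 mean a random assignment
print(adjusted_rand_score(y, y_predict))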
# remap the cluster indices so they match the original labels
y_corrected = []
for i in y_predict:
    if i == 0:
        y_corrected.append(1)   # the mapping depends on your data; match each cluster to its original class
    elif i == 1:
        y_corrected.append(2)   # the mapping depends on your data; match each cluster to its original class
    else:
        y_corrected.append(0)   # the mapping depends on your data; match each cluster to its original class
print(pd.value_counts(y_corrected), pd.value_counts(y))
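With the indices remapped, the accuracy can be recomputed on the corrected predictions as a quick check (using the variables defined above):
print('corrected kmeans accuracy:', accuracy_score(y, y_corrected))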
k-nearest neighbor (k-NN) is a basic method for classification and regression, and one of the most commonly used supervised learning methods.
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors=3)
KNN.fit(X,y)
y_predict_knn_test = KNN.predict([[80,60]])
y_predict_knn = KNN.predict(X)
print(y_predict_knn_test)
print('knn accuracy:', accuracy_score(y, y_predict_knn))
print(pd.value_counts(y_predict_knn),pd.value_counts(y))
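The choice of n_neighbors affects the result; a common way to pick it is cross-validation. A sketch, assuming the same X and y; the candidate values and cv=5 are arbitrary choices:
from sklearn.model_selection import cross_val_score
for k in [1, 3, 5, 7, 9]:
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5)
    print(k, scores.mean())   # mean cross-validated accuracy for each k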
The core idea of MeanShift is: given a data point, compute the centroid of the points inside a region of interest around it; the vector from the original point to this centroid is called the mean shift vector.
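To make the mean shift vector concrete, one iteration for a single point can be sketched as below. mean_shift_step is a hypothetical helper, not part of sklearn, and it uses a flat window rather than a kernel-weighted one:
def mean_shift_step(point, points, bandwidth):
    # points inside the region of interest around `point`
    in_region = points[np.linalg.norm(points - point, axis=1) < bandwidth]
    centroid = in_region.mean(axis=0)
    # the mean shift vector is (centroid - point); moving along it returns the updated point
    return centroid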
from sklearn.cluster import MeanShift,estimate_bandwidth
#obtain the bandwidth
bw = estimate_bandwidth(X,n_samples=500)
print(bw)
ms = MeanShift(bandwidth=bw)
ms.fit(X)
y_predict_ms = ms.predict(X)
print(pd.value_counts(y_predict_ms),pd.value_counts(y))
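The bandwidth controls how many clusters MeanShift finds. Re-fitting with the bandwidth scaled up or down and counting the centers is a quick way to see its effect; a sketch with arbitrary scaling factors:
for scale in [0.5, 1.0, 2.0]:
    ms_tmp = MeanShift(bandwidth=bw * scale).fit(X)
    print(scale, 'clusters found:', len(ms_tmp.cluster_centers_))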
y_corrected_ms = []
for i in y_predict_ms:
    if i == 0:
        y_corrected_ms.append(2)
    elif i == 1:
        y_corrected_ms.append(1)
    else:
        y_corrected_ms.append(0)
print(pd.value_counts(y_corrected_ms), pd.value_counts(y))
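As with KMeans, the corrected MeanShift labels can then be scored against the true labels:
print('corrected meanshift accuracy:', accuracy_score(y, y_corrected_ms))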