# 10. K-means clustering on the digits dataset (PCA-reduced data)

from time import time

import numpy as np

import matplotlib.pyplot as plt

from sklearn import metrics

from sklearn.cluster import KMeans

from sklearn.datasets import load_digits

from sklearn.decomposition import PCA

from sklearn.preprocessing import scale

# Use the SimHei font so the Chinese characters in the plot title render.
plt.rcParams['font.sans-serif'] = ['SimHei']

# Keep the minus sign rendering correctly while a non-ASCII font is active.
plt.rcParams['axes.unicode_minus'] = False

# Fix the global RNG seed so KMeans initialisation is reproducible.
np.random.seed(42)

# Load the 8x8 digit images as a flat (n_samples, 64) feature matrix plus labels.
X_digits, y_digits = load_digits(return_X_y=True)

# Standardise each feature to zero mean / unit variance before clustering.
data = scale(X_digits)

n_samples, n_features = data.shape

# Number of distinct target classes (the 10 digits) -> number of clusters.
n_digits = len(np.unique(y_digits))

# Ground-truth labels used by the supervised clustering metrics below.
labels = y_digits

# Number of samples drawn when estimating the silhouette score (for speed).
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"

      % (n_digits, n_samples, n_features))

# Header row of the benchmark table printed by bench_k_means.
print(82 * '_')

print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')

def bench_k_means(estimator, name, data, true_labels=None, sample_size=300):
    """Fit *estimator* on *data* and print one row of benchmark metrics.

    The original version read the module-level globals ``labels`` and
    ``sample_size``; both are now overridable parameters with defaults
    that preserve the original behaviour.

    Parameters
    ----------
    estimator : clustering estimator
        Must expose ``fit``, ``inertia_`` and ``labels_`` (e.g. ``KMeans``).
    name : str
        Row label printed in the benchmark table.
    data : ndarray of shape (n_samples, n_features)
        Feature matrix to cluster.
    true_labels : array-like, optional
        Ground-truth class labels for the supervised metrics; falls back
        to the module-level ``labels`` when omitted.
    sample_size : int, optional
        Number of samples used to estimate the silhouette score.
    """
    if true_labels is None:
        # Fall back to the module-level ground truth, as the original did.
        true_labels = labels
    t0 = time()
    estimator.fit(data)
    # One row: name, wall-clock fit time, inertia, then the clustering metrics.
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(true_labels, estimator.labels_),
             metrics.completeness_score(true_labels, estimator.labels_),
             metrics.v_measure_score(true_labels, estimator.labels_),
             metrics.adjusted_rand_score(true_labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(true_labels, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))

# Benchmark k-means++ seeding (best of n_init=10 random restarts).
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),

              name="k-means++", data=data)

# Benchmark purely random centroid initialisation.
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),

              name="random", data=data)

# Seed the centroids with the PCA components; this start is deterministic,
# so the k-means algorithm is run only once (n_init=1).
pca = PCA(n_components=n_digits).fit(data)

bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),

              name="PCA-based",

              data=data)

print(82 * '_')

# Visualise the clustering on the data reduced to 2 dimensions with PCA.
reduced_data = PCA(n_components=2).fit_transform(data)

kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)

kmeans.fit(reduced_data)

# Step size of the mesh; decrease it to increase the quality of the VQ plot.
h = .02    # points in the mesh [x_min, x_max] x [y_min, y_max].

# Plot the decision boundary: build a mesh covering the data plus a margin.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1

y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1

xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain a cluster label for every mesh point using the model trained above.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Reshape the flat predictions back to the mesh grid for imshow.
Z = Z.reshape(xx.shape)

plt.figure(1)

plt.clf()

# Paint each mesh point with its cluster colour — the cluster regions
# of the 2-D k-means model form the background image.
plt.imshow(Z, interpolation='nearest',

          extent=(xx.min(), xx.max(), yy.min(), yy.max()),

          cmap=plt.cm.Paired,

          aspect='auto', origin='lower')

# Overlay the PCA-reduced data points as small black dots.
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)

# Plot the cluster centroids as large white crosses.
centroids = kmeans.cluster_centers_

plt.scatter(centroids[:, 0], centroids[:, 1],

            marker='x', s=169, linewidths=3,

            color='w', zorder=10)

# Title (Chinese): "K-means clustering on the digits dataset (PCA-reduced data)".
plt.title('数字数据集上的K-均值聚类(PCA约简数据)')

plt.xlim(x_min, x_max)

plt.ylim(y_min, y_max)

# Hide the ticks — the PCA coordinates themselves carry no direct meaning.
plt.xticks(())

plt.yticks(())

plt.show()


# (Blog footer, kept as a comment so the file stays runnable:
#  "You may also be interested in: 10. K-means clustering on the digits
#  dataset (PCA-reduced data)")