import pandas as pd
import numpy as np
import random
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
# 1. Load the training data set
data = pd.read_csv(r"iris.csv")
x = data.iloc[:, 1:]  # feature columns
Y = data.iloc[:, 0]   # label column (first column of iris.csv)
# print(x.shape)
# 2. Standardize the features
scaler = StandardScaler()
X = scaler.fit_transform(x)
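# Illustrative check (not part of the original script): after StandardScaler, X.mean(axis=0)
# is ~0 and X.std(axis=0) is ~1, so a single RBF gamma acts on comparably scaled features.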
# 3. Initialize PSO parameters
W = 0.5            # inertia weight
c1 = 0.2           # cognitive (individual) learning factor
c2 = 0.5           # social (swarm) learning factor
n_iterations = 10  # number of iterations
n_particles = 50   # swarm (population) size
# 4. Fitness function: a particle's position encodes the RBF kernel parameter gamma and the
#    penalty parameter C of an SVC. The cross-validated accuracy is printed, and the fitness
#    returned is the number of misclassified samples taken from the confusion matrix.
def fitness_function(position):
    # SVM classifier with kernel parameter gamma and penalty parameter C taken from the particle
    svclassifier = SVC(kernel='rbf', gamma=position[0], C=position[1])
    # cross_val_score clones and fits the classifier internally, so no separate fit is needed
    score = cross_val_score(svclassifier, X, Y, cv=10).mean()  # 10-fold cross-validated accuracy
    print('Classification accuracy', score)
    Y_pred = cross_val_predict(svclassifier, X, Y, cv=10)  # cross-validated predictions
    # The off-diagonal entries of the confusion matrix are the misclassified samples; return the
    # total twice to keep the (training, test) tuple interface used by the loop below.
    cm = confusion_matrix(Y, Y_pred)
    errors = cm.sum() - np.trace(cm)
    return errors, errors
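# Example (illustrative only, hypothetical values): fitness_function([0.5, 1.0]) evaluates an
# SVC with gamma=0.5 and C=1.0 and returns the same cross-validation error count twice.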
# 5. Scatter plot of the swarm in (gamma, C) space
def plot(position):
    x = []
    y = []
    for i in range(0, len(position)):
        x.append(position[i][0])
        y.append(position[i][1])
    plt.scatter(x, y, c='black', alpha=0.1)
    # Set the axis labels (and, optionally, their font)
    # font2 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 20}
    plt.xlabel('gamma')  # RBF kernel parameter
    plt.ylabel('C')      # penalty parameter
    plt.axis([0, 10, 0, 10])
    plt.gca().set_aspect('equal', adjustable='box')  # equal scaling on both axes (y is compressed by default)
    return plt.show()
# 6. Initialize particle positions and velocities, then iterate
# Particle position vectors: each particle is a random (gamma, C) point in [0, 10) x [0, 10)
particle_position_vector = np.array([np.array([random.random() * 10, random.random() * 10]) for _ in range(n_particles)])
pbest_position = particle_position_vector.copy()  # personal bests start at the initial positions (copy to avoid aliasing)
pbest_fitness_value = np.array([float('inf') for _ in range(n_particles)])  # fitness of each personal best
gbest_fitness_value = np.array([float('inf'), float('inf')])  # fitness of the global best
gbest_position = np.array([float('inf'), float('inf')])
velocity_vector = [np.array([0.0, 0.0]) for _ in range(n_particles)]  # particle velocities
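# The loop below applies the standard PSO update implemented by this script:
#   v_i <- W*v_i + c1*r1*(pbest_i - x_i) + c2*r2*(gbest - x_i)
#   x_i <- x_i + v_i
# with r1, r2 drawn uniformly from [0, 1) at every update.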
# Iterative update
iteration = 0
while iteration < n_iterations:
    # plot(particle_position_vector)  # visualize the current particle positions
    for i in range(n_particles):  # evaluate every particle
        fitness_candidate = fitness_function(particle_position_vector[i])  # fitness at this particle's position
        # print("Particle error", i, "is (training, test)", fitness_candidate, " at (gamma, C): ",
        #       particle_position_vector[i])
        # Compare with the particle's personal best and update it if this position is better
        if pbest_fitness_value[i] > fitness_candidate[1]:
            pbest_fitness_value[i] = fitness_candidate[1]
            pbest_position[i] = particle_position_vector[i]
        # Compare with the global best and update it if this position is better
        if gbest_fitness_value[1] > fitness_candidate[1]:
            gbest_fitness_value = fitness_candidate
            gbest_position = particle_position_vector[i]
        elif gbest_fitness_value[1] == fitness_candidate[1] and gbest_fitness_value[0] > fitness_candidate[0]:
            gbest_fitness_value = fitness_candidate
            gbest_position = particle_position_vector[i]
    for i in range(n_particles):  # update the velocity and position of every particle
        new_velocity = (W * velocity_vector[i]) + (c1 * random.random()) * (
                pbest_position[i] - particle_position_vector[i]) + (c2 * random.random()) * (
                gbest_position - particle_position_vector[i])
        new_position = new_velocity + particle_position_vector[i]
        velocity_vector[i] = new_velocity  # store the velocity so the inertia term W takes effect
        # Clip so gamma and C stay strictly positive (SVC rejects non-positive values)
        particle_position_vector[i] = np.clip(new_position, 1e-4, 10)
    iteration = iteration + 1
# 7. Print the final result
print("The global best position (gamma, C) is", gbest_position, "found after", iteration,
      "iterations; (training, test) misclassification counts:", fitness_function(gbest_position))
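# (Optional sketch, not part of the original script) Retrain a single SVC at the best
# (gamma, C) found by PSO to obtain a final model for later predictions:
# final_svc = SVC(kernel='rbf', gamma=gbest_position[0], C=gbest_position[1])
# final_svc.fit(X, Y)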