# Boston house price prediction model — source code (波士顿房价预测模型源代码)

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

np.set_printoptions(threshold=np.inf)  # print full arrays instead of truncating with "..."
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # font able to render the Chinese plot title
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with this font


def load_data():
    """Load the Boston housing dataset, min-max normalize every column with
    statistics computed on the training split only, and return the pair
    (training_data, test_data).

    Each row holds 13 feature columns followed by the MEDV target column.
    NOTE: the target column is normalized together with the features,
    matching the original tutorial's behaviour.
    """
    # Raw string: the Windows path contains backslashes that would otherwise
    # be interpreted as (invalid) escape sequences.
    datafile = r"F:\PyCharm\PyCharm文件\波士顿房价预测\housing.data"
    data = np.fromfile(datafile, sep=' ')  # flat stream of whitespace-separated floats
    # Column names; was misspelled 'LSTATA' — the correct Boston feature is 'LSTAT'.
    feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
                     'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
    feature_num = len(feature_names)  # columns per sample (14)
    # Reshape the flat stream into (num_samples, 14).
    data = data.reshape([data.shape[0] // feature_num, feature_num])
    # 80/20 train/test split; statistics are taken from the training part
    # only, so no information leaks from the test set into normalization.
    ratio = 0.8
    offset = int(data.shape[0] * ratio)
    training_data = data[:offset]
    maximums = training_data.max(axis=0)
    minimums = training_data.min(axis=0)
    for i in range(feature_num):
        data[:, i] = (data[:, i] - minimums[i]) / (maximums[i] - minimums[i])

    training_data = data[:offset]
    test_data = data[offset:]
    return training_data, test_data


# Build the training design matrix and target vector:
# x holds the 13 normalized feature columns, y holds the (normalized)
# MEDV target as an (N, 1) column vector.
training_data, test_data = load_data()
x = training_data[:, :-1]
y = training_data[:, -1:]


class Network(object):
    """Single-layer linear regression model z = x·w + b, trained with
    full-batch gradient descent on the mean-squared-error loss."""

    def __init__(self, num_of_weights):
        """Initialize weights randomly (fixed seed for reproducibility).

        num_of_weights: number of input features (13 for this dataset).
        """
        np.random.seed(0)  # deterministic starting point for every run
        self.w = np.random.randn(num_of_weights, 1)
        self.b = 0

    def forward(self, x):
        """Forward pass: return predictions of shape (num_samples, 1)."""
        z = np.dot(x, self.w) + self.b
        return z

    def loss(self, z, y):
        """Mean squared error between predictions z and targets y."""
        error = z - y
        num_samples = error.shape[0]
        cost = error * error
        cost = np.sum(cost) / num_samples
        return cost

    def gradient(self, x, y, z):
        """Gradients of the MSE loss w.r.t. w and b.

        d/dw mean((z - y)^2) = 2 * mean((z - y) * x); the original code
        omitted the factor 2, which silently halved the effective
        learning rate relative to the stated loss.
        """
        gradient_w = 2.0 * (z - y) * x            # (num_samples, num_features)
        gradient_w = np.mean(gradient_w, axis=0)  # average over the batch
        gradient_w = gradient_w[:, np.newaxis]    # reshape to (num_features, 1)
        gradient_b = 2.0 * np.mean(z - y)         # scalar bias gradient
        return gradient_w, gradient_b

    def update(self, gradient_w, gradient_b, eta=0.01):
        """One gradient-descent step with learning rate eta."""
        self.w = self.w - eta * gradient_w
        self.b = self.b - eta * gradient_b

    def train(self, x, y, iterations=100, eta=0.01):
        """Run full-batch gradient descent for `iterations` steps.

        Returns the list of loss values, one per iteration.
        """
        losses = []
        for i in range(iterations):
            z = self.forward(x)                               # predictions
            L = self.loss(z, y)                               # current loss
            gradient_w, gradient_b = self.gradient(x, y, z)   # compute gradients
            self.update(gradient_w, gradient_b, eta)          # parameter step
            losses.append(L)
            if (i + 1) % 10 == 0:
                print('iter{},loss{}'.format(i, L))  # progress every 10 iterations
        print(self.w)  # final weights
        print(self.b)  # final bias
        return losses


net = Network(13)  # z = x·w + b: 13 weights (one per feature) sharing a single bias b
num_iterations = 1000  # number of full-batch gradient-descent iterations
losses = net.train(x, y, iterations=num_iterations, eta=0.01)
# Plot the loss curve across iterations to visualize convergence.
plot_x = np.arange(num_iterations)
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.title("损失值")
plt.show()

# Tags: python, deep learning, machine learning, numpy, neural networks