24.8.19 Study Notes (MNIST, Otto)

PyTorch MNIST handwritten digit recognition:

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

# Set a random seed for reproducibility
torch.manual_seed(0)

# Define hyperparameters
batch_size = 32
learning_rate = 0.001
num_epochs = 10

# 1. Data preprocessing
# Use transforms.Compose to chain multiple transforms
transform = transforms.Compose([
    transforms.ToTensor(),  # convert a PIL Image or numpy array to a tensor, rescaling values from [0, 255] to [0.0, 1.0]
    transforms.Normalize((0.1307,), (0.3081,))  # normalize with the mean and std of the MNIST training set
])
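
# Aside (a sketch, not part of the training script): the constants 0.1307 and
# 0.3081 above are simply the mean and standard deviation of all MNIST training
# pixels after scaling to [0, 1], and can be re-derived from the raw data:
#   >>> raw = datasets.MNIST(root='./data', train=True, download=True)
#   >>> (raw.data.float() / 255).mean(), (raw.data.float() / 255).std()
#   (tensor(0.1307), tensor(0.3081))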

# Load the training data
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# Load the test data
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
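
# Sanity check (sketch): each batch from train_loader is a pair of tensors,
# 32 grayscale 28x28 images and their 32 integer labels:
#   >>> images, labels = next(iter(train_loader))
#   >>> images.shape, labels.shape
#   (torch.Size([32, 1, 28, 28]), torch.Size([32]))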

# 2. Build the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Input layer to first hidden layer
        self.fc1 = nn.Linear(28 * 28, 512)  # input: 28*28 = 784 features; first hidden layer: 512 units
        self.fc2 = nn.Linear(512, 256)  # second hidden layer: 256 units
        self.fc3 = nn.Linear(256, 128)  # third hidden layer: 128 units
        self.fc4 = nn.Linear(128, 64)  # fourth hidden layer: 64 units
        self.fc5 = nn.Linear(64, 10)  # output layer: 10 units, one per digit

    def forward(self, x):
        # Flatten the input image into a 1-D vector
        x = x.view(-1, 28 * 28)
        x = torch.relu(self.fc1(x))  # first fully connected layer + ReLU
        x = torch.relu(self.fc2(x))  # second fully connected layer + ReLU
        x = torch.relu(self.fc3(x))  # third fully connected layer + ReLU
        x = torch.relu(self.fc4(x))  # fourth fully connected layer + ReLU
        x = self.fc5(x)  # output layer (raw logits; CrossEntropyLoss applies softmax internally)
        return x

# Create the model instance
model = Net()
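
# Sanity check (sketch): a dummy forward pass confirms one logit per digit,
# and the layer sizes above give about 575k trainable parameters:
#   >>> model(torch.zeros(2, 1, 28, 28)).shape
#   torch.Size([2, 10])
#   >>> sum(p.numel() for p in model.parameters())
#   575050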

# 3. Loss function and optimizer
criterion = nn.CrossEntropyLoss()  # cross-entropy loss (expects raw logits)
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)  # stochastic gradient descent with momentum
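
# For reference, the per-parameter update SGD with momentum performs
# (PyTorch's formulation, with gradient g and velocity buffer v):
#   v <- momentum * v + g
#   p <- p - lr * v
# momentum=0.5 lets past gradients carry over, smoothing the updates.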

# 4. Train the model
def train(epoch):
    model.train()  # switch to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()  # zero out the gradients
        output = model(data)  # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()  # backpropagate
        optimizer.step()  # update the weights
        if batch_idx % 150 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# 5. Evaluate the model
def test():
    model.eval()  # switch to evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            output = model(data)  # forward pass: one logit per digit
            test_loss += criterion(output, target).item()  # accumulate the batch loss
            pred = output.argmax(dim=1, keepdim=True)  # shape (batch_size, 1): index of the highest logit per row
            correct += pred.eq(target.view_as(pred)).sum().item()  # count correct predictions
    test_loss /= len(test_loader.dataset)  # average loss per sample
    accuracy = 100. * correct / len(test_loader.dataset)  # accuracy in percent
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    return accuracy
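
# Worked micro-example (sketch) of the argmax/eq bookkeeping in test():
#   >>> logits = torch.tensor([[0.1, 2.0, -1.0], [1.5, 0.3, 0.2]])
#   >>> pred = logits.argmax(dim=1, keepdim=True)   # tensor([[1], [0]])
#   >>> target = torch.tensor([1, 0])
#   >>> pred.eq(target.view_as(pred)).sum().item()
#   2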

# Run training. Splitting the logic into separate train() and test() functions keeps this driver loop short and clean.
for epoch in range(1, num_epochs + 1):
    train(epoch)
    test()

# Print the final test-set accuracy (this calls test() one more time, which is why the last evaluation line appears twice in the output below)
print("Final Test Accuracy: {:.2f}%".format(test()))

Output:

"D:\deep learn\envs\pytorch\python.exe" C:\Users\kk\PycharmProjects\pythonProject2\dataset\train\MNIST.py 
Train Epoch: 1 [0/60000 (0%)]	Loss: 2.309689
Train Epoch: 1 [4800/60000 (8%)]	Loss: 2.310969
Train Epoch: 1 [9600/60000 (16%)]	Loss: 2.305695
Train Epoch: 1 [14400/60000 (24%)]	Loss: 2.290891
Train Epoch: 1 [19200/60000 (32%)]	Loss: 2.284240
Train Epoch: 1 [24000/60000 (40%)]	Loss: 2.256572
Train Epoch: 1 [28800/60000 (48%)]	Loss: 2.258740
Train Epoch: 1 [33600/60000 (56%)]	Loss: 2.281724
Train Epoch: 1 [38400/60000 (64%)]	Loss: 2.259450
Train Epoch: 1 [43200/60000 (72%)]	Loss: 2.222388
Train Epoch: 1 [48000/60000 (80%)]	Loss: 2.193747
Train Epoch: 1 [52800/60000 (88%)]	Loss: 2.152849
Train Epoch: 1 [57600/60000 (96%)]	Loss: 2.134803

Test set: Average loss: 0.0668, Accuracy: 2698/10000 (26.98%)

Train Epoch: 2 [0/60000 (0%)]	Loss: 2.225060
Train Epoch: 2 [4800/60000 (8%)]	Loss: 2.149050
Train Epoch: 2 [9600/60000 (16%)]	Loss: 2.068803
Train Epoch: 2 [14400/60000 (24%)]	Loss: 2.028979
Train Epoch: 2 [19200/60000 (32%)]	Loss: 1.979294
Train Epoch: 2 [24000/60000 (40%)]	Loss: 1.915839
Train Epoch: 2 [28800/60000 (48%)]	Loss: 2.029020
Train Epoch: 2 [33600/60000 (56%)]	Loss: 1.741296
Train Epoch: 2 [38400/60000 (64%)]	Loss: 1.905341
Train Epoch: 2 [43200/60000 (72%)]	Loss: 1.622296
Train Epoch: 2 [48000/60000 (80%)]	Loss: 1.477951
Train Epoch: 2 [52800/60000 (88%)]	Loss: 1.222387
Train Epoch: 2 [57600/60000 (96%)]	Loss: 1.314141

Test set: Average loss: 0.0373, Accuracy: 7225/10000 (72.25%)

Train Epoch: 3 [0/60000 (0%)]	Loss: 1.157169
Train Epoch: 3 [4800/60000 (8%)]	Loss: 1.198444
Train Epoch: 3 [9600/60000 (16%)]	Loss: 0.888496
Train Epoch: 3 [14400/60000 (24%)]	Loss: 0.746717
Train Epoch: 3 [19200/60000 (32%)]	Loss: 0.708051
Train Epoch: 3 [24000/60000 (40%)]	Loss: 0.802987
Train Epoch: 3 [28800/60000 (48%)]	Loss: 0.748208
Train Epoch: 3 [33600/60000 (56%)]	Loss: 0.570825
Train Epoch: 3 [38400/60000 (64%)]	Loss: 0.842269
Train Epoch: 3 [43200/60000 (72%)]	Loss: 0.457923
Train Epoch: 3 [48000/60000 (80%)]	Loss: 0.469652
Train Epoch: 3 [52800/60000 (88%)]	Loss: 0.289702
Train Epoch: 3 [57600/60000 (96%)]	Loss: 0.435462

Test set: Average loss: 0.0148, Accuracy: 8644/10000 (86.44%)

Train Epoch: 4 [0/60000 (0%)]	Loss: 0.678779
Train Epoch: 4 [4800/60000 (8%)]	Loss: 0.653893
Train Epoch: 4 [9600/60000 (16%)]	Loss: 0.330618
Train Epoch: 4 [14400/60000 (24%)]	Loss: 0.517715
Train Epoch: 4 [19200/60000 (32%)]	Loss: 0.339571
Train Epoch: 4 [24000/60000 (40%)]	Loss: 0.250688
Train Epoch: 4 [28800/60000 (48%)]	Loss: 0.555737
Train Epoch: 4 [33600/60000 (56%)]	Loss: 0.485628
Train Epoch: 4 [38400/60000 (64%)]	Loss: 0.341829
Train Epoch: 4 [43200/60000 (72%)]	Loss: 0.294278
Train Epoch: 4 [48000/60000 (80%)]	Loss: 0.210282
Train Epoch: 4 [52800/60000 (88%)]	Loss: 0.418023
Train Epoch: 4 [57600/60000 (96%)]	Loss: 0.639594

Test set: Average loss: 0.0114, Accuracy: 8923/10000 (89.23%)

Train Epoch: 5 [0/60000 (0%)]	Loss: 0.075756
Train Epoch: 5 [4800/60000 (8%)]	Loss: 0.628859
Train Epoch: 5 [9600/60000 (16%)]	Loss: 0.268495
Train Epoch: 5 [14400/60000 (24%)]	Loss: 0.245660
Train Epoch: 5 [19200/60000 (32%)]	Loss: 0.157376
Train Epoch: 5 [24000/60000 (40%)]	Loss: 0.149876
Train Epoch: 5 [28800/60000 (48%)]	Loss: 0.259019
Train Epoch: 5 [33600/60000 (56%)]	Loss: 0.199592
Train Epoch: 5 [38400/60000 (64%)]	Loss: 0.534609
Train Epoch: 5 [43200/60000 (72%)]	Loss: 0.271385
Train Epoch: 5 [48000/60000 (80%)]	Loss: 0.226728
Train Epoch: 5 [52800/60000 (88%)]	Loss: 0.562785
Train Epoch: 5 [57600/60000 (96%)]	Loss: 0.609851

Test set: Average loss: 0.0099, Accuracy: 9046/10000 (90.46%)

Train Epoch: 6 [0/60000 (0%)]	Loss: 0.283710
Train Epoch: 6 [4800/60000 (8%)]	Loss: 0.288696
Train Epoch: 6 [9600/60000 (16%)]	Loss: 0.389604
Train Epoch: 6 [14400/60000 (24%)]	Loss: 0.192314
Train Epoch: 6 [19200/60000 (32%)]	Loss: 0.181009
Train Epoch: 6 [24000/60000 (40%)]	Loss: 0.258654
Train Epoch: 6 [28800/60000 (48%)]	Loss: 0.076739
Train Epoch: 6 [33600/60000 (56%)]	Loss: 0.391774
Train Epoch: 6 [38400/60000 (64%)]	Loss: 0.581220
Train Epoch: 6 [43200/60000 (72%)]	Loss: 0.308373
Train Epoch: 6 [48000/60000 (80%)]	Loss: 0.255795
Train Epoch: 6 [52800/60000 (88%)]	Loss: 0.123116
Train Epoch: 6 [57600/60000 (96%)]	Loss: 0.535722

Test set: Average loss: 0.0089, Accuracy: 9151/10000 (91.51%)

Train Epoch: 7 [0/60000 (0%)]	Loss: 0.195117
Train Epoch: 7 [4800/60000 (8%)]	Loss: 0.334070
Train Epoch: 7 [9600/60000 (16%)]	Loss: 0.426106
Train Epoch: 7 [14400/60000 (24%)]	Loss: 0.547939
Train Epoch: 7 [19200/60000 (32%)]	Loss: 0.621642
Train Epoch: 7 [24000/60000 (40%)]	Loss: 0.271703
Train Epoch: 7 [28800/60000 (48%)]	Loss: 0.427611
Train Epoch: 7 [33600/60000 (56%)]	Loss: 0.134861
Train Epoch: 7 [38400/60000 (64%)]	Loss: 0.331025
Train Epoch: 7 [43200/60000 (72%)]	Loss: 0.235256
Train Epoch: 7 [48000/60000 (80%)]	Loss: 0.293789
Train Epoch: 7 [52800/60000 (88%)]	Loss: 0.846908
Train Epoch: 7 [57600/60000 (96%)]	Loss: 0.340743

Test set: Average loss: 0.0080, Accuracy: 9240/10000 (92.40%)

Train Epoch: 8 [0/60000 (0%)]	Loss: 0.263807
Train Epoch: 8 [4800/60000 (8%)]	Loss: 0.145736
Train Epoch: 8 [9600/60000 (16%)]	Loss: 0.219392
Train Epoch: 8 [14400/60000 (24%)]	Loss: 0.200102
Train Epoch: 8 [19200/60000 (32%)]	Loss: 0.100795
Train Epoch: 8 [24000/60000 (40%)]	Loss: 0.318653
Train Epoch: 8 [28800/60000 (48%)]	Loss: 0.277175
Train Epoch: 8 [33600/60000 (56%)]	Loss: 0.117883
Train Epoch: 8 [38400/60000 (64%)]	Loss: 0.111346
Train Epoch: 8 [43200/60000 (72%)]	Loss: 0.292880
Train Epoch: 8 [48000/60000 (80%)]	Loss: 0.150133
Train Epoch: 8 [52800/60000 (88%)]	Loss: 0.177790
Train Epoch: 8 [57600/60000 (96%)]	Loss: 0.088824

Test set: Average loss: 0.0073, Accuracy: 9299/10000 (92.99%)

Train Epoch: 9 [0/60000 (0%)]	Loss: 0.201245
Train Epoch: 9 [4800/60000 (8%)]	Loss: 0.182352
Train Epoch: 9 [9600/60000 (16%)]	Loss: 0.124325
Train Epoch: 9 [14400/60000 (24%)]	Loss: 0.176092
Train Epoch: 9 [19200/60000 (32%)]	Loss: 0.217576
Train Epoch: 9 [24000/60000 (40%)]	Loss: 0.278557
Train Epoch: 9 [28800/60000 (48%)]	Loss: 0.227982
Train Epoch: 9 [33600/60000 (56%)]	Loss: 0.165331
Train Epoch: 9 [38400/60000 (64%)]	Loss: 0.096038
Train Epoch: 9 [43200/60000 (72%)]	Loss: 0.299993
Train Epoch: 9 [48000/60000 (80%)]	Loss: 0.281508
Train Epoch: 9 [52800/60000 (88%)]	Loss: 0.163592
Train Epoch: 9 [57600/60000 (96%)]	Loss: 0.297048

Test set: Average loss: 0.0066, Accuracy: 9384/10000 (93.84%)

Train Epoch: 10 [0/60000 (0%)]	Loss: 0.224364
Train Epoch: 10 [4800/60000 (8%)]	Loss: 0.126923
Train Epoch: 10 [9600/60000 (16%)]	Loss: 0.219346
Train Epoch: 10 [14400/60000 (24%)]	Loss: 0.247256
Train Epoch: 10 [19200/60000 (32%)]	Loss: 0.156729
Train Epoch: 10 [24000/60000 (40%)]	Loss: 0.060918
Train Epoch: 10 [28800/60000 (48%)]	Loss: 0.074432
Train Epoch: 10 [33600/60000 (56%)]	Loss: 0.064755
Train Epoch: 10 [38400/60000 (64%)]	Loss: 0.139279
Train Epoch: 10 [43200/60000 (72%)]	Loss: 0.245214
Train Epoch: 10 [48000/60000 (80%)]	Loss: 0.314279
Train Epoch: 10 [52800/60000 (88%)]	Loss: 0.150742
Train Epoch: 10 [57600/60000 (96%)]	Loss: 0.076270

Test set: Average loss: 0.0060, Accuracy: 9422/10000 (94.22%)


Test set: Average loss: 0.0060, Accuracy: 9422/10000 (94.22%)

Final Test Accuracy: 94.22%

Process finished with exit code 0

Otto Group Product Classification Challenge:

import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset

# Set a random seed for reproducibility
torch.manual_seed(0)

# Define hyperparameters
batch_size = 32
learning_rate = 0.001
num_epochs = 10

# 1. Data preprocessing
# Read the training data
train_df = pd.read_csv("C:/Users/kk/PycharmProjects/pythonProject/train.csv")

# Separate features and labels (training set only)
X = train_df.drop(['id', 'target'], axis=1).values
y = train_df['target'].astype('category').cat.codes.values  # encode the string labels as integers 0-8
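
# Note (sketch): .astype('category').cat.codes assigns integers in sorted
# category order, so here Class_1..Class_9 map to 0..8:
#   >>> s = pd.Series(['Class_2', 'Class_1', 'Class_9']).astype('category')
#   >>> s.cat.codes.tolist()
#   [1, 0, 2]   # codes follow the sorted categories present in the data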

# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
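
# What StandardScaler does (sketch): per feature column, z = (x - mean) / std,
# with mean/std estimated on the training split only (fit_transform) and reused
# unchanged on validation and test data (transform) to avoid leakage:
#   >>> StandardScaler().fit_transform([[1.0, 10.0], [3.0, 30.0]])
#   array([[-1., -1.],
#          [ 1.,  1.]])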

# Convert to PyTorch tensors
X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
X_val_tensor = torch.tensor(X_val, dtype=torch.float32)
y_val_tensor = torch.tensor(y_val, dtype=torch.long)

# Create the data loaders
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

# 2. Build the model
class Net(nn.Module):
    def __init__(self, input_dim):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_dim, 256)  # input layer to first hidden layer
        self.fc2 = nn.Linear(256, 128)  # first to second hidden layer
        self.fc3 = nn.Linear(128, 64)  # second to third hidden layer
        self.fc4 = nn.Linear(64, 32)  # third to fourth hidden layer
        self.fc5 = nn.Linear(32, 9)  # fourth hidden layer to the 9-class output layer
        self.dropout1 = nn.Dropout(p=0.5)  # dropout for regularization
        self.dropout2 = nn.Dropout(p=0.5)  # dropout for regularization
        self.dropout3 = nn.Dropout(p=0.5)  # dropout for regularization
        self.dropout4 = nn.Dropout(p=0.5)  # dropout for regularization

    def forward(self, x):
        x = torch.relu(self.fc1(x))  # first fully connected layer + ReLU
        x = self.dropout1(x)  # apply dropout
        x = torch.relu(self.fc2(x))  # second fully connected layer + ReLU
        x = self.dropout2(x)  # apply dropout
        x = torch.relu(self.fc3(x))  # third fully connected layer + ReLU
        x = self.dropout3(x)  # apply dropout
        x = torch.relu(self.fc4(x))  # fourth fully connected layer + ReLU
        x = self.dropout4(x)  # apply dropout
        x = self.fc5(x)  # output layer (raw logits)
        return x

# Create the model instance
input_dim = X_train_tensor.shape[1]
model = Net(input_dim)
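
# Note (sketch): nn.Dropout is only active in training mode; model.eval()
# (called in test() below) turns it into the identity, so validation and the
# final predictions are deterministic:
#   >>> d = nn.Dropout(p=0.5); x = torch.ones(4)
#   >>> d.train()(x)    # training mode: roughly half zeroed, survivors scaled by 2
#   tensor([2., 0., 2., 2.])   # one possible draw
#   >>> d.eval()(x)     # eval mode: identity
#   tensor([1., 1., 1., 1.])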

# 3. Loss function and optimizer
criterion = nn.CrossEntropyLoss()  # cross-entropy loss (expects raw logits)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
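
# For reference, Adam's per-parameter update (defaults beta1=0.9, beta2=0.999):
#   m <- beta1 * m + (1 - beta1) * g           (first-moment estimate)
#   v <- beta2 * v + (1 - beta2) * g**2        (second-moment estimate)
#   p <- p - lr * m_hat / (sqrt(v_hat) + eps)  (with bias-corrected m_hat, v_hat)
# Unlike plain SGD, each parameter effectively gets its own step size.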

# 4. Train the model
def train(epoch):
    model.train()  # switch to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()  # zero out the gradients
        output = model(data)  # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()  # backpropagate
        optimizer.step()  # update the weights
        if batch_idx % 300 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# 5. Validate the model
def test():
    model.eval()  # switch to evaluation mode (disables dropout)
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in val_loader:
            output = model(data)  # forward pass: one logit per class
            test_loss += criterion(output, target).item()  # accumulate the batch loss
            pred = output.argmax(dim=1, keepdim=True)  # shape (batch_size, 1): index of the highest logit per row
            correct += pred.eq(target.view_as(pred)).sum().item()  # count correct predictions
    test_loss /= len(val_loader.dataset)  # average loss per sample
    accuracy = 100. * correct / len(val_loader.dataset)  # accuracy in percent
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(val_loader.dataset), accuracy))

# Run training
for epoch in range(1, num_epochs + 1):
    train(epoch)
    test()

# 6. Process the test data
# Read the test data
test_df = pd.read_csv("C:/Users/kk/PycharmProjects/pythonProject/test.csv")

# Keep only the feature columns: the test set has no target column to drop, so removing id is enough
X_test = test_df.drop('id', axis=1).values

# Standardize the test data with the scaler fitted on the training split
X_test = scaler.transform(X_test)

# Convert to a PyTorch tensor
X_test_tensor = torch.tensor(X_test, dtype=torch.float32)

# 7. Predict on the test data
model.eval()
with torch.no_grad():
    output = model(X_test_tensor)  # forward pass: raw logits for each class (softmax applied below)

# 8. Build the submission
# Get the predicted probabilities
probabilities = torch.softmax(output, dim=1).numpy()  # softmax turns each row of logits into class probabilities that sum to 1 (not a hard one-hot vector)
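
# Sanity check (sketch): softmax rows are proper probability distributions:
#   >>> torch.softmax(torch.tensor([[2.0, 1.0, 0.0]]), dim=1)
#   tensor([[0.6652, 0.2447, 0.0900]])   # non-negative, sums to 1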

# Create the submission DataFrame
submission = pd.DataFrame({
    'id': test_df['id'],
    'Class_1': probabilities[:, 0],
    'Class_2': probabilities[:, 1],
    'Class_3': probabilities[:, 2],
    'Class_4': probabilities[:, 3],
    'Class_5': probabilities[:, 4],
    'Class_6': probabilities[:, 5],
    'Class_7': probabilities[:, 6],
    'Class_8': probabilities[:, 7],
    'Class_9': probabilities[:, 8]
})

# Save the submission file
submission.to_csv('ottohaha.csv', index=False)
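
# Quick check (sketch): the submission should have one id column plus nine
# probability columns, with each row of probabilities summing to ~1:
#   >>> submission.shape          # (number of test rows, 10)
#   >>> submission.iloc[0, 1:].sum()
#   ~1.0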

# Print a completion message
print("Submission file 'ottohaha.csv' has been created.")

Output:

"D:\deep learn\envs\pytorch\python.exe" C:\Users\kk\PycharmProjects\pythonProject2\dataset\train\dogcat\train.py 
Train Epoch: 1 [0/49502 (0%)]	Loss: 2.219477
Train Epoch: 1 [9600/49502 (19%)]	Loss: 1.185523
Train Epoch: 1 [19200/49502 (39%)]	Loss: 0.827805
Train Epoch: 1 [28800/49502 (58%)]	Loss: 0.812111
Train Epoch: 1 [38400/49502 (78%)]	Loss: 0.678358
Train Epoch: 1 [48000/49502 (97%)]	Loss: 0.602139

Validation set: Average loss: 0.0233, Accuracy: 9029/12376 (72.96%)

Train Epoch: 2 [0/49502 (0%)]	Loss: 0.587927
Train Epoch: 2 [9600/49502 (19%)]	Loss: 0.894121
Train Epoch: 2 [19200/49502 (39%)]	Loss: 0.926607
Train Epoch: 2 [28800/49502 (58%)]	Loss: 0.752447
Train Epoch: 2 [38400/49502 (78%)]	Loss: 0.563700
Train Epoch: 2 [48000/49502 (97%)]	Loss: 0.749053

Validation set: Average loss: 0.0213, Accuracy: 9133/12376 (73.80%)

Train Epoch: 3 [0/49502 (0%)]	Loss: 0.943063
Train Epoch: 3 [9600/49502 (19%)]	Loss: 0.886904
Train Epoch: 3 [19200/49502 (39%)]	Loss: 0.726560
Train Epoch: 3 [28800/49502 (58%)]	Loss: 0.659948
Train Epoch: 3 [38400/49502 (78%)]	Loss: 0.745690
Train Epoch: 3 [48000/49502 (97%)]	Loss: 0.529640

Validation set: Average loss: 0.0209, Accuracy: 9178/12376 (74.16%)

Train Epoch: 4 [0/49502 (0%)]	Loss: 1.031278
Train Epoch: 4 [9600/49502 (19%)]	Loss: 0.561578
Train Epoch: 4 [19200/49502 (39%)]	Loss: 0.948739
Train Epoch: 4 [28800/49502 (58%)]	Loss: 0.688323
Train Epoch: 4 [38400/49502 (78%)]	Loss: 0.588122
Train Epoch: 4 [48000/49502 (97%)]	Loss: 0.772149

Validation set: Average loss: 0.0202, Accuracy: 9301/12376 (75.15%)

Train Epoch: 5 [0/49502 (0%)]	Loss: 0.599225
Train Epoch: 5 [9600/49502 (19%)]	Loss: 0.706988
Train Epoch: 5 [19200/49502 (39%)]	Loss: 0.737061
Train Epoch: 5 [28800/49502 (58%)]	Loss: 0.830560
Train Epoch: 5 [38400/49502 (78%)]	Loss: 0.694044
Train Epoch: 5 [48000/49502 (97%)]	Loss: 0.556151

Validation set: Average loss: 0.0199, Accuracy: 9355/12376 (75.59%)

Train Epoch: 6 [0/49502 (0%)]	Loss: 0.393021
Train Epoch: 6 [9600/49502 (19%)]	Loss: 0.736266
Train Epoch: 6 [19200/49502 (39%)]	Loss: 0.893262
Train Epoch: 6 [28800/49502 (58%)]	Loss: 0.800717
Train Epoch: 6 [38400/49502 (78%)]	Loss: 0.747830
Train Epoch: 6 [48000/49502 (97%)]	Loss: 0.695366

Validation set: Average loss: 0.0200, Accuracy: 9292/12376 (75.08%)

Train Epoch: 7 [0/49502 (0%)]	Loss: 1.168882
Train Epoch: 7 [9600/49502 (19%)]	Loss: 0.769432
Train Epoch: 7 [19200/49502 (39%)]	Loss: 0.561796
Train Epoch: 7 [28800/49502 (58%)]	Loss: 0.564427
Train Epoch: 7 [38400/49502 (78%)]	Loss: 0.608078
Train Epoch: 7 [48000/49502 (97%)]	Loss: 0.935349

Validation set: Average loss: 0.0193, Accuracy: 9414/12376 (76.07%)

Train Epoch: 8 [0/49502 (0%)]	Loss: 0.671977
Train Epoch: 8 [9600/49502 (19%)]	Loss: 0.751692
Train Epoch: 8 [19200/49502 (39%)]	Loss: 0.873173
Train Epoch: 8 [28800/49502 (58%)]	Loss: 0.727809
Train Epoch: 8 [38400/49502 (78%)]	Loss: 0.514163
Train Epoch: 8 [48000/49502 (97%)]	Loss: 0.611271

Validation set: Average loss: 0.0193, Accuracy: 9405/12376 (75.99%)

Train Epoch: 9 [0/49502 (0%)]	Loss: 0.487758
Train Epoch: 9 [9600/49502 (19%)]	Loss: 1.141270
Train Epoch: 9 [19200/49502 (39%)]	Loss: 0.712523
Train Epoch: 9 [28800/49502 (58%)]	Loss: 0.570099
Train Epoch: 9 [38400/49502 (78%)]	Loss: 0.683906
Train Epoch: 9 [48000/49502 (97%)]	Loss: 0.772561

Validation set: Average loss: 0.0189, Accuracy: 9443/12376 (76.30%)

Train Epoch: 10 [0/49502 (0%)]	Loss: 0.647482
Train Epoch: 10 [9600/49502 (19%)]	Loss: 0.729250
Train Epoch: 10 [19200/49502 (39%)]	Loss: 0.622458
Train Epoch: 10 [28800/49502 (58%)]	Loss: 0.719655
Train Epoch: 10 [38400/49502 (78%)]	Loss: 0.520825
Train Epoch: 10 [48000/49502 (97%)]	Loss: 0.653330

Validation set: Average loss: 0.0189, Accuracy: 9520/12376 (76.92%)

Submission1 file 'submission1.csv' has been created.

Process finished with exit code 0
