K-Fold Cross-Validation

K-fold cross-validation splits the dataset into K folds of roughly equal size, trains on K-1 of them, and evaluates on the remaining fold, rotating so that every fold is held out exactly once. An example with PyTorch and scikit-learn:

import torch
import torch.nn as nn
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score

# Define a 1-D convolutional network
class ConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Conv1d with padding=1 keeps the sequence length at 100; MaxPool1d halves it to 50
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16*50, 10)  # 16 channels x 50 positions -> 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = torch.relu(x)
        x = self.pool1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        return x

# Define the data and labels
# `dataset` is assumed here to be an (N, 101) array/tensor whose first 100 columns
# are the features and whose last column is the integer class label
X = torch.Tensor(dataset[:, 0:100])
Y = torch.Tensor(dataset[:, 100])

# Define 5-fold cross-validation
kfold = KFold(n_splits=5, shuffle=True)

# Train and evaluate on each fold
for fold, (train_ids, test_ids) in enumerate(kfold.split(X)):
    print(f'Fold {fold}')
    # Split into training and test sets for this fold; unsqueeze(1) adds the channel
    # dimension expected by Conv1d, and the labels are cast to long for cross_entropy
    X_train = X[train_ids].unsqueeze(1)
    Y_train = Y[train_ids].long()
    X_test = X[test_ids].unsqueeze(1)
    Y_test = Y[test_ids].long()

    # Create a fresh model and optimizer for each fold
    model = ConvNet()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Training loop (full-batch for simplicity)
    model.train()
    for epoch in range(10):
        optimizer.zero_grad()
        outputs = model(X_train)
        loss = nn.functional.cross_entropy(outputs, Y_train)
        loss.backward()
        optimizer.step()

    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        # Compute test accuracy for this fold
        _, predicted = torch.max(outputs, 1)
        accuracy = accuracy_score(Y_test, predicted)
        print(f'Accuracy: {accuracy}')

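The per-fold accuracies are usually aggregated into a single cross-validated estimate. A minimal sketch of that pattern, assuming the loop body above is reused unchanged (the list name fold_accuracies is introduced here purely for illustration):

# Collect each fold's accuracy and report the mean and spread over all folds
fold_accuracies = []

for fold, (train_ids, test_ids) in enumerate(kfold.split(X)):
    # ... train and evaluate exactly as above, ending with `accuracy` for this fold ...
    fold_accuracies.append(accuracy)

acc = torch.tensor(fold_accuracies)
print(f'Cross-validated accuracy: {acc.mean().item():.4f} ± {acc.std().item():.4f}')
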
The key part is this loop header: the per-fold division of the data into a training set and a validation set (see the note and sketch after the snippet):

# Train and validate on each fold
for fold, (train_ids, valid_ids) in enumerate(kfold.split(X, Y)):

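Note that plain KFold ignores the second argument of split, so passing Y above does not change the folds. If each fold should preserve the class proportions of Y, scikit-learn's StratifiedKFold can be used instead. A minimal sketch, reusing the X and Y defined above:

from sklearn.model_selection import StratifiedKFold

skfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)

# Each fold now keeps roughly the same class distribution as the full label vector Y
for fold, (train_ids, valid_ids) in enumerate(skfold.split(X, Y.long())):
    X_train, Y_train = X[train_ids].unsqueeze(1), Y[train_ids].long()
    X_valid, Y_valid = X[valid_ids].unsqueeze(1), Y[valid_ids].long()
    # ... train on (X_train, Y_train) and evaluate on (X_valid, Y_valid) as above ...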