PyTorch reimplementation: batch_norm
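Batch normalization normalizes each channel over the batch and spatial dimensions: for channel c, y = gamma_c * (x - mu_c) / sqrt(var_c + eps) + beta_c, where mu_c and var_c are the mean and (biased) variance of x taken over the N, H and W axes. The NumPy code below reproduces this computation for a 4D input of shape (N, C, H, W).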

import numpy as np

def batch_norm(inputs, gamma, beta, eps):
    N, C, H, W = inputs.shape

    # Per-channel mean and variance, computed over the N, H and W axes
    mean = np.mean(inputs, axis=(0, 2, 3), keepdims=True)
    var = np.var(inputs, axis=(0, 2, 3), keepdims=True)

    # Normalize the input; eps guards against division by zero
    inputs_norm = (inputs - mean) / np.sqrt(var + eps)

    # Reshape gamma and beta to (1, C, 1, 1) so they broadcast per channel
    gamma = gamma.reshape(1, C, 1, 1)
    beta = beta.reshape(1, C, 1, 1)

    # Scale and shift
    outputs = gamma * inputs_norm + beta

    return outputs

# Create mock data
N, C, H, W = 4, 3, 2, 2  # example shape
inputs = np.random.rand(N, C, H, W)  # input data
gamma = np.random.rand(C)  # scale factors, one per channel
beta = np.random.rand(C)  # shift factors, one per channel
eps = 1e-5  # small constant to avoid division by zero

# Run batch normalization
outputs = batch_norm(inputs, gamma, beta, eps)

# Print the shape of the normalized output
print("Normalized output shape:", outputs.shape)
