The code in this article mainly demonstrates three things: rate coding, latency coding, and delta modulation of inputs into spike trains.
The dataset is MNIST, a standard benchmark widely used in deep learning.
Three big benefits of using spike trains as input: the 3 S's
Spikes: the input to a spiking neural network is a sequence of 0s and 1s, a digitized stand-in for the nerve impulses that travel along axons in the human brain.
Sparsity: sparsity means that the spike trains from the previous point are usually sparse matrices, i.e. most elements are 0 and only a small number of meaningful entries are 1. Together with the binary nature of spikes, this makes hardware implementations of spiking neural networks extremely power-efficient; it is a fair bet that the brain chips of future intelligent unmanned systems will increasingly rely on spiking neural networks.
Static-Suppression: static suppression, also called event-driven processing, sounds abstract, but it too is inspired by the brain: we are generally more sensitive to things that move or change. It means that when an input is converted into a spike train, the unchanging parts are suppressed and the changing parts stand out, so new "events" drive the generation of spikes. A small toy sketch of these three properties follows this list.
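As a quick, purely illustrative sketch of these three properties (the variable names and numbers here are made up for illustration and are not part of the tutorial code):
import torch
# Toy spike train: 10 time steps x 5 neurons, each firing with probability 0.2 per step.
toy_spikes = torch.bernoulli(torch.full((10, 5), 0.2))
print("Fraction of zeros (sparsity):", 1 - toy_spikes.mean().item())
# Event-driven idea: spikes are produced only where the signal changes.
toy_signal = torch.tensor([0., 0., 0., 1., 1., 1., 0., 0.])
toy_events = (toy_signal[1:] - toy_signal[:-1]).abs() > 0
print("Change-driven spikes:", toy_events.int())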
Spiking neural networks are the third generation of neural networks. Research on their brain-inspired intelligence and low-power hardware implementation has only taken off in the last few years. Compared with the second-generation networks that shine today, spiking networks still lag behind in applications, theory, and adoption, but they are well worth continued exploration.
The main code follows below. (Please install the snntorch package first via pip install snntorch.)
import snntorch as snn
import torch
# Training Parameters
batch_size=128
data_path='/data/mnist'
num_classes = 10 # MNIST has 10 output classes
# Torch Variables
dtype = torch.float # all data in tensors is stored as torch.float
from torchvision import datasets, transforms
# Define a transform
# Image preprocessing, implemented by composing a single transform object
transform = transforms.Compose([
transforms.Resize((28,28)),
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize((0,), (1,))]) # ToTensor scales pixels to [0, 1]; Normalize((0,), (1,)) leaves them unchanged
mnist_train = datasets.MNIST(data_path, train=True, download=True, transform=transform)
from snntorch import utils
# Before actually training a network we do not need a very large dataset, so use utils.data_subset to cut the MNIST training set from 60,000 down to 6,000 samples
subset = 10
mnist_train = utils.data_subset(mnist_train, subset)
print(f"The size of mnist_train is {len(mnist_train)}")
from torch.utils.data import DataLoader
train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
# Temporal Dynamics
num_steps = 10
# create vector filled with 0.5
raw_vector = torch.ones(num_steps)*0.5
# pass each sample through a Bernoulli trial
rate_coded_vector = torch.bernoulli(raw_vector)
print(f"Converted vector: {rate_coded_vector}")
print(f"The output is spiking {rate_coded_vector.sum()*100/len(rate_coded_vector):.2f}% of the time.")
num_steps = 100
# create vector filled with 0.5
raw_vector = torch.ones(num_steps)*0.5
# pass each sample through a Bernoulli trial
rate_coded_vector = torch.bernoulli(raw_vector)
print(f"The output is spiking {rate_coded_vector.sum()*100/len(rate_coded_vector):.2f}% of the time.")
from snntorch import spikegen
# Iterate through minibatches
data = iter(train_loader)
data_it, targets_it = next(data)
# Spiking Data
spike_data = spikegen.rate(data_it, num_steps=num_steps)
print(spike_data.size())
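# Illustrative check (a sketch of how rate coding behaves, not required by the tutorial):
# averaging the spike train over time for the first image should roughly recover its
# normalized pixel intensities, since each pixel fires with that probability at every step.
# firing_rates is a local helper name used only for this check.
firing_rates = spike_data[:, 0].mean(dim=0)  # shape [1, 28, 28]
print(f"Max |firing rate - pixel value|: {(firing_rates - data_it[0]).abs().max():.3f}")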
import matplotlib.pyplot as plt
import snntorch.spikeplot as splt
from IPython.display import HTML
# To plot one sample of data, index into a single sample from the batch (B) dimension of spike_data, which has shape [T x B x 1 x 28 x 28]:
spike_data_sample = spike_data[:, 0, 0]
print(spike_data_sample.size())
# `spikeplot.animator` makes it super simple to animate 2-D data.
# Note: if you are running this locally on your desktop, uncomment the plt.rcParams line further below and point it at your ffmpeg executable so the animation can be rendered and saved
fig, ax = plt.subplots()
anim = splt.animator(spike_data_sample, fig, ax)
# plt.rcParams['animation.ffmpeg_path'] = 'C:\\path\\to\\your\\ffmpeg.exe'
HTML(anim.to_html5_video())
anim.save("spike_mnist_test.mp4")
print(f"The corresponding target is: {targets_it[0]}")
spike_data = spikegen.rate(data_it, num_steps=num_steps, gain=0.25)
spike_data_sample2 = spike_data[:, 0, 0]
fig, ax = plt.subplots()
anim = splt.animator(spike_data_sample2, fig, ax)
HTML(anim.to_html5_video())
plt.figure(facecolor="w")
plt.subplot(1,2,1)
plt.imshow(spike_data_sample.mean(axis=0).reshape((28,-1)).cpu(), cmap='binary')
plt.axis('off')
plt.title('Gain = 1')
plt.subplot(1,2,2)
plt.imshow(spike_data_sample2.mean(axis=0).reshape((28,-1)).cpu(), cmap='binary')
plt.axis('off')
plt.title('Gain = 0.25')
plt.show()
# Reshape
spike_data_sample2 = spike_data_sample2.reshape((num_steps, -1))
# raster plot
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_data_sample2, ax, s=1.5, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()
idx = 210 # index into 210th neuron
fig = plt.figure(facecolor="w", figsize=(8, 1))
ax = fig.add_subplot(111)
splt.raster(spike_data_sample.reshape(num_steps, -1)[:, idx].unsqueeze(1), ax, s=100, c="black", marker="|")
plt.title("Input Neuron")
plt.xlabel("Time step")
plt.yticks([])
plt.show()
def convert_to_time(data, tau=5, threshold=0.01):
    # Latency coding: map an intensity to a spike time; stronger inputs fire earlier
    spike_time = tau * torch.log(data / (data - threshold))
    return spike_time
raw_input = torch.arange(0, 5, 0.05) # tensor from 0 to 5
spike_times = convert_to_time(raw_input)
plt.plot(raw_input, spike_times)
plt.xlabel('Input Value')
plt.ylabel('Spike Time (s)')
plt.show()
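# The formula in convert_to_time comes from an RC membrane model: a membrane driven by a
# constant input I follows U(t) = I*(1 - exp(-t/tau)), and solving U(t) = threshold for t
# gives t = tau * ln(I / (I - threshold)). Quick numerical check (illustrative only):
for intensity in (1.0, 0.5, 0.02):
    t = convert_to_time(torch.tensor(intensity))
    print(f"input {intensity:.2f} -> spike time {t.item():.3f}")
# Strong inputs fire almost immediately; inputs just above the threshold fire much later.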
spike_data = spikegen.latency(data_it, num_steps=100, tau=5, threshold=0.01)
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_data[:, 0].view(num_steps, -1), ax, s=25, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()
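# linear=True replaces the logarithmic spike-time curve shown above with a linear one,
# so spike times should be spread more evenly across the time steps.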
spike_data = spikegen.latency(data_it, num_steps=100, tau=5, threshold=0.01, linear=True)
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_data[:, 0].view(num_steps, -1), ax, s=25, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()
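# normalize=True additionally rescales the spike times so that they span the full range
# of num_steps rather than clustering in the first few time steps.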
spike_data = spikegen.latency(data_it, num_steps=100, tau=5, threshold=0.01,
normalize=True, linear=True)
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_data[:, 0].view(num_steps, -1), ax, s=25, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()
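# clip=True drops the spikes caused by inputs below the threshold (the dark background
# pixels), which would otherwise all fire together at the final time step.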
spike_data = spikegen.latency(data_it, num_steps=100, tau=5, threshold=0.01,
clip=True, normalize=True, linear=True)
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_data[:, 0].view(num_steps, -1), ax, s=25, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()
spike_data_sample = spike_data[:, 0, 0]
print(spike_data_sample.size())
fig, ax = plt.subplots()
anim = splt.animator(spike_data_sample, fig, ax)
HTML(anim.to_html5_video())
print(targets_it[0])
# Create a tensor with some fake time-series data
data = torch.Tensor([0, 1, 0, 2, 8, -20, 20, -5, 0, 1, 0])
# Plot the tensor
plt.plot(data)
plt.title("Some fake time-series data")
plt.xlabel("Time step")
plt.ylabel("Voltage (mV)")
plt.show()
# Convert data
spike_data = spikegen.delta(data, threshold=4)
# Create fig, ax
fig = plt.figure(facecolor="w", figsize=(8, 1))
ax = fig.add_subplot(111)
# Raster plot of delta converted data
splt.raster(spike_data, ax, c="black")
plt.title("Input Neuron")
plt.xlabel("Time step")
plt.yticks([])
plt.xlim(0, len(data))
plt.show()
# Convert data
spike_data = spikegen.delta(data, threshold=4, off_spike=True)
# Create fig, ax
fig = plt.figure(facecolor="w", figsize=(8, 1))
ax = fig.add_subplot(111)
# Raster plot of delta converted data
splt.raster(spike_data, ax, c="black")
plt.title("Input Neuron")
plt.xlabel("Time step")
plt.yticks([])
plt.xlim(0, len(data))
plt.show()
print(spike_data)
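# A rough sketch of the idea behind delta modulation (illustrative only, not the library's
# exact implementation; spikegen.delta's handling of the first element may differ):
# compare each value with the previous one and spike when the change crosses the threshold.
diff = data[1:] - data[:-1]
manual_on = (diff >= 4).float()     # +1 spike for a large enough increase
manual_off = -(diff <= -4).float()  # -1 spike for a large enough decrease (off_spike=True)
print(manual_on + manual_off)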
spike_prob = torch.rand((num_steps, 28, 28), dtype=dtype) * 0.5
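# As I understand it, spikegen.rate_conv passes each element of spike_prob through a
# Bernoulli trial, using the element's value as its firing probability, so the result
# here is an unstructured random spike pattern rather than a recognizable digit.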
spike_rand = spikegen.rate_conv(spike_prob)
fig, ax = plt.subplots()
anim = splt.animator(spike_rand, fig, ax)
HTML(anim.to_html5_video())
fig = plt.figure(facecolor="w", figsize=(10, 5))
ax = fig.add_subplot(111)
splt.raster(spike_rand[:, 0].view(num_steps, -1), ax, s=25, c="black")
plt.title("Input Layer")
plt.xlabel("Time step")
plt.ylabel("Neuron Number")
plt.show()