MINI-RNN Code Study Notes

Code source: https://gist.github.com/karpathy/d4dee566867f8291f086

This is a simple RNN implementation, useful for learning the basic principles of an RNN and the core equations of the forward and backward passes.

【Text Input】

import numpy as np

# data I/O
data = open('input.txt', 'r').read()  # should be simple plain text file
chars = list(set(data))  # set() removes duplicate elements, giving the unique characters that appear in the text
data_size, vocab_size = len(data), len(chars)
print ('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}  # dictionary mapping each character to its index
ix_to_char = {i: ch for i, ch in enumerate(chars)}  # dictionary mapping each index back to its character

# hyperparameters
hidden_size = 100  # size of hidden layer of neurons
seq_length = 25  # number of steps to unroll the RNN for
learning_rate = 1e-1

# model parameters
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01  # input to hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01  # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01  # hidden to output
bh = np.zeros((hidden_size, 1))  # hidden bias
by = np.zeros((vocab_size, 1))  # output bias
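
As a quick illustration of the vocabulary setup above, here is a minimal sketch that uses a toy string in place of input.txt (the string 'hello world' is just an assumption for illustration):

import numpy as np

data = 'hello world'                                # toy stand-in for input.txt
chars = list(set(data))                             # unique characters; set order is arbitrary
char_to_ix = {ch: i for i, ch in enumerate(chars)}  # character -> index
ix_to_char = {i: ch for i, ch in enumerate(chars)}  # index -> character
print(len(data), len(chars))                        # 11 characters, 8 unique
print(ix_to_char[char_to_ix['h']])                  # round-trips back to 'h'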

【Training】

n, p = 0, 0  # p is the pointer to the current position in the data; n is the iteration counter
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by)  # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length  # loss at iteration 0
while True:
    # prepare inputs (we're sweeping from left to right in steps seq_length long)
    if p + seq_length + 1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size, 1))  # reset RNN memory
        p = 0  # go from start of data
    inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]  # indices of the next seq_length (25) characters
    targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]  # for each input character, the index of the character that follows it

    # sample from the model now and then: every 100 iterations, generate 200 characters starting from inputs[0] to show the current training result
    if n % 100 == 0:
        sample_ix = sample(hprev, inputs[0], 200)
        txt = ''.join(ix_to_char[ix] for ix in sample_ix)
        print ('----\n %s \n----' % (txt,))

    # forward seq_length characters through the net and fetch gradient
    loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    if n % 100 == 0: print ('iter %d, loss: %f' % (n, smooth_loss))  # print progress
    # gradient check (optional):
    # gradCheck(inputs, targets, hprev)
    #############################
    # perform parameter update with Adagrad
    for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                  [dWxh, dWhh, dWhy, dbh, dby],
                                  [mWxh, mWhh, mWhy, mbh, mby]):
        mem += dparam * dparam
        param += -learning_rate * dparam / np.sqrt(mem + 1e-8)  # adagrad update

    p += seq_length  # move data pointer
    n += 1  # iteration counter
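
For reference, the Adagrad update applied above to each parameter \theta with gradient g and per-parameter memory m is:

m \leftarrow m + g \odot g
\theta \leftarrow \theta - \eta \, \frac{g}{\sqrt{m + \epsilon}}

with learning rate \eta = 0.1 and \epsilon = 10^{-8}, exactly as in the loop over (param, dparam, mem) above. Because m accumulates squared gradients, frequently updated parameters receive progressively smaller effective step sizes.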


【Forward and Backward Pass】

Forward pass: for each time step t, first build the one-hot vector xs[t] for the current input character (e.g. [1, 0, 0, ...]), then compute the hidden state hs[t] and the output scores ys[t]; applying softmax to ys[t] gives the probability ps[t] of each character, from which the cross-entropy loss is accumulated.
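
Written out, the forward recurrence that lossFun computes at each time step t is:

h_t = \tanh(W_{xh} x_t + W_{hh} h_{t-1} + b_h)
y_t = W_{hy} h_t + b_y
p_t = \mathrm{softmax}(y_t) = \frac{e^{y_t}}{\sum_k e^{(y_t)_k}}
L = -\sum_t \log (p_t)_{\mathrm{target}_t}

where x_t is the one-hot vector xs[t], h_t is hs[t], and so on.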

Backward pass: first compute dy from the derivative of the softmax. Note that because the parameters Wxh, Whh, Why, bh, by are shared across all time steps, the gradients computed at each time step t must be summed during backpropagation. To prevent exploding gradients, np.clip() is used to clip any gradient values above 5 or below -5.
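
The corresponding gradients, accumulated over t in the reversed loop below (with the initial dhnext set to zero), are:

dy_t = p_t - \mathrm{onehot}(\mathrm{target}_t)
dh_t = W_{hy}^\top dy_t + W_{hh}^\top dh^{raw}_{t+1}
dh^{raw}_t = (1 - h_t \odot h_t) \odot dh_t
\frac{\partial L}{\partial W_{hy}} = \sum_t dy_t\, h_t^\top, \qquad \frac{\partial L}{\partial b_y} = \sum_t dy_t
\frac{\partial L}{\partial W_{xh}} = \sum_t dh^{raw}_t x_t^\top, \qquad \frac{\partial L}{\partial W_{hh}} = \sum_t dh^{raw}_t h_{t-1}^\top, \qquad \frac{\partial L}{\partial b_h} = \sum_t dh^{raw}_t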

def lossFun(inputs, targets, hprev):
    """
    inputs,targets are both list of integers.
    hprev is Hx1 array of initial hidden state
    returns the loss, gradients on model parameters, and last hidden state
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # encode in 1-of-k representation
        xs[t][inputs[t]] = 1  # build the one-hot vector for this character
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh)  # hidden state
        ys[t] = np.dot(Why, hs[t]) + by  # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))  # probabilities for next chars
        loss += -np.log(ps[t][targets[t], 0])  # softmax (cross-entropy loss); note a[i, 0] is the same as a[i][0]
    # backward pass: compute gradients going backwards
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])
    for t in reversed(range(len(inputs))):  # walk the time steps in reverse order
        dy = np.copy(ps[t])  # shape (vocab_size, 1)
        dy[targets[t]] -= 1  # backprop into y: dL/dy = p - one_hot(target), from the softmax derivative
        dWhy += np.dot(dy, hs[t].T)  # shape (vocab_size, hidden_size)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext  # backprop into h, shape (hidden_size, 1)
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through tanh nonlinearity, shape (hidden_size, 1)
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)  # shape (hidden_size, vocab_size)
        dWhh += np.dot(dhraw, hs[t - 1].T)  # shape (hidden_size, hidden_size)
        dhnext = np.dot(Whh.T, dhraw)  # shape (hidden_size, 1); gradient flowing back into hs[t-1]
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam)  # clip gradients to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]

【Sampling (Test Phase)】

def sample(h, seed_ix, n):
    """
    sample a sequence of integers from the model
    h is memory state, seed_ix is seed letter for first time step
    """
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1  # one-hot vector for the seed character
    ixes = []
    for t in range(n):
        h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
        y = np.dot(Why, h) + by
        p = np.exp(y) / np.sum(np.exp(y))
        ix = np.random.choice(range(vocab_size), p=p.ravel())  # sample the next character index from the predicted distribution
        x = np.zeros((vocab_size, 1))
        x[ix] = 1  # feed the character just sampled back in as the next input
        ixes.append(ix)
    return ixes
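
The key step here is np.random.choice with the p argument, which draws an index according to the softmax distribution instead of always taking the most likely character. A tiny standalone sketch (the probabilities are made up for illustration):

import numpy as np

p = np.array([0.1, 0.7, 0.2])          # hypothetical next-character distribution
ix = np.random.choice(range(3), p=p)   # index 1 is drawn roughly 70% of the time
print(ix)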


P.S. Gradient Check

from random import uniform

def gradCheck(inputs, targets, hprev):
  global Wxh, Whh, Why, bh, by
  num_checks, delta = 10, 1e-5
  _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, targets, hprev)
  for param,dparam,name in zip([Wxh, Whh, Why, bh, by], [dWxh, dWhh, dWhy, dbh, dby], ['Wxh', 'Whh', 'Why', 'bh', 'by']):
    s0 = dparam.shape
    s1 = param.shape
    assert s0 == s1, 'Error: dims do not match: %s and %s.' % (s0, s1)
    print (name)
    for i in range(num_checks):
      ri = int(uniform(0,param.size))
      # evaluate cost at [x + delta] and [x - delta]
      old_val = param.flat[ri]
      param.flat[ri] = old_val + delta  # perturb one entry of the parameter slightly
      cg0, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
      param.flat[ri] = old_val - delta
      cg1, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
      param.flat[ri] = old_val # reset old value for this parameter
      # fetch both numerical and analytic gradient
      grad_analytic = dparam.flat[ri]
      grad_numerical = (cg0 - cg1) / ( 2 * delta )
      rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
      print ('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
      # rel_error should be on order of 1e-7 or less

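
The check compares the analytic gradient from lossFun against a central-difference estimate for a few randomly chosen parameter entries:

\frac{\partial L}{\partial \theta_i} \approx \frac{L(\theta_i + \delta) - L(\theta_i - \delta)}{2\delta}, \qquad \mathrm{rel\_error} = \frac{|g_{\mathrm{analytic}} - g_{\mathrm{numerical}}|}{|g_{\mathrm{analytic}} + g_{\mathrm{numerical}}|}

with \delta = 10^{-5}; a relative error around 10^{-7} or smaller indicates the backward pass is correct.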
