dive into deep learning 笔记

阅读更多
http://zh.d2l.ai/chapter_prerequisite/install.html

https://zh.d2l.ai/d2l-zh-1.0.zip

conda env create -f environment.yml


conda activate gluon

jupyter notebook

mxnet


from mxnet import nd

# Create a length-12 row vector and inspect its metadata.
x = nd.arange(12)
x.shape
x.size
X = x.reshape((3, 4))   # reshape the 12 elements into a 3x4 matrix
nd.zeros((2, 3, 4))
nd.ones((3, 4))
Y = nd.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
nd.random.normal(0, 1, shape=(3, 4))  # samples from N(0, 1)

# Element-wise arithmetic on same-shape arrays.
X + Y
X * Y
X / Y
Y.exp()

# Matrix product: (3, 4) x (4, 3) -> (3, 3).
# BUG FIX: this call originally also appeared at the top of the snippet,
# before X and Y were defined; the premature duplicate was removed.
nd.dot(X, Y.T)

# Concatenate along rows (dim=0) and along columns (dim=1).
nd.concat(X, Y, dim=0), nd.concat(X, Y, dim=1)

# Element-wise equality comparison -> 0/1 array of the same shape.
X == Y

X.sum()
X.norm().asscalar()  # L2 norm, converted to a Python scalar

广播
A = nd.arange(3).reshape((3, 1))  # column vector, shape (3, 1)
B = nd.arange(2).reshape((1, 2))  # row vector, shape (1, 2)
A + B  # shapes differ, so both are broadcast to (3, 2) before the add

索引
X[1:3]       # rows 1 and 2 (the end of the slice is exclusive)
X[1, 2] = 9  # write a single element in place

运算的内存开销
before = id(Y)
Y = Y + X        # allocates a new array and rebinds Y to it
id(Y) == before  # -> False: Y no longer points at the old buffer

Z = Y.zeros_like()
before = id(Z)
Z[:] = X + Y     # writes into Z's existing buffer (X + Y still builds a temp)
id(Z) == before  # -> True: Z's identity is preserved

before = id(X)
X += Y           # in-place add; no rebinding
id(X) == before  # -> True

自动求梯度

from mxnet import autograd, nd

# Gradient of y = 2 * x^T x with respect to x.
x = nd.arange(4).reshape((4, 1))
x.attach_grad()              # allocate storage for x's gradient
with autograd.record():      # record operations to build the graph
    y = 2 * nd.dot(x.T, x)
y.backward()                 # BUG FIX: without backward(), x.grad is never computed
x.grad                       # equals 4 * x

# record() also switches MXNet from prediction mode to training mode.
print(autograd.is_training())      # False outside a record() scope

with autograd.record():
    print(autograd.is_training())  # True inside a record() scope


def f(a):
    """Double *a* until its norm reaches 1000, then branch on the sign of its sum.

    Returns the doubled value itself when its sum is positive, and the
    value scaled by 100 otherwise.  The result is always a constant
    multiple of *a*, so the gradient df/da equals f(a) / a.
    """
    result = a * 2
    # Keep doubling while the magnitude is still below the threshold.
    while result.norm().asscalar() < 1000:
        result = result * 2
    return result if result.sum().asscalar() > 0 else 100 * result

a = nd.random.normal(shape=1)  # random scalar input
a.attach_grad()                # allocate storage for a's gradient
with autograd.record():        # autograd traces through Python control flow
    c = f(a)
c.backward()

# f only scales its input by a constant factor (the factor depends on the
# control-flow path taken), so the gradient equals that factor, i.e. c / a.
a.grad == c / a












你可能感兴趣的:(dive into deep learning 笔记)