PyTorch Basics: Getting Started (Part 1)

1. Tensor Attributes

import torch
a = torch.tensor([1,2,3], dtype=int)  # a Python int dtype maps to torch.int64
print(a)
tensor([1, 2, 3])
a.type()  # check the tensor type
'torch.LongTensor'

b = torch.tensor([4,5,6], dtype=float)  # a Python float dtype maps to torch.float64
print(b)
tensor([4., 5., 6.], dtype=torch.float64)
tensor = torch.tensor([[1,2,3],
                       [4,5,6]])
# check the number of dimensions
tensor.ndim
2
# check the shape
tensor.shape  # attribute
torch.Size([2, 3])
tensor.size()  # method
torch.Size([2, 3])
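A tensor also carries its data type, the device it lives on, and its total element count. A minimal sketch (the variable name t is illustrative, not from the original):
import torch

t = torch.tensor([[1, 2, 3],
                  [4, 5, 6]])
t.dtype    # torch.int64 -- default integer type
t.device   # device(type='cpu') -- where the tensor is stored
t.numel()  # 6 -- total number of elements (2 * 3)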

2. Creating Tensors

import torch
# create a 2x3 matrix filled with ones
torch.ones(2,3)
tensor([[1., 1., 1.],
        [1., 1., 1.]])
# create a 3x3 matrix filled with zeros
torch.zeros(3,3)
tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])
# create a 3x4 matrix of random numbers drawn uniformly from [0, 1)
torch.rand(3,4)
tensor([[0.2095, 0.2981, 0.9930, 0.2970],
        [0.5243, 0.6877, 0.2672, 0.1739],
        [0.3329, 0.4882, 0.5752, 0.0886]])
# create a 2x3 matrix of random integers in [0, 10)
torch.randint(0,10,(2,3))
tensor([[1, 6, 6],
        [9, 4, 7]])
# create a 3x4 matrix of samples from the standard normal distribution
torch.randn(3,4)
tensor([[ 1.6634,  0.8821,  0.3271,  0.7346],
        [-0.7800, -0.8522,  0.3166, -1.0605],
        [-0.2146, -0.4478,  1.0225,  0.7094]])
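All of the random constructors above (rand, randint, randn) draw from PyTorch's global random number generator, so the values change on every run. If reproducible numbers are needed, the seed can be fixed first; a small sketch:
import torch

torch.manual_seed(42)   # fix the global random seed
torch.rand(2, 2)        # the same values on every run with this seed (on the same setup)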
a = torch.tensor([[1,2],
                  [3,4],
                  [5,6]])
a
tensor([[1, 2],
        [3, 4],
        [5, 6]])
# create b with the same shape as a, filled with random values
b = torch.rand_like(a, dtype=float)
b
tensor([[0.0610, 0.8480],
        [0.9012, 0.8032],
        [0.8593, 0.0249]], dtype=torch.float64)
# change the shape (the view shares the same underlying data as b)
d = b.view(2,3)
d
tensor([[0.0610, 0.8480, 0.9012],
        [0.8032, 0.8593, 0.0249]], dtype=torch.float64)
# reshape to a 1-D tensor with 6 elements
d = d.reshape(6)
d
tensor([0.0610, 0.8480, 0.9012, 0.8032, 0.8593, 0.0249], dtype=torch.float64)
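Worth noting: view() returns a tensor that shares storage with b and requires a compatible (contiguous) memory layout, while reshape() behaves the same here but may silently copy when a view is not possible. A quick check, with v and r as illustrative names:
v = b.view(2, 3)               # shares storage with b
v.data_ptr() == b.data_ptr()   # True -- no data was copied
r = b.reshape(2, 3)            # same result here; reshape may copy if a view is impossible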
d[1]
tensor(0.8480, dtype=torch.float64)
# convert to a plain Python number, not a tensor (works only on a single-element tensor)
d[1].item()
0.8479788102877875
import numpy as np
# convert the tensor to a NumPy array (np.array makes a copy)
array = np.array(d)
array
array([0.06101681, 0.84797881, 0.90117927, 0.80323335, 0.85928866,
       0.02492348])
tensor = torch.tensor(array)
tensor
tensor([0.0610, 0.8480, 0.9012, 0.8032, 0.8593, 0.0249], dtype=torch.float64)
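Note that np.array(d) and torch.tensor(array) both copy the data. For zero-copy conversion, d.numpy() and torch.from_numpy() share memory with the source, so an in-place change on one side shows up on the other. A small sketch with illustrative names:
import torch
import numpy as np

t = torch.zeros(3, dtype=torch.float64)
arr = t.numpy()                # shares memory with t (CPU tensors only)
t.add_(1.0)                    # in-place change on the tensor...
arr                            # ...is visible in the array: [1., 1., 1.]
back = torch.from_numpy(arr)   # also shares memory with arr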

3. Indexing and Slicing

import torch
tensor = torch.arange(2,14)
tensor
tensor([ 2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13])
print(tensor[2])
tensor(4)
tensor[1:4]
tensor([3, 4, 5])
tensor[2:-1]
tensor([ 4,  5,  6,  7,  8,  9, 10, 11, 12])
tensor[:5]
tensor([2, 3, 4, 5, 6])
tensor[::2]
tensor([ 2,  4,  6,  8, 10, 12])
tensor[-3:]
tensor([11, 12, 13])
# index with a list of positions (repeats are allowed)
index = [1,4,5,5,7]
tensor[index]
tensor([3, 6, 7, 7, 9])
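The same slicing syntax works on 2-D tensors, with one index or slice per dimension; a short sketch with an illustrative matrix m:
import torch

m = torch.arange(12).reshape(3, 4)
m[1, 2]    # tensor(6)  -- row 1, column 2
m[:, 1]    # tensor([1, 5, 9])  -- the whole second column
m[0, :2]   # tensor([0, 1])  -- first two entries of row 0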

4. Basic Tensor Operations

import torch
a = torch.randint(1,5,(2,3))
b = torch.randint(1,5,(2,3))
print(a)
print(b)
tensor([[4, 2, 2],
        [2, 2, 3]])
tensor([[4, 1, 3],
        [3, 1, 2]])
a+b
tensor([[8, 3, 5],
        [5, 3, 5]])
torch.add(a,b)
tensor([[8, 3, 5],
        [5, 3, 5]])
result = torch.zeros(2,3)
torch.add(a, b, out=result)  # write the sum into the pre-allocated result tensor
tensor([[8., 3., 5.],
        [5., 3., 5.]])
# Note: any operation that mutates a tensor in place ends with an underscore '_', e.g. a.add_(b)
a.add_(b)
tensor([[8, 3, 5],
        [5, 3, 5]])
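To make the in-place convention concrete: a.add(b) (like a + b) returns a new tensor and leaves a untouched, while a.add_(b) overwrites a. A tiny sketch with illustrative tensors x and y:
import torch

x = torch.tensor([1, 2, 3])
y = torch.tensor([10, 10, 10])
x.add(y)    # tensor([11, 12, 13]) -- x is still [1, 2, 3]
x.add_(y)   # in-place: x is now tensor([11, 12, 13])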
tensor = torch.ones(3,5)
tensor
tensor([[1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.]])
a = a.float()  # cast a to float so its dtype matches tensor
# matrix multiplication: (2x3) @ (3x5) -> (2x5)
torch.matmul(a,tensor)
tensor([[16., 16., 16., 16., 16.],
        [13., 13., 13., 13., 13.]])
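The @ operator is equivalent to torch.matmul, and for 2-D tensors mm() does the same job:
a @ tensor      # same result as torch.matmul(a, tensor)
a.mm(tensor)    # mm() works for 2-D matrices only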
a = torch.rand(3,2)
a
tensor([[0.3879, 0.3897],
        [0.6362, 0.4619],
        [0.9510, 0.0311]])
torch.sum(a)
tensor(2.8577)
torch.min(a)
tensor(0.0311)
# index of the minimum value (into the flattened tensor)
torch.argmin(a)
tensor(5)
# mean
torch.mean(a)
tensor(0.4763)
# median (with an even number of elements, returns the lower of the two middle values)
torch.median(a)
tensor(0.3897)
torch.sqrt(a)  # element-wise square root
tensor([[0.6228, 0.6243],
        [0.7976, 0.6796],
        [0.9752, 0.1763]])
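The reductions above collapse the whole tensor to a single value; passing dim reduces along one axis instead. A short sketch reusing the 3x2 tensor a:
torch.sum(a, dim=0)   # column sums, shape (2,)
torch.sum(a, dim=1)   # row sums, shape (3,)
torch.max(a, dim=0)   # per-column max values together with their indices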

5. Autograd

import torch
x = torch.ones(2,2,requires_grad=True)  # track operations on x for automatic differentiation
x
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
y = x + 2
y
tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
z = y * y * 3
z
tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>)
out = z.mean()
out
tensor(27., grad_fn=<MeanBackward0>)
# backpropagate from out to compute gradients
out.backward()
print(x.grad)
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])
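For reference, the 4.5 comes from the chain rule: out = (1/4) * Σ 3 * (x_i + 2)^2, so ∂out/∂x_i = (3/2) * (x_i + 2), which equals 4.5 at x_i = 1.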

Reference: 31 项目实战 猫狗分类模型预测 - YouTube
