This shows how to inspect a model's parameters.
import torch.nn as nn

embedding = nn.Embedding(5, 5)   # embedding table: 5 rows, each a 5-dimensional vector
lstm = nn.LSTM(5, 5)             # LSTM with input size 5 and hidden size 5

# parameters() returns an iterator over a module's learnable parameters
P = embedding.parameters()
for p in P:
    print(p)
print('==========================')
L = lstm.parameters()
for l in L:
    print(l)

Output:
Parameter containing:
tensor([[ 1.2369, 0.9615, -1.0872, 0.6052, 1.0365],
[-0.7401, 0.7690, 0.2973, -0.6002, 0.5088],
[ 0.0238, 0.0606, 1.0652, 1.5462, -0.6882],
[ 1.0782, -1.9932, -0.2231, -0.9345, -0.4399],
[-0.7458, 1.1297, -1.7649, -0.6804, -0.1075]], requires_grad=True)
==========================
Parameter containing:
tensor([[-0.1166, 0.3232, -0.0093, -0.3475, -0.0117],
[ 0.0352, 0.1153, -0.1492, 0.3389, -0.4450],
[-0.1702, -0.1058, 0.3470, 0.3221, 0.2428],
[ 0.1496, 0.3461, -0.1642, 0.3224, -0.1130],
[ 0.0583, 0.2114, -0.0104, -0.1049, -0.4377],
[ 0.2164, 0.3559, -0.1578, -0.1494, -0.4058],
[ 0.2789, 0.3192, 0.2078, -0.2472, 0.1765],
[-0.3810, -0.1640, -0.0017, 0.0837, -0.1191],
[ 0.0985, 0.0244, 0.2521, -0.0278, 0.2224],
[ 0.0541, 0.2074, -0.2630, 0.2477, -0.0520],
[-0.2335, 0.1957, -0.0838, -0.0979, 0.3911],
[ 0.1733, 0.3617, 0.3972, -0.1505, -0.1232],
[ 0.0193, -0.0281, 0.1759, -0.2768, 0.1652],
[ 0.0967, -0.1668, -0.1974, -0.3515, 0.2815],
[-0.3389, -0.2895, 0.2645, -0.3290, 0.1681],
[ 0.0967, 0.0548, 0.2284, 0.1504, -0.1633],
[-0.2658, -0.3761, -0.3217, 0.1800, 0.2955],
[ 0.2902, -0.3684, 0.1768, -0.3400, 0.2085],
[ 0.0979, 0.0067, 0.2163, -0.3048, -0.1939],
[ 0.3080, -0.1542, -0.2918, 0.2848, -0.2895]], requires_grad=True)
Parameter containing:
tensor([[-0.4396, 0.2501, 0.2535, 0.3453, 0.2279],
[ 0.0834, 0.4345, -0.2294, -0.3072, -0.2858],
[ 0.0241, -0.2103, 0.1693, 0.0783, -0.3495],
[-0.2856, 0.0827, -0.2763, -0.1330, -0.0223],
[ 0.1206, 0.1540, -0.3873, -0.0403, -0.3412],
[ 0.2267, 0.1350, 0.1981, -0.4236, 0.1932],
[-0.2882, -0.2399, -0.4008, -0.0451, 0.4314],
[ 0.0111, 0.1231, -0.0450, 0.2339, 0.2534],
[ 0.1199, 0.4405, 0.3582, 0.2533, -0.0980],
[-0.2366, -0.1942, 0.3301, 0.2643, -0.4055],
[ 0.3097, 0.0122, -0.1280, -0.2833, -0.0976],
[-0.1016, -0.4331, 0.2153, 0.1480, -0.3547],
[ 0.3807, -0.0522, -0.1900, 0.0691, -0.1945],
[ 0.1133, 0.0317, -0.4028, 0.1945, 0.1717],
[-0.2563, 0.0596, 0.1541, -0.0882, 0.3693],
[ 0.3666, -0.1902, 0.1833, -0.2669, 0.2957],
[ 0.2875, -0.0143, -0.0626, -0.0390, -0.2675],
[-0.4046, -0.3556, 0.3012, -0.0032, 0.4080],
[ 0.1698, -0.4314, 0.0027, 0.4194, 0.1207],
[-0.4034, 0.0641, -0.4169, 0.2518, -0.2517]], requires_grad=True)
Parameter containing:
tensor([ 0.0578, -0.3982, 0.2157, -0.0937, -0.2598, 0.2260, -0.2956, -0.3980,
0.2066, -0.1841, -0.0890, -0.2964, 0.1906, 0.0840, 0.0556, -0.0530,
0.0700, -0.3697, 0.0964, 0.2704], requires_grad=True)
Parameter containing:
tensor([ 0.1522, -0.0940, -0.3696, 0.4359, 0.3166, 0.3506, -0.2915, 0.2420,
-0.2743, -0.1104, 0.0361, -0.1473, -0.0096, 0.0345, 0.4039, -0.3477,
0.1493, 0.3542, -0.2741, -0.3424], requires_grad=True)
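The raw dump above does not say which tensor is which. A minimal sketch (using named_parameters(), which the original code does not call) that prints each parameter's name and shape instead:

import torch.nn as nn

embedding = nn.Embedding(5, 5)
lstm = nn.LSTM(5, 5)

# named_parameters() yields (name, parameter) pairs, so each tensor can be identified
for name, p in embedding.named_parameters():
    print(name, tuple(p.shape))   # weight (5, 5)

for name, p in lstm.named_parameters():
    print(name, tuple(p.shape))   # weight_ih_l0 (20, 5), weight_hh_l0 (20, 5),
                                  # bias_ih_l0 (20,), bias_hh_l0 (20,)

This matches the output above: the two 20x5 matrices are weight_ih_l0 and weight_hh_l0, and the two length-20 vectors are bias_ih_l0 and bias_hh_l0. The factor of 4 (4 x hidden_size = 20) comes from the LSTM's input, forget, cell, and output gates.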
nn.Parameter wraps a tensor so that it is registered as a learnable, trainable parameter of a module (which is why every entry printed above reads "Parameter containing:").
https://www.cnblogs.com/jfdwd/p/11185050.html
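A minimal sketch of how nn.Parameter is typically used (the Scale module and its weight attribute below are made up for illustration): assigning an nn.Parameter as a module attribute registers it, so it shows up in parameters() and gets updated by the optimizer.

import torch
import torch.nn as nn

class Scale(nn.Module):
    def __init__(self, dim):
        super().__init__()
        # A plain tensor assigned here would not be registered as a parameter;
        # wrapping it in nn.Parameter makes it learnable (requires_grad=True by default).
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        return x * self.weight

m = Scale(5)
for name, p in m.named_parameters():
    print(name, p)   # weight Parameter containing: tensor([1., 1., 1., 1., 1.], requires_grad=True)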