Let's get started!
Use import torch to import the PyTorch library (don't type import pytorch by mistake!).
Below are my study notes:
import torch  # import the module
x = torch.rand(5,3)  # create a random tensor
x
tensor([[0.8241, 0.9623, 0.8265],
[0.8875, 0.6775, 0.0678],
[0.8438, 0.5565, 0.0824],
[0.7778, 0.7368, 0.5326],
[0.6096, 0.5767, 0.5788]])
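Besides torch.rand there are a few other creation functions; a quick sketch for my own reference (outputs omitted, some values are random):
x = torch.zeros(5,3)          # tensor filled with zeros
x = torch.tensor([5.5, 3.0])  # tensor built directly from data
x = torch.empty(5,3)          # uninitialized tensor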
x = x.new_ones(5,3,dtype=torch.double)  # create a tensor of ones and specify the dtype
x
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=torch.float64)
x = torch.randn_like(x,dtype=torch.float)  # override the dtype; the shape stays the same
x
tensor([[-1.1167, 0.1029, 0.5996],
[-1.2067, 1.4284, -1.0661],
[-0.0320, -0.3634, 1.4178],
[ 0.2564, -1.0210, -2.3204],
[-0.0476, -0.2605, -0.1166]])
x.size()  # get the size
torch.Size([5, 3])
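Side note: x.shape gives the same result as x.size(), and torch.Size behaves like a tuple, so it can be unpacked:
rows, cols = x.size()
print(rows, cols)  # 5 3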
y = torch.rand(5,3)
torch.add(x,y)  # can also be written as x + y
tensor([[-0.8348, 0.5407, 0.6893],
[-0.9977, 1.4544, -0.6345],
[ 0.7664, -0.3510, 2.3684],
[ 0.4159, -0.4354, -1.6096],
[ 0.0588, -0.1941, 0.5014]])
result = torch.empty(5,3)  # empty (uninitialized) tensor
torch.add(x,y,out=result)  # store the result in result
tensor([[-0.8348, 0.5407, 0.6893],
[-0.9977, 1.4544, -0.6345],
[ 0.7664, -0.3510, 2.3684],
[ 0.4159, -0.4354, -1.6096],
[ 0.0588, -0.1941, 0.5014]])
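There is also an in-place variant: methods ending in an underscore mutate the tensor they are called on, so the following produces the same values as above (output omitted):
y.add_(x)  # adds x to y in place, modifying y itself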
x, x[:,1]  # NumPy-style slicing: take the second column
(tensor([[-1.1167, 0.1029, 0.5996],
[-1.2067, 1.4284, -1.0661],
[-0.0320, -0.3634, 1.4178],
[ 0.2564, -1.0210, -2.3204],
[-0.0476, -0.2605, -0.1166]]),
tensor([ 0.1029, 1.4284, -0.3634, -1.0210, -0.2605]))
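The other indexing patterns from NumPy work here too, for example:
x[0]      # first row
x[1:3]    # rows 1 and 2
x[-1, :]  # last row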
x = torch.rand(4,4)
y = x.view(16)    # similar to numpy's resize(), though the usage differs a bit
z = x.view(-1,8)  # -1 means this dimension is inferred from the others
x.size(), y.size(), z.size()
(torch.Size([4, 4]), torch.Size([16]), torch.Size([2, 8]))
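Worth remembering: view does not copy; the new tensor shares the underlying data with the original. A quick check of that claim:
y[0] = 100.0    # modify through the view...
print(x[0, 0])  # ...and the original changes too: tensor(100.)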
x = torch.rand(1)
print(x)
print(x.item())  # extract the value as a plain Python number
tensor([0.5160])
0.5160175561904907
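Note that item() only works on tensors with exactly one element; for anything bigger, tolist() converts to a nested Python list:
t = torch.rand(2,2)
t.tolist()  # nested list of floats; t.item() here would raise an error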
import numpy as np
a = torch.ones(5)
a
tensor([1., 1., 1., 1., 1.])
b = a.numpy()  # convert the tensor to a numpy array
b
array([1., 1., 1., 1., 1.], dtype=float32)
a.add_(1)  # add 1 to a in place; b changes too, since they share memory
a, b
(tensor([2., 2., 2., 2., 2.]), array([2., 2., 2., 2., 2.], dtype=float32))
a = np.ones(5)
b = torch.from_numpy(a)  # convert the array to a tensor
np.add(a,1,out=a)        # modify a in place; b follows
a, b
(array([2., 2., 2., 2., 2.]),
tensor([2., 2., 2., 2., 2.], dtype=torch.float64))
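Tensors can also be moved onto the GPU. I haven't run this part myself (no CUDA here), so this is just a sketch based on the official docs:
if torch.cuda.is_available():
    device = torch.device("cuda")     # a CUDA device object
    y = torch.ones(5, device=device)  # create a tensor directly on the GPU
    x = torch.ones(5).to(device)      # or move an existing tensor over
    z = x + y
    print(z.to("cpu", torch.double))  # move back to the CPU, changing dtype on the way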
The requires_grad flag tells PyTorch whether this tensor needs its gradient tracked during computation. Taking linear regression as an example: to find the best parameter values, we define a loss function and train by backpropagating gradients.
x = torch.ones(2,2,requires_grad=True)
x
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
y = x + 2
y
tensor([[3., 3.],
[3., 3.]], grad_fn=<AddBackward0>)
y.grad_fn  # records how y was created, to guide backpropagation; I don't fully understand it yet
<AddBackward0 at 0x25b91b2c710>
z = y*y*3
out = torch.mean(z)
z, out
(tensor([[27., 27.],
[27., 27.]], grad_fn=<MulBackward0>),
tensor(27., grad_fn=<MeanBackward0>))
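Continuing from here, calling out.backward() computes d(out)/dx and stores it in x.grad. Since out = mean(3*(x+2)^2) and x is all ones, every entry of the gradient is 6*(x+2)/4 = 4.5:
out.backward()  # backpropagate from the scalar out
print(x.grad)   # tensor([[4.5000, 4.5000],
                #         [4.5000, 4.5000]])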
a = torch.randn(2,2)
print(a)
print(a.requires_grad)
a.requires_grad_(True)  # change requires_grad in place
print(a.requires_grad)
b = (a*a).sum()
print(b.grad_fn)
tensor([[-0.6831, 1.5310],
[-0.5836, 0.4117]])
False
True
<SumBackward0 object at 0x0000025B91B39828>
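Going the other way, gradient tracking can be switched off with torch.no_grad() or by detaching a tensor from the graph:
with torch.no_grad():
    print((a*a).requires_grad)  # False: no history is recorded inside this block
c = a.detach()          # same values, but cut off from the graph
print(c.requires_grad)  # False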
Converting between tensors and numpy arrays:
import numpy as np
import torch
a = torch.Tensor([])
b = a.numpy()            # a is a tensor, b becomes an array
a = np.random.randn(5)   # a is an array
b = torch.from_numpy(a)  # b becomes a tensor
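One last note to self: from_numpy shares memory with the source array, while torch.tensor() makes a copy. A small check:
a = np.ones(3)
b = torch.from_numpy(a)  # shares memory with a
c = torch.tensor(a)      # copies the data
a += 1
print(b)  # tensor([2., 2., 2.], dtype=torch.float64), followed a
print(c)  # tensor([1., 1., 1.], dtype=torch.float64), unchanged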