How to move tensors, class instances, and functions to the GPU in PyTorch

To move a tensor to the GPU, call ".cuda()"; to bring it back from the GPU to the CPU, just call ".cpu()".
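A minimal sketch of moving a single tensor back and forth (this assumes a CUDA-capable GPU is available; the device-agnostic .to(device) form is shown as well):

import torch

x = torch.randn(2, 3)      # created on the CPU
x_gpu = x.cuda()           # copy to the current GPU
x_cpu = x_gpu.cpu()        # copy back to the CPU

# device-agnostic alternative
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x_dev = x.to(device)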

Let's look at the code:

import torch
import torch.nn as nn
import copy

class MyClass(nn.Module):
    def __init__(self,para1,para2):
        super(MyClass,self).__init__()
        self.attr1 = para1
        self.attr2 = para2

        ## Define layers as attributes of self
        self.func1 = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,padding=1)
        self.func2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)

        self.func3 = nn.Sequential(
            nn.Conv2d(64,64,3,padding=1),
            nn.Conv2d(64,64,3,padding=1),
            nn.MaxPool2d(kernel_size=2,stride=2),
            nn.Sigmoid()
        )

    def forward1(self,inputs):
        print('forward1')
        out = self.func1(inputs)
        out = self.func2(out)
        out = self.func3(out)
        return out

    def forward2(self,inputs):
        print('forward2')
        out = self.func1(inputs)
        out = self.func2(out)
        out = self.func3(out)
        ## Define a layer inside forward; this conv is created on the CPU and is not registered on the module
        conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
        out = conv(out)
        return out

    def function_1(self,input):
        print('function_1')
        conv = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,padding=1).cuda()
        return conv(input)

    def function_2(self,input):
        print('function_2')
        conv = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,padding=1)
        return conv(input)

def function1(input):
    conv = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,padding=1).cuda()
    return conv(input)

def function2(input):
    conv = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,padding=1)
    return conv(input)


if __name__ == "__main__":
    # Move a tensor to the GPU
    input = torch.empty((10,3,256,256),dtype=torch.float32).cuda()

    ## Move a class instance to the GPU; both ways work, way 2 is recommended
    ## However, both only move the self attributes/layers defined in __init__ onto CUDA;
    ## they have no effect on modules created elsewhere
    ## Way 1
    myclass1 = MyClass(para1=1,para2=2)
    myclass1.cuda()
    ## Way 2
    myclass2 = MyClass(para1=1,para2=2).cuda()

    ## Tests
    out1 = myclass2.forward1(input)   # works: every layer it uses was registered in __init__
    # out2 = myclass2.forward2(input) # error: the conv created inside forward2 stays on the CPU
    # out3 = myclass2.func1(input)    # works
    # out4 = myclass2.func2(input)    # error, but from a channel mismatch (func2 expects 64 input channels); func2 itself is on the GPU

    # Standalone functions
    # out5 = function1(input)         # works: the conv is explicitly moved with .cuda()
    # out6 = function2(input)         # error: the conv stays on the CPU while the input is on the GPU

The experiment shows that a standalone, user-defined function cannot be moved to the GPU directly: any module it creates internally stays on the CPU unless it is moved explicitly, as function1 does with .cuda().
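One common workaround for a standalone function (a sketch, not part of the original code; function3 is a hypothetical name) is to move whatever module it creates to the same device as its input, so the function runs on both CPU and GPU:

def function3(x):
    # build the conv and place it on whatever device the input tensor lives on
    conv = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1).to(x.device)
    return conv(x)

Note that a conv created inside the function gets fresh random weights on every call, so for anything beyond a quick test it is better to register the layer on a module, as sketched below.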

When a class instance is moved to the GPU, .cuda() only moves the parameters and submodules registered as self attributes in __init__; it has no effect on modules created elsewhere (for example, the conv built inside forward2).
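If the extra conv in forward2 is really meant to be part of the model, the usual fix (again just a sketch; MyClassFixed is a hypothetical name) is to register it in __init__, so that .cuda() / .to() move it together with the other layers:

class MyClassFixed(nn.Module):
    def __init__(self):
        super().__init__()
        self.func1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.func2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        # registered as a self attribute, so .cuda() moves it as well
        self.extra_conv = nn.Conv2d(64, 64, kernel_size=3, padding=1)

    def forward(self, x):
        out = self.func2(self.func1(x))
        return self.extra_conv(out)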

How do you handle this situation when you run into it?
