Converting Between Variable, Tensor, and NumPy in PyTorch


1. Convert a NumPy array to a Tensor

sub_ts = torch.from_numpy(sub_img)   # sub_img is a NumPy array

2. Convert a Tensor to a NumPy array

sub_np1 = sub_ts.numpy()             # sub_ts is a Tensor

3. Convert a NumPy array to a Variable

sub_va = Variable(torch.from_numpy(sub_img))

4. Convert a Variable to a NumPy array

sub_np2 = sub_va.data.numpy()
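
Two caveats are worth keeping in mind here, shown in a minimal sketch below (the variable names are illustrative, not from the recipes above): a tensor on the GPU must be moved back to the CPU before calling .numpy(), and a tensor that requires gradients must be detached first. Also note that torch.from_numpy() and Tensor.numpy() share the underlying memory rather than copying it.

import numpy as np
import torch

arr = np.arange(6, dtype=np.float32).reshape(3, 2)
t = torch.from_numpy(arr)        # shares memory with arr, no copy
arr_back = t.numpy()             # shares memory with t, no copy

if torch.cuda.is_available():
    t_gpu = t.cuda()                     # a CUDA tensor cannot call .numpy() directly
    arr_from_gpu = t_gpu.cpu().numpy()   # move it to the CPU first

t_grad = torch.ones(3, 2, requires_grad=True)
arr_from_grad = t_grad.detach().numpy()  # detach from the autograd graph first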

Example 1

# coding=utf-8
import numpy as np
import torch
"""Pytorch中tensor了解"""
 
 
def just_try():
    # A Tensor can be thought of as a multi-dimensional array, similar to NumPy, but it can be accelerated on a GPU
    x = torch.FloatTensor(5, 3)  # [torch.FloatTensor of size 5x3]; allocates storage without initializing it
    print('x: {}'.format(x))
    print('x.size(): {}'.format(x.size()))  # torch.Size([5, 3]); torch.Size is a subclass of tuple and supports all tuple operations
 
    if torch.cuda.is_available():  # guard so the demo also runs on CPU-only machines
        y_tensor_gpu = x.cuda()  # move the tensor to the GPU
        print('y_tensor_gpu: {}'.format(y_tensor_gpu))
 
 
def multiple_add():
    x = torch.FloatTensor(3, 2)
    y = torch.FloatTensor(3, 2)
    # addition, form 1: the + operator
    result_1 = x + y
    # addition, form 2: torch.add
    result_2 = torch.add(x, y)
    # addition, form 3: write the result into a pre-allocated output tensor
    result_3 = torch.FloatTensor(3, 2)
    torch.add(x, y, out=result_3)
 
    print('result_1: {}'.format(result_1))
    print('result_1.size():{}'.format(result_1.size()))
    print('result_2: {}'.format(result_2))
    print('result_2.size():{}'.format(result_2.size()))
    print('result_3: {}'.format(result_3))
    print('result_3.size():{}'.format(result_3.size()))
 
 
def inplace_operation():
    x = torch.FloatTensor(3, 2)
    y = torch.FloatTensor(3, 2)
    print('original y: {}'.format(y))
 
    # ordinary addition, does not change the original y (equivalent to result_common = y + x)
    result_common = y.__add__(x)
    print('common add, result_common: {}'.format(result_common))
    print('common add, y: {}'.format(y))
    # in-place addition, changes y (equivalent to y.add_(x))
    y.__iadd__(x)
    print('inplace add, y: {}'.format(y))
 
 
def tensor_vs_numpy():
    """
    tensor 和 numpy 之间的联系
    :return: 
    """
    y = torch.FloatTensor(3, 2)
    print('y: {}'.format(y))
    # tensor slicing works the same way as NumPy slicing
    print('y slice: {}'.format(y[:, 1]))
 
    # Tip: converting between tensors and NumPy arrays is easy and fast.
    # For operations a Tensor does not support, convert to a NumPy array, process it, then convert back.
    aa_tensor = torch.ones(3, 2)
    print('original aa_tensor: {}'.format(aa_tensor))
    # tensor ---> numpy
    bb_numpy = aa_tensor.numpy()  # Note: the tensor and the NumPy array share memory, which makes the conversion
                                  # fast, but also means that changing one changes the other
    print('bb_numpy: {}'.format(bb_numpy))
    # numpy ---> tensor
    cc_tensor = torch.from_numpy(bb_numpy)
    print('cc_tensor: {}'.format(cc_tensor))
 
    bb_numpy += 1
    print('after adding one, bb_numpy: {}'.format(bb_numpy))
    print('after adding one, aa_tensor: {}'.format(aa_tensor))
    print('after adding one, cc_tensor: {}'.format(cc_tensor))
 
 
if __name__ == '__main__':
    just_try()
    print("********************")
    multiple_add()
    print("********************")
    inplace_operation()
    print("********************")
    tensor_vs_numpy()

Output

x: tensor([[ 8.4735e-01,  4.5852e-41,  1.4709e-28],
        [ 3.0645e-41,  9.5032e-04,  4.5852e-41],
        [ 2.5129e-39,  4.5852e-41, -4.3164e-02],
        [ 4.5850e-41,  2.6068e-39,  4.5852e-41],
        [ 3.0926e+00,  4.5852e-41,  2.5129e-39]])
x.size(): torch.Size([5, 3])
y_tensor_gpu: tensor([[ 8.4735e-01,  4.5852e-41,  1.4709e-28],
        [ 3.0645e-41,  9.5032e-04,  4.5852e-41],
        [ 2.5129e-39,  4.5852e-41, -4.3164e-02],
        [ 4.5850e-41,  2.6068e-39,  4.5852e-41],
        [ 3.0926e+00,  4.5852e-41,  2.5129e-39]], device='cuda:0')
********************
result_1: tensor([[8.4734e-01, 7.6497e-41],
        [2.3627e-29, 3.0672e-41],
        [2.2296e-29, 3.0645e-41]])
result_1.size():torch.Size([3, 2])
result_2: tensor([[8.4734e-01, 7.6497e-41],
        [2.3627e-29, 3.0672e-41],
        [2.2296e-29, 3.0645e-41]])
result_2.size():torch.Size([3, 2])
result_3: tensor([[8.4734e-01, 7.6497e-41],
        [2.3627e-29, 3.0672e-41],
        [2.2296e-29, 3.0645e-41]])
result_3.size():torch.Size([3, 2])
********************
original y: tensor([[1.4718e-28, 3.0645e-41],
        [2.3627e-29, 3.0672e-41],
        [2.2296e-29, 3.0645e-41]])
common add, result_common: tensor([[1.7051e-28, 6.1290e-41],
        [4.7253e-29, 6.1343e-41],
        [4.4592e-29, 6.1290e-41]])
common add, y: tensor([[1.4718e-28, 3.0645e-41],
        [2.3627e-29, 3.0672e-41],
        [2.2296e-29, 3.0645e-41]])
inplace add, y: tensor([[1.7051e-28, 6.1290e-41],
        [4.7253e-29, 6.1343e-41],
        [4.4592e-29, 6.1290e-41]])
********************
y: tensor([[1.4718e-28, 3.0645e-41],
        [4.7253e-29, 6.1343e-41],
        [4.4592e-29, 6.1290e-41]])
y slice: tensor([3.0645e-41, 6.1343e-41, 6.1290e-41])
original aa_tensor: tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
bb_numpy: [[1. 1.]
 [1. 1.]
 [1. 1.]]
cc_tensor: tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
after adding one, bb_numpy: [[2. 2.]
 [2. 2.]
 [2. 2.]]
after adding one, aa_tensor: tensor([[2., 2.],
        [2., 2.],
        [2., 2.]])
after adding one, cc_tensor: tensor([[2., 2.],
        [2., 2.],
        [2., 2.]])
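
One thing worth noting about the output above: the strange values in x and y come from uninitialized memory, since torch.FloatTensor(5, 3) only allocates storage without filling it. In current PyTorch the same thing is usually spelled torch.empty(); a small sketch, in case you want deterministic contents instead:

import torch

x = torch.empty(5, 3)   # uninitialized storage, like torch.FloatTensor(5, 3)
z = torch.zeros(5, 3)   # deterministic: all zeros
r = torch.rand(5, 3)    # initialized: uniform random values in [0, 1)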

Example 2

# coding=utf-8
import numpy as np
import torch
from torch.autograd import Variable
 
"""pytorch中Variable了解"""
"""
Variable是Pytorch中autograd自动微分模块的核心。
它封装了Tensor,支持几乎所有的tensor操作。
主要包含如下3个属性:
1. data: 保存Variable所包含的Tensor
2. grad: 保存data对应的梯度,grad也是一个Variable,而不是一个Tensor,和data的形状一样
3. grad_fn: 指向一个Function对象,这个Function用来反向传播计算输入的梯度
"""
 
 
def about_variable():
    x = Variable(torch.ones(3, 2), requires_grad=True)
    y = x.detach().numpy()
    z = torch.from_numpy(y)
    print('x: {}'.format(x))
    print('***************')
    print('y: {}'.format(y))
    print('***************')
    print('z: {}'.format(z))
    print('***************')
    print('x.data: {}'.format(x.data))
    print('***************')
    print('x.grad: {}'.format(x.grad))
 
    # Variable and Tensor expose nearly identical interfaces
    aa_variable = Variable(torch.ones(3, 2))
    print('torch.cos(aa_variable): {}'.format(torch.cos(aa_variable)))
    print('torch.cos(aa_variable.data): {}'.format(torch.cos(aa_variable.data)))
 
 
if __name__ == '__main__':
    about_variable()

Output

x: tensor([[1., 1.],
        [1., 1.],
        [1., 1.]], requires_grad=True)
***************
y: [[1. 1.]
 [1. 1.]
 [1. 1.]]
***************
z: tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
***************
x.data: tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
***************
x.grad: None
torch.cos(aa_variable): tensor([[0.5403, 0.5403],
        [0.5403, 0.5403],
        [0.5403, 0.5403]])
torch.cos(aa_variable.data): tensor([[0.5403, 0.5403],
        [0.5403, 0.5403],
        [0.5403, 0.5403]])
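
A closing note on newer PyTorch versions: since PyTorch 0.4, Variable has been merged into Tensor, so the wrapper above is no longer required; a plain tensor created with requires_grad=True carries the same data, grad, and grad_fn attributes. A minimal sketch of the modern equivalent:

import torch

x = torch.ones(3, 2, requires_grad=True)  # replaces Variable(torch.ones(3, 2), requires_grad=True)
y = (x * 2).sum()
y.backward()
print(x.grad)     # gradient of y w.r.t. x: a 3x2 tensor filled with 2s
print(y.grad_fn)  # the Function used for backpropagation, here SumBackward0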
