This notebook was developed by Prof. Monali Mavani
This notebook explains tensor basics using PyTorch
Reference : https://pytorch.org/
import torch
import numpy as np
Tensors are multi-dimensional arrays with a uniform type.
A"scalar" or "rank-0" tensor . A scalar contains a single value, and no "axes".
A "vector" or "rank-1" or 1D tensor is like a list of values. A vector has one axis.
A "matrix" or "rank-2" or 2D tensor has two axes
Anything with more than two dimensions is generally just called a tensor.
rank0 = torch.tensor(1)
print("dim",rank0.dim())
print("\n rank 0: ", rank0)
rank1 = torch.tensor([1,2,3])
print("dim",rank1.dim())
print("\n rank 1: ", rank1)
rank2 = torch.tensor([[1,2,3],[4,5,6]])
print("dim",rank2.dim())
print("\n rank 2: ", rank2)
rank3 = torch.tensor([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]])
print("dim",rank3.dim())
print("\n rank 3: ", rank3)
dim 0
rank 0: tensor(1)
dim 1
rank 1: tensor([1, 2, 3])
dim 2
rank 2: tensor([[1, 2, 3],
[4, 5, 6]])
dim 3
rank 3: tensor([[[ 1,  2,  3],
[ 4,  5,  6]],
[[ 7,  8,  9],
[10, 11, 12]]])
# tensor datatypes
tensor1d = torch.tensor([1, 2, 3])
print(tensor1d.dtype)
# When tensors are created from Python floats, PyTorch uses 32-bit precision (torch.float32) by default
floatvec = torch.tensor([1.0, 2.0, 3.0])
print(floatvec.dtype)
#change tensor datatype
floatvec1 = tensor1d.to(torch.float32)
print(floatvec1.dtype)
torch.int64
torch.float32
torch.float32
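The datatype can also be fixed explicitly at creation time with the dtype argument; a minimal illustration:
# specify the datatype explicitly when creating a tensor
int8vec = torch.tensor([1, 2, 3], dtype=torch.int8)
print(int8vec.dtype)    # torch.int8
doublevec = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float64)
print(doublevec.dtype)  # torch.float64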
#Initializing a Tensor
#from list
data = [[1, 2],[3, 4]]
x_data = torch.tensor(data)
#from numpy array
np_array = np.array(data)
x_np = torch.from_numpy(np_array)
print(x_data)
print(x_np)
tensor([[1, 2],
[3, 4]])
tensor([[1, 2],
[3, 4]])
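Tensors can also be initialized from another tensor; the new tensor keeps the shape (and, unless overridden, the dtype) of its argument. A short sketch using x_data from above:
# from another tensor
x_ones = torch.ones_like(x_data)                     # keeps the shape and int64 dtype of x_data
x_rand = torch.rand_like(x_data, dtype=torch.float)  # rand needs a floating dtype, so override it
print(x_ones)
print(x_rand)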
# Tensors can be created with random or constant values
shape = (2,3)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)
print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor}")
print(f"Shape of tensor: {rand_tensor.shape}")
print(f"Datatype of tensor: {rand_tensor.dtype}")
print(f"Device tensor is stored on: {rand_tensor.device}")
Random Tensor:
tensor([[0.3445, 0.8270, 0.2246],
[0.3391, 0.1405, 0.4354]])
Ones Tensor:
tensor([[1., 1., 1.],
[1., 1., 1.]])
Zeros Tensor:
tensor([[0., 0., 0.],
[0., 0., 0.]])
Shape of tensor: torch.Size([2, 3])
Datatype of tensor: torch.float32
Device tensor is stored on: cpu
Over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing, indexing, slicing), sampling, and more are comprehensively described in the PyTorch documentation: https://pytorch.org/docs/stable/torch.html
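As a quick sampler of such operations (a minimal sketch, not an exhaustive tour):
x = torch.tensor([[1., 2.], [3., 4.]])
print(x + 10)        # element-wise arithmetic
print(x.T)           # transpose
print(x.sum())       # reduction over all elements -> tensor(10.)
print(torch.exp(x))  # element-wise function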
# By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using the .to() method
# (copying large tensors across devices can be expensive in terms of time and memory)
if torch.cuda.is_available():
    tensor = rand_tensor.to("cuda")
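A common device-agnostic pattern is to choose the device once and reuse it, falling back to the CPU when no GPU is available; a minimal sketch:
device = "cuda" if torch.cuda.is_available() else "cpu"
tensor_on_device = rand_tensor.to(device)
print(tensor_on_device.device)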
# Standard numpy-like indexing and slicing
tensor = torch.ones(3, 4)
print(f"First row: {tensor[0]}")
print(f"First column: {tensor[:, 0]}")
print(f"Last column: {tensor[..., -1]}")
tensor[:,1] = 0
print(tensor)
First row: tensor([1., 1., 1., 1.])
First column: tensor([1., 1., 1.])
Last column: tensor([1., 1., 1.])
tensor([[1., 0., 1., 1.],
[1., 0., 1., 1.],
[1., 0., 1., 1.]])
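Boolean masks also work as indices, as in NumPy. A short sketch (working on a clone so the cells below are unaffected):
masked = tensor.clone()       # copy, so the original tensor stays unchanged
print(masked[masked > 0])     # 1-D tensor of the elements where the condition holds
masked[masked == 0] = -1      # assignment through a boolean mask
print(masked)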
# Combine multiple tensors column-wise. torch.cat() concatenates the given sequence along an existing dimension.
t1 = torch.cat([tensor, tensor, tensor], dim=1)
print(t1)
print(tensor.size())
print(t1.size())
tensor([[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],
[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],
[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.]])
torch.Size([3, 4])
torch.Size([3, 12])
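The same call with dim=0 concatenates row-wise instead, growing the first dimension; a quick check:
t0 = torch.cat([tensor, tensor], dim=0)
print(t0.size())   # torch.Size([6, 4])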
# torch.stack() concatenates a sequence of tensors along a new dimension. All tensors need to be of the same size.
t1 = torch.tensor([1,2,3])
t2 = torch.tensor([4,5,6])
t3 = torch.tensor([7,8,9])
t4 = torch.tensor([10,11,12])
print("t1 dimesions:", t1.dim())
print("t1 size:", t1.size())
print("t1 shape:", t1.shape)
t5=torch.stack((t1,t2,t3,t4),dim=0) # axis=0 stacking
print("\n t5 size:",t5.size())
print("t5 dimesions:", t5.dim())
print("t5 shape:", t5.shape)
print(t5)
t6=torch.stack((t1,t2,t3,t4),dim=1) # axis=1 stacking
print("\n t6 size:",t6.size())
print("t6 dimesions:", t6.dim())
print("t6 shape:", t6.shape)
print(t6)
t1 dimensions: 1
t1 size: torch.Size([3])
t1 shape: torch.Size([3])
t5 size: torch.Size([4, 3])
t5 dimensions: 2
t5 shape: torch.Size([4, 3])
tensor([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]])
t6 size: torch.Size([3, 4])
t6 dimensions: 2
t6 shape: torch.Size([3, 4])
tensor([[ 1, 4, 7, 10],
[ 2, 5, 8, 11],
[ 3, 6, 9, 12]])
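torch.unbind() is roughly the inverse of torch.stack(): it removes a dimension and returns the slices along it. A brief illustration:
slices = torch.unbind(t5, dim=0)   # tuple of the four original rank-1 tensors
print(slices[0])                   # tensor([1, 2, 3])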
# Matrix multiplication between two tensors. tensor.T returns the transpose of a tensor
y1 = tensor @ tensor.T # '@' is the matrix-multiplication operator
y2 = tensor.matmul(tensor.T)
y3 = tensor.mm(tensor.T)
print(y1)
print(y2)
print(y3)
y4 = torch.rand_like(y1)
torch.matmul(tensor, tensor.T, out=y4)
tensor([[3., 3., 3.],
[3., 3., 3.],
[3., 3., 3.]])
tensor([[3., 3., 3.],
[3., 3., 3.],
[3., 3., 3.]])
tensor([[3., 3., 3.],
[3., 3., 3.],
[3., 3., 3.]])
tensor([[3., 3., 3.],
[3., 3., 3.],
[3., 3., 3.]])
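Element-wise multiplication is a different operation from matrix multiplication: it multiplies entry by entry. A short sketch of its three equivalent spellings:
z1 = tensor * tensor           # element-wise product, not a matrix product
z2 = tensor.mul(tensor)        # method form of the same operation
z3 = torch.mul(tensor, tensor) # functional form
print(z1)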
Single-element tensors: if you have a one-element tensor, for example by aggregating all values of a tensor into one value, you can convert it to a Python numerical value using item():
agg = tensor.sum()
agg_item = agg.item()
print(tensor, agg, agg_item, type(agg_item))
tensor([[1., 0., 1., 1.],
[1., 0., 1., 1.],
[1., 0., 1., 1.]]) tensor(9.) 9.0 <class 'float'>
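item() only works on one-element tensors; for anything larger, tolist() converts to a nested Python list. A brief illustration:
print(tensor.tolist())   # nested Python list: [[1.0, 0.0, 1.0, 1.0], ...]
# tensor.item() here would raise a RuntimeError, since tensor holds more than one value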
#Operations that store the result into the operand are called in-place. They are denoted by a _ suffix.
print(tensor)
tensor.add_(5)
print(tensor)
tensor([[1., 0., 1., 1.],
[1., 0., 1., 1.],
[1., 0., 1., 1.]])
tensor([[6., 5., 6., 6.],
[6., 5., 6., 6.],
[6., 5., 6., 6.]])
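In-place operations save memory, but they can be problematic when computing derivatives because they overwrite values needed for the backward pass, so their use is discouraged in autograd code. Two more examples of the _ suffix convention, as a small sketch:
t_inplace = torch.ones(2, 2)
t_inplace.mul_(3)   # in-place multiply: all entries become 3
t_inplace.sub_(1)   # in-place subtract: all entries become 2
print(t_inplace)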
Tensor to NumPy array
#Tensors on the CPU and NumPy arrays can share their underlying memory locations, and changing one will change the other.
t = torch.ones(5)
print("t:", t)
n = t.numpy()
print("n: ", n)
t.add_(1)
print("after adding")
print("t:", t)
print("n: ", n)
t: tensor([1., 1., 1., 1., 1.])
n:  [1. 1. 1. 1. 1.]
after adding
t: tensor([2., 2., 2., 2., 2.])
n:  [2. 2. 2. 2. 2.]
# Changes in the NumPy array are reflected in the tensor.
n = np.ones(5)
t = torch.from_numpy(n)
print("t:", t)
print("n: ", n)
np.add(n, 1, out=n)
print("t:", t)
print("n: ", n)
t: tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
n:  [1. 1. 1. 1. 1.]
t: tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
n:  [2. 2. 2. 2. 2.]
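If shared memory is not wanted, copy the data instead: torch.tensor() copies a NumPy array, whereas torch.from_numpy() shares it. A minimal sketch:
n2 = np.ones(5)
t2 = torch.tensor(n2)   # torch.tensor() copies the data, unlike torch.from_numpy()
np.add(n2, 1, out=n2)
print("t2:", t2)        # unchanged: the tensor owns its own copy
print("n2:", n2)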
#tensor datatypes
a = torch.ones((2, 3), dtype=torch.int16)
print(a)
b = torch.rand((2, 3), dtype=torch.float64)
print(b)
c = b.to(torch.int32)   # converting to an integer type truncates the fractional part
print(c)
tensor([[1, 1, 1],
[1, 1, 1]], dtype=torch.int16)
tensor([[0.9791, 0.0385, 0.9006],
[0.3490, 0.4603, 0.7692]], dtype=torch.float64)
tensor([[0, 0, 0],
[0, 0, 0]], dtype=torch.int32)
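Arithmetic between tensors of different datatypes follows PyTorch's type-promotion rules: the result takes the wider type. A quick check with a and b from above:
d = a * b          # int16 tensor times float64 tensor
print(d.dtype)     # torch.float64: the result is promoted to the wider floating type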