Part 6: Common Tensor Operations
A Tensor is a multi-dimensional array used to store and manipulate data.
1 Getting an element value

data.item(): converts a single-element tensor to a plain Python number.
import torch

# scalar tensor
x = torch.tensor(1)
print(x.item())

# 1-D tensor with a single element
x = torch.tensor([100])
print(x.item())

# item() returns the underlying Python number; it cannot be used
# when the tensor holds more than one element
x = torch.tensor([1, 2])
# print(x.item())   # RuntimeError: only one element tensors can be converted
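For tensors with more than one element, a common alternative (my addition, not in the original snippet) is tolist(), which converts the whole tensor to nested Python lists; a minimal sketch:

import torch

x = torch.tensor([1, 2])
print(x.tolist())    # [1, 2] -- works for any number of elements
print(x[0].item())   # 1 -- indexing first yields a single-element tensor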
2 Element-wise arithmetic

Addition, subtraction, multiplication, division, powers, modulo, floor division, and so on. Methods whose names end with _ modify the original tensor in place.
import torch

def test01():
    # functions ending with _ generally operate on the original tensor in place
    torch.manual_seed(66)
    x = torch.randint(1, 10, (3, 3))
    print(x)
    # add
    x2 = x.add(100)   # returns a new tensor
    print(x2)
    x.add_(200)       # modifies x in place
    print(x)
    # subtract
    x = x.sub(1)
    print(x)
    x.sub_(100)
    print(x)
    # multiply
    x = x.mul(2)
    print(x)
    x = x.mul_(2)
    print(x)
    # divide (div returns a float tensor)
    x = x.div(4)
    print(x)
    x.div_(2)
    print(x)
    x = x.pow(2)      # square
    print(x)
    x = x ** 2
    print(x)
    x = x + 10
    print(x)
    x = x - 10
    print(x)
    x = x * 10
    print(x)
    x = x / 2
    print(x)
    x = x // 2        # floor division
    print(x)
    x = x % 2         # modulo
    print(x)
    x -= 100          # same as x = x - 100
    print(x)

if __name__ == '__main__':
    test01()
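To confirm the in-place behavior described above, one can compare storage addresses before and after the call; a minimal sketch (data_ptr() is the standard PyTorch way to inspect the underlying storage pointer):

import torch

x = torch.ones(3)
before = x.data_ptr()
y = x.add(1)    # out-of-place: allocates new storage
x.add_(1)       # in-place: reuses the same storage
print(x.data_ptr() == before)   # True  -- add_ reused the buffer
print(y.data_ptr() == before)   # False -- add allocated a new one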
3 Hadamard product

Multiplies the elements at corresponding positions of two matrices; use the mul function or the * operator.
import torch

def test():
    x1 = torch.tensor([[1, 2], [3, 4]])
    x2 = torch.tensor([[1, 2], [3, 4]])
    # the shapes must match for a Hadamard product
    x3 = x1 * x2
    print(x3)
    x4 = x1.mul(x2)
    print(x4)

if __name__ == '__main__':
    test()
4 Tensor multiplication

A dot product maps two vectors to a scalar. For matrices, if the first matrix has shape (N, M) then the second must have shape (M, P), and the resulting product has shape (N, P). Use the @ operator or matmul.

mm can only be used on 2-D matrices.
import torch

def test2():
    x1 = torch.tensor([[1, 2], [3, 4]])
    x2 = torch.tensor([[1, 2], [3, 4]])
    x3 = torch.matmul(x1, x2)
    x3 = x1.matmul(x2)
    x3 = x1 @ x2
    x3 = x1.mm(x2)   # mm works for 2-D matrices only
    print(x3)

    # batched matmul: matmul and @ also work on 3-D tensors
    x = torch.randint(1, 4, (3, 3, 3))
    print(x)
    x2 = torch.randint(1, 4, (3, 3, 3))
    print(x2)
    x3 = x @ x2
    x3 = x.matmul(x2)
    print(x3)

if __name__ == '__main__':
    test2()
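The examples above use only square matrices; a quick sketch with non-square shapes (chosen arbitrarily here) makes the (N, M) x (M, P) -> (N, P) rule visible:

import torch

a = torch.randn(2, 5)   # (N, M) = (2, 5)
b = torch.randn(5, 3)   # (M, P) = (5, 3)
print((a @ b).shape)    # torch.Size([2, 3]) -- (N, P)
# a @ torch.randn(4, 3) would raise: inner dimensions 5 and 4 do not match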
5 Indexing

1. Simple indexing

Select data by the given subscripts.
import torch

def test():
    data = torch.randint(0, 10, (3, 4))
    print(data)
    # 1. row indexing
    print('row indexing', data[0])
    # 2. column indexing
    print('column indexing', data[:, 0])
    # 3. a fixed position; both spellings work
    print('element indexing', data[0, 0], data[0][0])

if __name__ == '__main__':
    test()

2. List indexing
import torch

def test():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (5, 5, 3))   # 5 blocks, each of size 5x3
    print(x)
    print(x.shape)
    print(x[1])               # block at index 1, i.e. the 2nd block
    print(x[1, 2])            # block index 1, row index 2
    print(x[1, 2, 1].item())  # element in the 2nd block, 3rd row, 2nd column
                              # (6 with this seed)
    print(x[0:2])             # blocks 0 and 1
    print(x[0:2, 1])          # 2nd row of the 1st and 2nd blocks
    print(x[0:2, 1:3])        # rows 2-3 of the 1st and 2nd blocks
    print(x[0:2, 1:3, 2])     # 3rd column of rows 2-3 of the 1st and 2nd blocks
    print(x[[1, 3]])          # the 2nd and 4th blocks
    print(x[[1, 3], 1])       # 2nd row of the 2nd and 4th blocks
    print(x[[1, 3], [1, 2]])  # row 2 of block 1 and row 3 of block 3: the lists
                              # are paired as coordinates [1,1] and [3,2], not a
                              # Cartesian product
    print(x[2, [1, 3], 0:2])  # rows 2 and 4, columns 1-2, of the 3rd block
    # note: when indexing with a list, the order of its indices matters
    print(x[[3, 1]])          # indices need not be ascending; the result
    print(x[[1, 3]])          # follows the order given
    # in a slice, an omitted bound means "from the start" or "to the end"
    print(x[0, 1, :2])
    print(x[-1])
    print(x[:-1])             # everything except the last block
    print(x[:-2])
    print(x[1][1][1])         # [] is the member-access operator
    print(x[1, 1])
    print(x[[1, 1]])

if __name__ == '__main__':
    test()
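The paired-coordinate behavior of double list indexing trips people up; a small sketch contrasting it with a true Cartesian product (nested indexing is one of several ways to get the latter):

import torch

x = torch.arange(16).reshape(4, 4)
print(x[[1, 3], [0, 2]])      # tensor([ 4, 14]) -- the pairs (1,0) and (3,2)
print(x[[1, 3]][:, [0, 2]])   # a 2x2 block -- rows {1,3} crossed with cols {0,2}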
3. Boolean indexing

import torch

def tool(x):
    # a boolean operation yields a boolean array with the same shape as the
    # tensor; arithmetic operators return a tensor shaped like the original
    return x % 2 == 0

def test2():
    # boolean operations on a tensor
    torch.manual_seed(66)
    x = torch.randint(1, 10, (5, 5))
    print(x)
    x2 = x > 8
    print(x2)
    x3 = x[x2]
    print(x3)
    print(x[x > 5])
    print(x[x % 2 == 1])   # select all the odd numbers
    print(x[tool(x)])

if __name__ == '__main__':
    test2()
# Exercise: find the rows whose 1st column is even, whose 2nd column is odd,
# and whose 3rd column is a leap year, then take the 4th and 5th columns of
# those rows.
# x = torch.tensor(...)   # a tensor with at least 5 columns (values elided)
# Combine the three conditions with the element-wise operators & and |
# (Python's `and`/`or` do not work on tensors):
# is_even = x[:, 0] % 2 == 0
# is_odd  = x[:, 1] % 2 == 1
# is_leap = ((x[:, 2] % 4 == 0) & (x[:, 2] % 100 != 0)) | (x[:, 2] % 400 == 0)
# x[is_even & is_odd & is_leap, 3:5]
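A runnable version of the exercise; the rows below are invented purely for illustration:

import torch

# hypothetical data: columns are [even?, odd?, year, value4, value5]
x = torch.tensor([[2, 3, 2000, 10, 11],
                  [4, 5, 1900, 20, 21],
                  [3, 5, 2024, 30, 31],
                  [6, 7, 2024, 40, 41]])
is_leap = ((x[:, 2] % 4 == 0) & (x[:, 2] % 100 != 0)) | (x[:, 2] % 400 == 0)
mask = (x[:, 0] % 2 == 0) & (x[:, 1] % 2 == 1) & is_leap
print(x[mask, 3:5])   # rows 0 and 3 qualify: 2000 and 2024 are leap years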
4. Assigning via indices

def test3():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (5, 5))
    print(x)
    x2 = x[1, 1]
    print(x2)
    x[1, 1] = 100
    print(x)
    x[:, 3] = 200
    print(x)
    x[:, :] = 99
    x.fill_(66)   # fill the whole tensor in place
    print(x)

if __name__ == '__main__':
    test3()
6 Concatenating tensors

cat: joins tensors along an existing dimension; no new dimension is added.
stack: stacks tensors along a new dimension; one new dimension is added.

1. torch.cat

torch.cat (concatenate): joins several tensors along an existing dimension. The tensors must have the same size in every dimension except the one being concatenated.
import torch

def test01():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (3, 3))
    y = torch.randint(1, 10, (2, 3))
    print(x)
    print(y)
    z = torch.cat([x, y], dim=0)   # 0 is rows, 1 is columns
    # concatenating on dim=1 is impossible here: x has 3 rows but y has only 2
    print(z)

if __name__ == '__main__':
    test01()
2. torch.stack

torch.stack: joins tensors along a new dimension. It inserts a new dimension and stacks the tensors along it; the tensors must all have the same shape.

Stacking (stack) is like taking turns: along the chosen dimension, each tensor contributes one slice at a time, interleaved.
Concatenating (cat) is sequential: one tensor finishes before the next begins.
import torch
from PIL import Image
from torchvision import transforms

def test02():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (3, 3))
    y = torch.randint(1, 10, (3, 3))
    print(x)
    print(y)
    z = torch.stack((x, y), dim=0)   # stack along a new leading dimension
    z = torch.stack([x, y], dim=1)
    print(z)

def test03():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (3, 3, 2))
    y = torch.randint(1, 10, (3, 3, 2))
    print(x)
    print(y)
    z = torch.stack([x, y], dim=3)
    print(z)

def test04():
    # load a local image as a PIL object
    img_pil = Image.open('./data/1.png')
    # convert the PIL object to a tensor of shape (C, H, W)
    transfer = transforms.ToTensor()
    img_tensor = transfer(img_pil)
    print(img_tensor)
    print(img_tensor.shape)
    # restack the three channels along dim=2, giving (H, W, C)
    res = torch.stack([img_tensor[0], img_tensor[1], img_tensor[2]], dim=2)
    print(res, res.shape)
    print(sum(sum(res == 100)))

if __name__ == '__main__':
    test02()
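To make the cat/stack distinction concrete, a shape-only sketch on the same pair of inputs:

import torch

x = torch.zeros(3, 4)
y = torch.zeros(3, 4)
print(torch.cat([x, y], dim=0).shape)    # torch.Size([6, 4]) -- same rank
print(torch.stack([x, y], dim=0).shape)  # torch.Size([2, 3, 4]) -- new dimension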
7 Shape operations

1. reshape

The new shape must contain the same number of elements as the original shape.
import torch

def test01():
    x = torch.randint(1, 10, (4, 3))
    print(x)
    # reshape changes the shape; it leaves x's storage alone and produces a
    # new, contiguous tensor
    x2 = torch.reshape(x, (2, 6))
    print(x2)
    x3 = torch.reshape(x, (2, 2, 3))
    print(x3)
    # the element count cannot change: (3, 5) needs 15 elements but x has 12
    # x4 = torch.reshape(x, (3, 5))   # RuntimeError
    # -1 acts as a placeholder: use it when a dimension should be inferred
    x5 = torch.reshape(x, (-1, 6))
    print(x5)
    print(torch.reshape(x, (2, 2, -1)))   # at most one dimension may be -1

if __name__ == '__main__':
    test01()
2. view

If the tensor is contiguous in memory, view returns a view of the original tensor without reallocating memory, which is more efficient.

def test02():
    # view only works on tensors that are contiguous in memory
    x = torch.randint(1, 10, (4, 3))
    print(x)
    x2 = x.view((2, 6))   # view re-indexes the contiguous original tensor
                          # without reallocating memory, so it is fast
    print(x2)
    # view beats reshape here because it never touches x's storage

    # non-contiguous data cannot be viewed
    # x3 = torch.randint(1, 10, (4, 3))
    # x4 = torch.reshape(x3, (2, 6))
    # x4 = x3.t()          # after transposing, x4's data is non-contiguous
    # print(x4)
    # x5 = x4.view(1, 12)  # RuntimeError: non-contiguous data cannot be viewed
    # print(x5)

    # does the reshaped tensor share memory with the original?
    x6 = torch.randint(1, 10, (4, 3))
    x7 = x6.view(2, 6)
    x6[1, 1] = 100
    print(x6, x7)   # the change shows up in both: they share storage

if __name__ == '__main__':
    test02()
view: efficient, but requires contiguity.
reshape: flexible, but may involve copying memory.
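A runnable sketch of the contiguity rule that the commented-out lines above hint at:

import torch

x = torch.arange(12).reshape(4, 3)
t = x.t()                       # transpose: same storage, non-contiguous
print(t.is_contiguous())        # False
print(t.reshape(1, 12).shape)   # reshape copies when it must, so this works
# t.view(1, 12)                 # would raise a RuntimeError
print(t.contiguous().view(1, 12).shape)   # contiguous() copies, then view works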
3. transpose

Swaps two dimensions of a tensor and returns a view of the original tensor (shared memory).

def test03():
    x = torch.randint(1, 10, (4, 3, 2))
    print(x, x.shape)
    x2 = torch.transpose(x, 0, 1)   # swaps only the first two dimensions
    print(x2, x2.shape)

if __name__ == '__main__':
    test03()
4. permute

Reorders all dimensions of a tensor; several dimensions can be moved at once.

def test04():
    x = torch.randint(0, 255, (3, 512, 360))   # upper bound 255 is exclusive
    print(x)
    # (C, H, W) has dimension order (0, 1, 2)
    x2 = x.permute(1, 2, 0)   # -> (H, W, C)
    print(x2, x2.shape)

if __name__ == '__main__':
    test04()
5. flatten

Flattens a tensor into a one-dimensional vector (or flattens a range of its dimensions).

tensor.flatten(start_dim=0, end_dim=-1): start_dim is the first dimension to flatten; end_dim is the last one. The default -1 flattens through the final dimension.

def test05():
    x = torch.randint(0, 255, (3, 4))
    x2 = x.flatten()
    print(x2)

    x = torch.randint(0, 255, (3, 4, 2, 2))
    x2 = x.flatten(start_dim=1, end_dim=2)   # (3, 4, 2, 2) -> (3, 8, 2)
    print(x)
    print(x2)

if __name__ == '__main__':
    test05()
6. Adding and removing dimensions: unsqueeze inserts a new dimension of size 1 at the given position; squeeze removes all dimensions of size 1, or removes a specified dimension if its size is 1.

1. squeeze (removing dimensions)

def test06():
    x = torch.randint(0, 255, (1, 3, 4, 1))
    print(x)
    x2 = x.squeeze()               # remove every size-1 dimension
    print(x2)
    x3 = x.squeeze(0).squeeze(-1)  # remove specific dimensions
    print(x3)

if __name__ == '__main__':
    test06()
2. unsqueeze (adding dimensions)

def test07():
    x = torch.randint(0, 255, (3, 4))
    print(x)
    x2 = x.unsqueeze(0)   # (3, 4) -> (1, 3, 4)
    print(x2)
    print(x2.shape)
    x2 = x.unsqueeze(1)   # (3, 4) -> (3, 1, 4)
    print(x2)
    print(x2.shape)

if __name__ == '__main__':
    test07()
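A common use of unsqueeze (my illustration, not from the original) is adding a batch dimension before feeding a single image to a model:

import torch

img = torch.rand(3, 224, 224)   # a single (C, H, W) image
batch = img.unsqueeze(0)        # (1, 3, 224, 224): a batch of one
print(batch.shape)
print(batch.squeeze(0).shape)   # squeeze(0) undoes it: (3, 224, 224)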
8 Splitting tensors

chunk(data, x): splits data into x pieces.
split(data, x): splits data into pieces of size x each.

def test08():
    x = torch.randint(0, 255, (21, 4))
    x2 = torch.split(x, 2)   # each piece has 2 rows (the last piece gets the
                             # remaining 1 row)
    print(x2)
    x3 = torch.chunk(x, 2)   # split into 2 pieces (11 rows and 10 rows here)
    print(x3)

if __name__ == '__main__':
    test08()

9 Broadcasting
Broadcasting allows computation between tensors of different shapes: the smaller tensor is automatically expanded to be compatible with the larger one so the operation can proceed.

Rules: every tensor must have at least one dimension, and shapes are compared from the right (right-aligned).
import torch

def test01():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (4, 3))
    print(x)
    x2 = torch.randint(1, 10, (1, 3))
    print(x2)
    x3 = x + x2     # (1, 3) broadcasts across the 4 rows
    print(x3)

    x4 = torch.randint(1, 10, (4, 3))
    x5 = torch.randint(1, 10, (4, 1))
    print(x4)
    print(x5)
    x6 = x4 + x5    # (4, 1) broadcasts across the 3 columns
    print(x6)

def test02():
    data1d = torch.tensor([1, 2, 3])
    data2d = torch.tensor([[4], [2], [3]])
    print(data1d.shape, data2d.shape)
    # the computation broadcasts automatically: (3,) + (3, 1) -> (3, 3)
    print(data1d + data2d)

if __name__ == '__main__':
    test01()
# When broadcasting 2-D and 3-D tensors, both are expanded as needed so the
# operation can proceed.
def test003():
    # 2-D tensor, shape (2, 3); broadcast to (2, 2, 3):
    # [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]
    a = torch.tensor([[1, 2, 3], [4, 5, 6]])
    # 3-D tensor, shape (2, 1, 3); broadcast to (2, 2, 3):
    # [[[2, 3, 4], [2, 3, 4]], [[5, 6, 7], [5, 6, 7]]]
    b = torch.tensor([[[2, 3, 4]], [[5, 6, 7]]])
    print(a.shape, b.shape)
    result = a + b
    print(result, result.shape)

if __name__ == '__main__':
    test003()
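Right alignment also explains which shapes do not broadcast; a small sketch:

import torch

a = torch.ones(4, 3)
print((a + torch.ones(3)).shape)   # (3,) aligns with the last dim -> (4, 3)
try:
    a + torch.ones(4)              # (4,) aligns with 3, not 4 -> error
except RuntimeError as e:
    print('broadcast failed:', e)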
10 Math operations

1. Basic operations

import torch

def test():
    data = torch.tensor([[1, 2, -3.5],
                         [4, 5, 6],
                         [10.5, 18.6, 19.6],
                         [11.05, 19.3, 20.6]])
    print(data)
    x1 = torch.floor(data)   # round down (toward negative infinity)
    print(x1)
    x2 = torch.ceil(data)    # round up (toward positive infinity)
    print(x2)
    x3 = torch.round(data)   # like Python's round: .5 ties go to the even
                             # neighbor (round half to even)
    print(x3)
    x4 = torch.trunc(data)   # truncate: keep only the integer part
    print(x4)
    x5 = torch.frac(data)    # keep only the fractional part
    print(x5)
    x6 = torch.fix(data)     # round toward zero: negatives round up,
                             # positives round down
    print(x6)
    x7 = data % 2            # modulo
    print(x7)
    x8 = torch.abs(data)     # absolute value (cf. Manhattan distance)
    print(x8)

if __name__ == '__main__':
    test()
2. Trigonometric functions

import torch

def test02():
    # torch.pi is 3.141592653...; the arguments below are in radians
    print(torch.pi)
    deg = torch.pi / 180   # one degree, in radians
    data = torch.tensor([0, 90 * deg, 3])
    x = torch.sin(data)
    print(x)
    x = torch.cos(data)
    print(x)
    x = torch.sinh(data)   # hyperbolic sine
    print(x)
    x = torch.cosh(data)   # hyperbolic cosine
    print(x)
    x = torch.tan(data)
    print(x)
    x = torch.tanh(data)   # hyperbolic tangent
    print(x)

if __name__ == '__main__':
    test02()
3. Statistics functions

import torch
import cv2

def test03():
    torch.manual_seed(66)
    x = torch.randint(1, 10, (4, 3)).type(torch.float32)
    print(x)
    x1 = x.mean()          # mean (method form)
    print(x1)
    x2 = torch.mean(x)     # mean (function form)
    print(x2)
    x3 = torch.sum(x)      # sum
    print(x3)
    x5 = torch.std(x)      # standard deviation
    print(x5)
    x6 = torch.var(x)      # variance
    print(x6)
    x7 = torch.median(x)   # median
    print(x7)
    x8 = torch.mode(x)     # mode
    print(x8.values)
    x9 = torch.max(x)      # maximum
    print(x9)
    x9 = torch.min(x)      # minimum
    print(x9)
    x10 = torch.sort(x)    # sort
    print(x10)
    print(x10.values)      # the sorted values
    print(x10.indices)     # their original indices
    x11 = x.sort()
    print(x11)

    arr = [8, 10, 11, 13, 14]
    res = sorted(arr, key=lambda v: abs(v - 10))   # plain list: whichever is
                                                   # closest to 10 sorts first
    print(res)

    def myabs(v):
        return abs(v - 10)

    arr.sort(key=myabs)    # in-place list sort
    print(arr)

    x = torch.tensor([1, 1, 1, 2, 3, 4, 5, 2, 3, 4, 5, 6], dtype=torch.float32)
    print(torch.topk(x, 3))                       # the 3 largest values
    print(torch.histc(x, bins=5, min=2, max=4))   # occurrence counts over a
                                                  # given range and bin count
    print(torch.unique(x))   # the distinct values, e.g. the classes present
                             # in a dataset
    print(torch.unique(x).shape)

    # bincount counts occurrences of each value; it requires an integer tensor
    x = torch.tensor([1, 1, 1, 2, 3, 4, 5, 2, 3, 4, 5, 6])
    print(torch.bincount(x))

    img = cv2.imread('./data/1.png')
    img_tensor = torch.from_numpy(img).flatten()
    bincount = torch.bincount(img_tensor)
    print(bincount)
    res = torch.topk(bincount, 1)   # the most frequent pixel value
    print(res)

if __name__ == '__main__':
    test03()
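All of the calls above reduce over the whole tensor; most of them also accept a dim argument to reduce along one dimension (my addition for illustration):

import torch

x = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
print(x.mean(dim=0))   # column means: tensor([2.5000, 3.5000, 4.5000])
print(x.mean(dim=1))   # row means:    tensor([2., 5.])
print(x.max(dim=1))    # per-row maxima together with their indices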
11 Saving and loading

torch.save(x, path)
torch.load(path)

import torch

def test01():
    x = torch.tensor([1, 2, 3])
    torch.save(x, './data/tensor.pth')   # save

def test02():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.load('./data/tensor.pth', map_location=device)   # load onto a
                                                               # chosen device
    x = x.cuda()       # returns a new tensor on the GPU (requires CUDA)
    x = x.to('cuda')   # also returns a new tensor (requires CUDA)
    print(x)
    print(x.device)

if __name__ == '__main__':
    test01()
    test02()
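Several tensors are often saved together in one file by wrapping them in a dict (a common pattern, not from the original; the path below is hypothetical):

import torch

state = {'weights': torch.randn(3, 3), 'bias': torch.zeros(3)}
torch.save(state, './data/state.pth')     # save the whole dict at once
loaded = torch.load('./data/state.pth')
print(loaded['weights'].shape, loaded['bias'].shape)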
12 Parallelism

torch.get_num_threads(): gets the number of CPU threads PyTorch uses.
torch.set_num_threads(4): sets the number of CPU threads PyTorch uses.

import torch

def test03():
    count = torch.get_num_threads()   # query the CPU thread count
    print(count)

def test04():
    torch.set_num_threads(4)          # set the CPU thread count
    count = torch.get_num_threads()
    print(count)

if __name__ == '__main__':
    test03()
    test04()
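A quick way to see the thread setting matter is to time a large matmul at different thread counts; a rough sketch (the sizes are arbitrary and timings will vary by machine):

import time
import torch

a = torch.randn(2000, 2000)
for n in (1, 4):
    torch.set_num_threads(n)
    start = time.perf_counter()
    _ = a @ a
    print(n, 'threads:', round(time.perf_counter() - start, 4), 's')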