PyTorch Introduction (Part 1)

1. Loading Data

from torch.utils.data import Dataset
from PIL import Image
import os


class MyData(Dataset):

    def __init__(self, root_dir, label_dir):
        # Store the dataset's root directory and label directory
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(root_dir, label_dir)
        # List all image file names under the label directory
        self.img_path = os.listdir(self.path)

    def __getitem__(self, idx):
        # Look up the image file name for this index and build its full path
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        # Open the image file as a PIL Image
        img = Image.open(img_item_path)
        # Use the directory name as the label
        label = self.label_dir
        return img, label

    def __len__(self):
        # Return the number of image files in the dataset
        return len(self.img_path)


root_dir = "hymenoptera_data/train"
# Label directory for the ant images
ants_label_dir = "ants_image"
# Build the ant image dataset
ants_dataset = MyData(root_dir, ants_label_dir)
# Label directory for the bee images
bees_label_dir = "bees_image"
# Build the bee image dataset
bees_dataset = MyData(root_dir, bees_label_dir)

# Combine the bee and ant datasets into a single training dataset
train_dataset = bees_dataset + ants_dataset
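Indexing and len() work as defined in MyData, and + on two Dataset objects returns a torch.utils.data.ConcatDataset. A minimal usage sketch (assuming the hymenoptera_data folder is laid out as above):

img, label = ants_dataset[0]  # first ant image (a PIL Image) and its label
print(label)                  # "ants_image"
print(len(train_dataset))     # len(bees_dataset) + len(ants_dataset)
img.show()                    # open the image in the default viewer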

2. Generating Label Files in Batch

import os

root_dir = "hymenoptera_data/train"
target_dir = "ants_image"
img_path = os.listdir(os.path.join(root_dir, target_dir))
#img_path = os.listdir("hymenoptera_data/train/ants_image")
label = target_dir.split('_')[0]
out_dir = "ants_label"
# Make sure the output directory exists before writing into it
os.makedirs(os.path.join(root_dir, out_dir), exist_ok=True)

# Iterate over every image file
for i in img_path:
    # Strip the .jpg extension from the file name
    file_name = i.split('.jpg')[0]

    # Build the target path: same name as the image, with a .txt extension
    target_file = os.path.join(root_dir, out_dir, "{}.txt".format(file_name))

    # Write the label into the file; each .txt ends up containing "ants"
    with open(target_file, 'w') as f:
        f.write(label)

3. Common Transforms

  • Input: a PIL image, opened with Image.open()
  • Output: a tensor, produced by transforms.ToTensor()
  • ToTensor also accepts numpy ndarrays, e.g. images read with cv2.imread() (see the sketch below)
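A minimal ToTensor sketch (the image path here is hypothetical; substitute any image from the dataset, and cv2 requires opencv-python):

from PIL import Image
from torchvision import transforms

trans_totensor = transforms.ToTensor()

# hypothetical path; any image from the dataset works
img = Image.open("hymenoptera_data/train/ants_image/0013035.jpg")
img_tensor = trans_totensor(img)  # PIL Image -> float tensor in [0, 1], shape (C, H, W)
print(img_tensor.shape)

# the same transform also converts an ndarray:
# import cv2
# img_arr = cv2.imread("hymenoptera_data/train/ants_image/0013035.jpg")
# print(trans_totensor(img_arr).shape)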

4. The Convolution Operation

import torch
import torch.nn.functional as F

# Input image (a 5x5 tensor)
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])

# Convolution kernel (3x3)
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

# conv2d expects input of shape (batch, channels, height, width)
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))

print(input.shape)
print(kernel.shape)

output = F.conv2d(input, kernel, stride=1)
print(output)  # shape (1, 1, 3, 3)

output = F.conv2d(input, kernel, stride=2)
print(output)  # shape (1, 1, 2, 2)

output = F.conv2d(input, kernel, stride=1, padding=1)
print(output)  # shape (1, 1, 5, 5)
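The three shapes follow the standard Conv2d output-size formula; a quick check (the helper conv_out_size is just for illustration):

def conv_out_size(h_in, kernel, stride=1, padding=0):
    # H_out = floor((H_in + 2*padding - kernel) / stride) + 1
    return (h_in + 2 * padding - kernel) // stride + 1

print(conv_out_size(5, 3, stride=1))             # 3 -> 3x3 output
print(conv_out_size(5, 3, stride=2))             # 2 -> 2x2 output
print(conv_out_size(5, 3, stride=1, padding=1))  # 5 -> 5x5 output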

5. Convolutional Layers

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("../data", train=True, transform=torchvision.transforms.ToTensor(), download=True)

dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

tudui = Tudui()

writer = SummaryWriter("../logs")

step = 0
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(imgs.shape)    # torch.Size([64, 3, 32, 32])
    print(output.shape)  # torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step)

    # add_images expects 3-channel images, so regroup the 6-channel output
    # into twice as many 3-channel images for visualization
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)

    step = step + 1

writer.close()

6. Using Max Pooling

import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())


dataloader = DataLoader(dataset, batch_size=64)

# input = torch.tensor([[1, 2, 0, 3, 1],
#                       [0, 1, 2, 3, 1],
#                       [1, 2, 1, 0, 0],
#                       [5, 2, 3, 1, 1],
#                       [2, 1, 0, 1, 1]], dtype=torch.float32)
#
# input = torch.reshape(input, (-1, 1, 5, 5))
# print(input.shape)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # ceil_mode=False floors the output size, dropping partial windows
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()
# output = tudui(input)
# print(output)

writer = SummaryWriter("../logs_maxpool")
step = 0

for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    output = tudui(imgs)
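    # unlike the conv example above, max pooling keeps the channel count (3),
    # so the output can be logged without any reshaping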
    writer.add_images("output", output, step)

    step = step + 1

writer.close()

7. Non-linear Activations

import torch
import torchvision.datasets
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

input = torch.tensor([[1, -0.5],
                      [-1, 3]])

input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)
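# a quick manual check with the small tensor above (added for illustration):
# ReLU clamps negative entries to 0
relu_demo = ReLU()
print(relu_demo(input))  # tensor([[[[1., 0.], [0., 3.]]]])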

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        # Sigmoid is used for the visualization below; CIFAR-10 pixels are
        # already in [0, 1], where ReLU would leave them unchanged
        output = self.sigmoid1(input)
        return output

tudui = Tudui()
step = 0
writer = SummaryWriter("../logs_relu")
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step += 1

writer.close()

8. Neural Networks: A Small Hands-On Project and Using Sequential

  • CIFAR-10 model structure: three 5x5 convolutions, each followed by 2x2 max pooling, then flatten and two linear layers

  • Calculation: each convolution uses padding=2 so that the 32x32 spatial size is preserved (a quick check below)
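A sketch of the padding calculation using the Conv2d output-size formula (this check is added for illustration):

# H_out = floor((H_in + 2*padding - kernel_size) / stride) + 1
# keeping H_out == H_in == 32 with kernel_size=5 and stride=1 needs padding=2:
print((32 + 2 * 2 - 5) // 1 + 1)  # 32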

  • Without Sequential
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # First convolution
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        # First pooling
        self.maxpool1 = MaxPool2d(2)
        # Second convolution
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        # Second pooling
        self.maxpool2 = MaxPool2d(2)
        # Third convolution
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        # Third pooling
        self.maxpool3 = MaxPool2d(2)
        # Flatten the feature maps
        self.flatten = Flatten()
        # First linear layer (64 channels * 4 * 4 = 1024 inputs)
        self.linear1 = Linear(1024, 64)
        # Second linear layer
        self.linear2 = Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        
        return x


tudui = Tudui()
# Visualize the structure by printing the model
print(tudui)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)  # torch.Size([64, 10])


# Visualize the computation graph with TensorBoard
writer = SummaryWriter("../logs_seq")
writer.add_graph(tudui, input)
writer.close()

  • With Sequential
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.module1(x)

        return x


tudui = Tudui()
# Visualize the structure by printing the model
print(tudui)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)  # torch.Size([64, 10])


# Visualize the computation graph with TensorBoard
writer = SummaryWriter("../logs_seq")
writer.add_graph(tudui, input)
writer.close()

9. Loss Functions and Backpropagation

  • Common loss functions
import torch
from torch import nn
from torch.nn import L1Loss, MSELoss

input = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

input = torch.reshape(input, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

loss_l1 = L1Loss(reduction='sum')
result = loss_l1(input, targets)
print(result)  # tensor(2.) : |1-1| + |2-2| + |5-3|

# Mean squared error
loss_mse = MSELoss()
mse_result = loss_mse(input, targets)
print(mse_result)  # (0 + 0 + 2**2) / 3 ≈ 1.3333


x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
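# manual check: CrossEntropyLoss(x, class) = -x[class] + log(sum_j exp(x[j]))
#             = -0.2 + log(exp(0.1) + exp(0.2) + exp(0.3)) ≈ 1.1019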
  • CrossEntropyLoss
import torchvision.datasets
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, Flatten
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.module1(x)

        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    result_loss = loss(output, targets)
    # backward() computes the gradient of the loss w.r.t. every parameter
    result_loss.backward()
    print(result_loss)

10. Optimizers

import torch.optim
import torchvision.datasets
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, Flatten
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.module1(x)

        return x

loss = nn.CrossEntropyLoss()
tudui = Tudui()

# Define the optimizer (stochastic gradient descent)
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)

for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        output = tudui(imgs)
        result_loss = loss(output, targets)
        # Zero every parameter's gradient so the next backward pass is not
        # influenced by gradients left over from earlier iterations
        optim.zero_grad()
        # Backpropagate: the chain rule propagates the error from the output
        # layer back toward the input, computing each parameter's gradient
        result_loss.backward()
        # Update the model's parameters from the computed gradients, following
        # the chosen optimization algorithm, to reduce the loss
        optim.step()
        # .item() extracts the Python number so the computation graph is not
        # kept alive across iterations
        running_loss = running_loss + result_loss.item()
    print(running_loss)


# forward: forward propagation
# backward: backpropagation

Author: han yue
Copyright: Unless otherwise stated, all articles on this blog are licensed under CC BY 4.0. Please credit han yue when reposting!