12. Fashion-MNIST Classification with a VGG Network

12.1 VGG Network Architecture Design

(figure: VGG network architecture diagram)

import torch
from torch import nn
import matplotlib.pyplot as plt
from torchsummary import summary
#VGG block: num_convs (3x3 conv + ReLU) layers followed by a 2x2 max-pooling layer
def vgg_block(num_convs,in_channels,out_channels):
    layers=[]
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1))
        layers.append(nn.ReLU())
        in_channels=out_channels
    layers.append(nn.MaxPool2d(kernel_size=2,stride=2))#layers holds a single block; the pooling halves the spatial size
    return nn.Sequential(*layers)
#assemble the full VGG network from the block specification
def vgg(conv_arch):
    conv_bls=[]
    in_channels=1
    for (num_convs,out_channels) in conv_arch:
        conv_bls.append(vgg_block(num_convs,in_channels,out_channels))
        in_channels=out_channels#the next block's in_channels equals this block's out_channels
    net=nn.Sequential(
        *conv_bls,nn.Flatten(),
        nn.Linear(out_channels*7*7,4096),nn.ReLU(),nn.Dropout(p=0.5),#after five blocks a 224x224 input is reduced to 7x7
        nn.Linear(4096,4096),nn.ReLU(),nn.Dropout(p=0.5),
        nn.Linear(4096,10)
    )
    return net
#(num_convs, out_channels) for each of the five blocks of VGG-11/16/19
conv_arch_11 = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
conv_arch_16 = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
conv_arch_19 = ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512))
X=torch.randn(size=(1,1,224,224))#dummy single-channel 224x224 input
model=vgg(conv_arch_19)
summary(model,input_size=(1,224,224),device='cpu')#the model is still on the CPU here, so have torchsummary build its dummy input on the CPU as well

(figure: torchsummary layer-by-layer output with parameter counts)
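
As a quick cross-check of the summary output, the Sequential model can be walked module by module to print the intermediate shapes. This is a minimal sketch assuming the VGG-19 model built above is still in memory; each VGG block should halve the spatial resolution (224 → 112 → 56 → 28 → 14 → 7).

#sketch: print the output shape after every top-level module of the Sequential model
X=torch.randn(size=(1,1,224,224))
for layer in model:
    X=layer(X)
    print(layer.__class__.__name__,'output shape:',tuple(X.shape))
#the last block should produce (1, 512, 7, 7), which Flatten turns into (1, 25088)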

12.2 Implementing Fashion-MNIST Classification with VGG

import torch
import torchvision
from torch import nn
import matplotlib.pyplot as plt
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.metrics import accuracy_score
plt.rcParams['font.family']=['Times New Roman']
def vgg_block(num_convs,in_channels,out_channels):
    layers=[]
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1))
        layers.append(nn.ReLU())
        in_channels=out_channels
    layers.append(nn.MaxPool2d(kernel_size=2,stride=2))#layers holds a single block; the pooling halves the spatial size
    return nn.Sequential(*layers)
#assemble the full VGG network from the block specification
def vgg(conv_arch):
    conv_bls=[]
    in_channels=1
    for (num_convs,out_channels) in conv_arch:
        conv_bls.append(vgg_block(num_convs,in_channels,out_channels))
        in_channels=out_channels#the next block's in_channels equals this block's out_channels
    net=nn.Sequential(
        *conv_bls,nn.Flatten(),
        nn.Linear(out_channels*7*7,4096),nn.ReLU(),nn.Dropout(p=0.5),#after five blocks a 224x224 input is reduced to 7x7
        nn.Linear(4096,4096),nn.ReLU(),nn.Dropout(p=0.5),
        nn.Linear(4096,10)
    )
    return net
class Reshape(torch.nn.Module):#defined but not used in this script
    def forward(self,x):
        return x.view(-1,1,28,28)#[bs,1,28,28]
def plot_metrics(train_loss_list, train_acc_list, test_acc_list, title='Training Curve'):
    epochs = range(1, len(train_loss_list) + 1)
    plt.figure(figsize=(4, 3))
    plt.plot(epochs, train_loss_list, label='Train Loss')
    plt.plot(epochs, train_acc_list, label='Train Acc',linestyle='--')
    plt.plot(epochs, test_acc_list, label='Test Acc', linestyle='--')
    plt.xlabel('Epoch')
    plt.ylabel('Value')
    plt.title(title)
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()
def train_model(model,train_data,test_data,num_epochs):
    train_loss_list = []
    train_acc_list = []
    test_acc_list = []
    for epoch in range(num_epochs):
        model.train()#enable dropout during training
        total_loss=0
        total_acc_sample=0
        total_samples=0
        loop1=tqdm(train_data,desc=f"EPOCHS[{epoch+1}/{num_epochs}] train")
        loop2=tqdm(test_data,desc=f"EPOCHS[{epoch+1}/{num_epochs}] test")
        for X,y in loop1:
            X=X.to(device)
            y=y.to(device)
            y_hat=model(X)
            loss=CEloss(y_hat,y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            #accumulate the loss weighted by batch size
            total_loss+=loss.item()*X.shape[0]
            y_pred=y_hat.argmax(dim=1).detach().cpu().numpy()
            y_true=y.detach().cpu().numpy()
            total_acc_sample+=accuracy_score(y_true,y_pred)*X.shape[0]#accumulate the number of correctly classified samples
            total_samples+=X.shape[0]
        model.eval()#disable dropout for evaluation
        test_acc_samples=0
        test_samples=0
        with torch.no_grad():#no gradients needed during evaluation
            for X,y in loop2:
                X=X.to(device)
                y=y.to(device)
                y_hat=model(X)
                y_pred=y_hat.argmax(dim=1).detach().cpu().numpy()
                y_true=y.detach().cpu().numpy()
                test_acc_samples+=accuracy_score(y_true,y_pred)*X.shape[0]#accumulate the number of correctly classified samples
                test_samples+=X.shape[0]
        avg_train_loss=total_loss/total_samples
        avg_train_acc=total_acc_sample/total_samples
        avg_test_acc=test_acc_samples/test_samples
        train_loss_list.append(avg_train_loss)
        train_acc_list.append(avg_train_acc)
        test_acc_list.append(avg_test_acc)
        print(f"Epoch {epoch+1}: Loss: {avg_train_loss:.4f},Trian Accuracy: {avg_train_acc:.4f},test Accuracy: {avg_test_acc:.4f}")
    plot_metrics(train_loss_list, train_acc_list, test_acc_list)
    return model
def init_weights(m):
    if type(m) == nn.Linear or type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight)
################################################################################################################
#note: the 28x28 Fashion-MNIST images are resized to 224x224 to match the VGG input size
transform=transforms.Compose([transforms.Resize(224),transforms.ToTensor(),transforms.Normalize((0.5,),(0.5,))])#first tuple is the mean, second is the std
train_img=torchvision.datasets.FashionMNIST(root="./data",train=True,transform=transform,download=True)
test_img=torchvision.datasets.FashionMNIST(root="./data",train=False,transform=transform,download=True)
train_data=DataLoader(train_img,batch_size=256,num_workers=4,shuffle=True)
test_data=DataLoader(test_img,batch_size=256,num_workers=4,shuffle=False)
################################################################################################################
conv_arch_11 = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
conv_arch_16 = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
conv_arch_19 = ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512))
model=vgg(conv_arch_11)
model.apply(init_weights)
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
#print(device)
model=model.to(device)
optimizer=torch.optim.SGD(model.parameters(),lr=0.01)
CEloss=nn.CrossEntropyLoss()
num_epochs=1
model=train_model(model,train_data,test_data,num_epochs)
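
After training, it can be useful to persist the weights and spot-check a few predictions on the test set. The following is a minimal sketch assuming the model, test_img, test_data, and device defined above; the file name vgg11_fashion_mnist.pth is only an illustrative choice.

#sketch: save the trained weights and inspect predictions for one test batch
torch.save(model.state_dict(),"vgg11_fashion_mnist.pth")#illustrative file name
class_names=test_img.classes#torchvision's FashionMNIST exposes the label names
model.eval()
with torch.no_grad():
    X,y=next(iter(test_data))
    preds=model(X.to(device)).argmax(dim=1).cpu()
for i in range(5):
    print(f"predicted: {class_names[preds[i]]}, actual: {class_names[y[i]]}")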