1. 理论介绍

2. 实例解析

2.1. 实例描述

使用以下三阶多项式生成训练和测试数据

$$
y = 5 + 1.2x - 3.4\frac{x^2}{2!} + 5.6\frac{x^3}{3!} + \epsilon
\quad \text{where} \quad \epsilon \sim \mathcal{N}(0,\ 0.1^2).
$$

并用1阶(线性模型)、3阶、20阶多项式拟合。

2.2. 代码实现

2.2.1. 完整代码

import os
import numpy as np
import math, torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from tensorboardX import SummaryWriter
from rich.progress import track

def evaluate_loss(dataloader, net, criterion):
    """Evaluate the mean per-example loss of `net` over a whole dataset.

    Args:
        dataloader: iterable yielding `(X, y)` mini-batches.
        net: the model to evaluate.
        criterion: elementwise loss (expected `reduction='none'`, as in the
            training script), so `loss.sum()` totals over the batch.

    Returns:
        float: total loss divided by the number of examples seen.
    """
    # Follow the model's own device instead of hard-coding .cuda(), so this
    # also works on CPU-only machines; behavior on GPU is unchanged.
    device = next(net.parameters()).device
    num_examples = 0
    loss_sum = 0.0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            loss = criterion(net(X), y)
            num_examples += y.shape[0]
            # .item() detaches and frees the per-batch tensor immediately,
            # instead of accumulating a tensor across the whole epoch.
            loss_sum += loss.sum().item()
    return loss_sum / num_examples

def load_dataset(*tensors, batch_size=None, shuffle=True):
    """Wrap tensors in a shuffling DataLoader.

    Args:
        *tensors: tensors with a matching first dimension (features, labels).
        batch_size: mini-batch size; when None, falls back to the module-level
            `batch_size` global, preserving the original implicit dependency.
        shuffle: whether to reshuffle every epoch (default True, as before).

    Returns:
        torch.utils.data.DataLoader over a TensorDataset of `tensors`.
    """
    if batch_size is None:
        # Original code read the global directly; keep that as the fallback
        # so existing positional-only call sites behave identically.
        batch_size = globals()['batch_size']
    dataset = TensorDataset(*tensors)
    return DataLoader(dataset, batch_size, shuffle=shuffle)


if __name__ == '__main__':
    # Global hyper-parameters (batch_size is also read by load_dataset).
    num_epochs = 400
    batch_size = 10
    learning_rate = 0.01

    # TensorBoard logging: each run gets a fresh directory runs/exp1, exp2, ...
    def log_dir():
        """Return a new experiment directory path under ./runs (created if absent)."""
        root = "runs"
        if not os.path.exists(root):
            os.mkdir(root)
        order = len(os.listdir(root)) + 1
        return f'{root}/exp{order}'
    writer = SummaryWriter(log_dir())

    # Generate the dataset.
    max_degree = 20             # highest polynomial degree in the feature matrix
    n_train, n_test = 100, 100  # sizes of the training and test sets

    # True coefficients: only degrees 0..3 are non-zero, matching
    # y = 5 + 1.2x - 3.4 x^2/2! + 5.6 x^3/3! + noise.
    true_w = np.zeros(max_degree+1)
    true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])

    features = np.random.normal(size=(n_train + n_test, 1))
    np.random.shuffle(features)
    # Column i of poly_features holds x^i / i!, so a bias-free linear model on
    # these columns is exactly a polynomial model (column 0 is the constant term).
    poly_features = np.power(features, np.arange(max_degree+1).reshape(1, -1))
    for i in range(max_degree+1):
        poly_features[:, i] /= math.gamma(i + 1)  # gamma(n)=(n-1)!
    labels = np.dot(poly_features, true_w)
    labels += np.random.normal(scale=0.1, size=labels.shape)    # Gaussian noise ~ N(0, 0.1^2)

    # Convert to float32 tensors; labels become a column vector (n, 1).
    poly_features, labels = [
        torch.as_tensor(x, dtype=torch.float32) for x in [
            poly_features, labels.reshape(-1, 1)]]
    
    def loop(model_degree):
        """Train a degree-`model_degree` polynomial fit (first model_degree+1
        feature columns) and log train/test loss per epoch to TensorBoard."""
        # Build the model: bias disabled because column 0 (x^0/0!) is the bias.
        net = nn.Linear(model_degree+1, 1, bias=False).cuda()
        nn.init.normal_(net.weight, mean=0, std=0.01)
        criterion = nn.MSELoss(reduction='none')
        optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

        # First n_train rows are the training set, the rest the test set.
        dataloader_train = load_dataset(poly_features[:n_train, :model_degree+1], labels[:n_train])
        dataloader_test = load_dataset(poly_features[n_train:, :model_degree+1], labels[n_train:])
        
        # Training loop with a rich progress bar per model degree.
        for epoch in track(range(num_epochs), description=f'{model_degree}-degree'):
            for X, y in dataloader_train:
                X, y = X.cuda(), y.cuda()
                loss = criterion(net(X), y)
                optimizer.zero_grad()
                # criterion has reduction='none', so reduce to a scalar here.
                loss.mean().backward()
                optimizer.step()

            # Log train and test loss on the same chart to visualize
            # under-/over-fitting across epochs.
            writer.add_scalars(f"{model_degree}-degree", {
                "train_loss": evaluate_loss(dataloader_train, net, criterion),
                "test_loss": evaluate_loss(dataloader_test, net, criterion),
            }, epoch)
        print(f"{model_degree}-degree: weights =", net.weight.data.cpu().numpy())

    # Compare underfitting (1), a good fit (3), and overfitting (20).
    for model_degree in [1, 3, 20]:
        loop(model_degree)
    writer.close()

2.2.2. 输出结果

权重

发表回复

您的邮箱地址不会被公开。 必填项已用 * 标注