らんだむな記憶

Figured I'd try out this whole "blogging" thing!

Celsius to Fahrenheit

\begin{align}
F = \frac{9}{5} C + 32
\end{align}
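
As a quick check, C = 100 gives F = 9/5 × 100 + 32 = 212, the boiling point of water.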

Let's train a model to learn this. I don't really remember ever implementing a model myself anyway.

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import numpy as np

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Celsius inputs on [-100, 100) and their exact Fahrenheit targets
X = np.arange(-100, 100, 0.1)
y = X * 9 / 5 + 32

# convert to float32 tensors; each sample is a single scalar feature
X_train = torch.from_numpy(X).float()
y_train = torch.from_numpy(y).float()
trainset = torch.utils.data.TensorDataset(X_train, y_train)
# hold out 20% of the samples for validation
valsize = int(len(trainset) * 0.2)
trainset, valset = torch.utils.data.random_split(trainset, [len(trainset) - valsize, valsize])

trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=1)

# a single linear unit y = wx + b has exactly the form of the target function
model = nn.Sequential(nn.Linear(1, 1)).to(device)

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

def train(model, dataloaders, optimizer, criterion, num_epochs, device):
    for epoch in range(1, num_epochs+1):
        for phase in ["train", "val"]:
            if phase == "train":
                model.train()
            elif phase == "val":
                model.eval()

            with torch.set_grad_enabled(phase == "train"), tqdm(total=len(dataloaders[phase]), unit="batch") as pbar:
                loss_sum = 0
                total = 0
                pbar.set_description(f"Epoch[{epoch}/{num_epochs}]({phase})")
                for batch_idx, (xs, ys) in enumerate(dataloaders[phase]):
                    xs, ys = xs.to(device), ys.to(device)
                    output = model(xs)
                    loss = criterion(output, ys)

                    if phase == "train":
                        # backprop and update weights only during training
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    total += xs.size(0)
                    loss_sum += loss.item() * xs.size(0)

                    running_loss = loss_sum / total

                    pbar.set_postfix({"loss":running_loss})
                    pbar.update(1)

dataloaders = {"train":trainloader, "val":valloader}
num_epochs = 10

train(model, dataloaders, optimizer, criterion, num_epochs, device)

for name, param in model.named_parameters():
    print(name, param)

After training, the learned parameters come out to roughly

0.weight Parameter containing:
tensor([[1.7989]], device='cuda:0', requires_grad=True)
0.bias Parameter containing:
tensor([31.8399], device='cuda:0', requires_grad=True)

which is close to the true coefficients 9/5 = 1.8 and 32.
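
As a final sanity check, here is a minimal sketch (assuming the model and device defined above are still in scope) that compares the trained model's predictions with the exact formula:

# compare the trained model's predictions against the exact formula
model.eval()
with torch.no_grad():
    for c in [0.0, 37.0, 100.0]:
        pred = model(torch.tensor([c], device=device)).item()
        print(f"{c} C -> predicted {pred:.2f} F (exact {c * 9 / 5 + 32:.2f} F)")

With the weights printed above, this should give values within a few tenths of a degree of the exact 32, 98.6, and 212.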