動(dòng)手學(xué)習(xí)深度學(xué)習(xí)筆記一

Linear Regression

# --- Linear regression: setup and synthetic data ---
import torch
from torch import nn
import numpy as np

torch.manual_seed(1)  # fix RNG for reproducibility

torch.set_default_tensor_type('torch.FloatTensor')

num_inputs = 2        # number of features per sample
num_examples = 1000   # number of synthetic samples
true_w = [2, -3.4]    # ground-truth weights of the generating model
true_b = 4.2          # ground-truth bias

# Features drawn i.i.d. from a standard normal distribution, shape (1000, 2).
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)

# Labels follow the linear model true_w . x + true_b ...
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b

# ... plus Gaussian observation noise with std 0.01.
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

import torch.utils.data as Data

batch_size = 10

# Combine features and labels of the dataset.
dataset = Data.TensorDataset(features, labels)

# Put the dataset into a DataLoader for shuffled mini-batch iteration.
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # mini-batch size
    shuffle=True,           # whether to shuffle the data or not
    num_workers=2,          # read data with worker subprocesses
)

class LinearNet(nn.Module):
    """Single-layer linear regression network: n_feature inputs -> 1 output."""

    def __init__(self, n_feature):
        # Call the parent constructor to initialize nn.Module state.
        super(LinearNet, self).__init__()
        # Function prototype: `torch.nn.Linear(in_features, out_features, bias=True)`
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        """Return the linear prediction for x; output shape (batch, 1)."""
        y = self.linear(x)
        return y

# Equivalent model built with nn.Sequential instead of the custom class.
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # other layers can be added here
)

from torch.nn import init

# Initialize weights ~ N(0, 0.01) and the bias to zero.
init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # or use `net[0].bias.data.fill_(0)` to modify it directly

loss = nn.MSELoss()  # nn built-in squared loss function
# function prototype: `torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')`

import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.03)  # built-in stochastic gradient descent
print(optimizer)  # function prototype: `torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)`

num_epochs = 3

# Training loop: forward pass, loss, backward pass, parameter update per mini-batch.
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        print(X.shape)
        print(y.shape)
        output = net(X)
        print(output.shape)
        # Reshape y to (batch, 1) so it matches the network output.
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))

Softmax Regression

加載各種包或者模塊

# Load the required packages and modules.
import torch
from torch import nn
from torch.nn import init
import torchvision
import torchvision.transforms as transforms
import numpy as np
import sys

def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent update, in place.

    Divides by batch_size to stay consistent with the original book;
    with PyTorch's default mean-reduction losses the division would
    not be needed, since the loss is already averaged over the batch.
    """
    for param in params:
        # Update param.data so the change itself is not tracked by autograd.
        param.data -= lr * param.grad / batch_size

def load_data_fashion_mnist(batch_size, root='~/Datasets/FashionMNIST'):
    """Download the Fashion-MNIST dataset and return (train_iter, test_iter) DataLoaders."""
    transform = transforms.ToTensor()
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes to speed up data loading
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter

train_iter, test_iter = load_data_fashion_mnist(128)

# Sanity check: print the shape of every mini-batch.
for x, y in train_iter:
    print(x.shape)
    print(y.shape)

def evaluate_accuracy(data_iter, net):
    """Return the classification accuracy of `net` over all batches in `data_iter`."""
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        # Count predictions whose argmax class matches the label.
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

batch_size = 256

train_iter, test_iter = load_data_fashion_mnist(batch_size)

num_inputs = 784   # 28 * 28 pixels, flattened
num_outputs = 10   # number of Fashion-MNIST classes

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    """Train `net` for `num_epochs` epochs, printing loss and accuracy per epoch.

    If `optimizer` is given it is used for the updates; otherwise the manual
    sgd() step is applied to `params` with learning rate `lr`.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            # Zero the gradients before the backward pass.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used by the concise softmax-regression implementation

            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

class LinearNet(nn.Module):
    """Softmax-regression network: flatten each image, then one linear layer."""

    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # x has shape (batch, 1, 28, 28); flatten to (batch, num_inputs) first.
        y = self.linear(x.view(x.shape[0], -1))
        return y

# net = LinearNet(num_inputs, num_outputs)

class FlattenLayer(nn.Module):
    """Flatten every sample in a batch to a 1-D vector."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        # x has shape (batch, *, *, ...); output shape is (batch, -1).
        return x.view(x.shape[0], -1)

from collections import OrderedDict

# Build the model from an OrderedDict so the layers get readable names.
net = nn.Sequential(
    # FlattenLayer(),
    # LinearNet(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))])  # our own LinearNet(num_inputs, num_outputs) would also work
)

# Initialize weights ~ N(0, 0.01) and the bias to zero.
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

loss = nn.CrossEntropyLoss()  # its function prototype is below:
# class torch.nn.CrossEntropyLoss(weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')

optimizer = torch.optim.SGD(net.parameters(), lr=0.1)  # its function prototype is below:
# class torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)

num_epochs = 5

train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)

Perceptron (Multilayer Perceptron)

# Load the required packages and modules.
import torch
from torch import nn
from torch.nn import init
import torchvision
import torchvision.transforms as transforms
import numpy as np
import sys

def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent update, in place.

    Divides by batch_size to stay consistent with the original book;
    with PyTorch's default mean-reduction losses the division would
    not be needed, since the loss is already averaged over the batch.
    """
    for param in params:
        # Update param.data so the change itself is not tracked by autograd.
        param.data -= lr * param.grad / batch_size

class FlattenLayer(torch.nn.Module):
    """Flatten every sample in a batch to a 1-D vector."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        # x shape: (batch, *, *, ...); output shape is (batch, -1).
        return x.view(x.shape[0], -1)

def load_data_fashion_mnist(batch_size, root='~/Datasets/FashionMNIST'):
    """Download the Fashion-MNIST dataset and return (train_iter, test_iter) DataLoaders."""
    transform = transforms.ToTensor()
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes to speed up data loading
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter

train_iter, test_iter = load_data_fashion_mnist(128)

# Sanity check: print the shape of every mini-batch.
for x, y in train_iter:
    print(x.shape)
    print(y.shape)

def evaluate_accuracy(data_iter, net):
    """Return the classification accuracy of `net` over all batches in `data_iter`."""
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        # Count predictions whose argmax class matches the label.
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

batch_size = 256

train_iter, test_iter = load_data_fashion_mnist(batch_size)

num_inputs = 784   # 28 * 28 pixels, flattened
num_outputs = 10   # number of Fashion-MNIST classes

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    """Train `net` for `num_epochs` epochs, printing loss and accuracy per epoch.

    If `optimizer` is given it is used for the updates; otherwise the manual
    sgd() step is applied to `params` with learning rate `lr`.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            # Zero the gradients before the backward pass.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used by the concise softmax-regression implementation

            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

num_inputs, num_outputs, num_hiddens = 784, 10, 256  # MLP sizes: input, output, hidden

# Multilayer perceptron: flatten -> linear -> ReLU -> linear.
net = nn.Sequential(
    FlattenLayer(),
    nn.Linear(num_inputs, num_hiddens),
    nn.ReLU(),
    nn.Linear(num_hiddens, num_outputs),
)

# Initialize every parameter ~ N(0, 0.01).
for params in net.parameters():
    init.normal_(params, mean=0, std=0.01)

batch_size = 256

train_iter, test_iter = load_data_fashion_mnist(batch_size)

loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)

num_epochs = 5

train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)

?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡(jiǎn)書系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容