After studying text preprocessing, language models, and recurrent neural networks, I followed the textbook code to build a character-level text predictor for Romance of the Three Kingdoms (《三國(guó)演義》).
import random
import time
import math
import torch
import torch.nn as nn
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
'''Step 1: read the text, use a set to drop duplicate characters, and assign an index to each remaining character'''
def load_data_3kingdoms():
    with open('三國(guó)演義utf-8.txt', 'r', encoding='UTF-8') as f:
        corpus_chars = f.read()
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    corpus_chars = corpus_chars[1000:11000]  # keep a 10,000-character slice to speed up training
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size

(corpus_indices, char_to_idx, idx_to_char, vocab_size) = load_data_3kingdoms()
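# Quick sanity check of the loaded vocabulary (illustrative; the exact numbers
# depend on the text file and on the [1000:11000] slice chosen above):
print('vocab_size:', vocab_size)
print('sample:', ''.join(idx_to_char[i] for i in corpus_indices[:20]))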
'''Step 2: using those indices, turn every character in the text into a one-hot vector'''
def one_hot(x, n_class, dtype=torch.float32):
    # x: 1-D tensor of character indices; returns a (len(x), n_class) one-hot matrix
    result = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)
    result.scatter_(1, x.long().view(-1, 1), 1)
    return result

def to_onehot(X, n_class):
    # X: (batch_size, num_steps) -> list of num_steps tensors, each (batch_size, n_class)
    return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]
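# Shape check (illustrative sketch; X_demo/inputs_demo are demo names, not from
# the textbook): a batch of 2 sequences with 5 time steps each.
X_demo = torch.arange(10).view(2, 5)
inputs_demo = to_onehot(X_demo, vocab_size)
print(len(inputs_demo), inputs_demo[0].shape)  # 5 torch.Size([2, vocab_size])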
'''Step 3: random sampling and consecutive sampling'''
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):  # random sampling
    # Subtract 1 because for a sequence of length n, X can contain at most its first n - 1 characters
    num_examples = (len(corpus_indices) - 1) // num_steps  # floor division: number of non-overlapping examples
    example_indices = [i * num_steps for i in range(num_examples)]  # index in corpus_indices of each example's first character
    random.shuffle(example_indices)

    def _data(i):
        # Return the sequence of length num_steps starting at position i
        return corpus_indices[i: i + num_steps]

    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    for i in range(0, num_examples, batch_size):
        # Pick batch_size random examples each time
        batch_indices = example_indices[i: i + batch_size]  # first-character indices of the examples in this batch
        X = [_data(j) for j in batch_indices]
        Y = [_data(j + 1) for j in batch_indices]
        yield torch.tensor(X, device=device), torch.tensor(Y, device=device)
def data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):  # consecutive sampling
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    corpus_len = len(corpus_indices) // batch_size * batch_size  # length of the retained sequence
    corpus_indices = corpus_indices[: corpus_len]  # keep only the first corpus_len characters
    indices = torch.tensor(corpus_indices, device=device)
    indices = indices.view(batch_size, -1)  # reshape to (batch_size, corpus_len // batch_size)
    batch_num = (indices.shape[1] - 1) // num_steps
    for i in range(batch_num):
        i = i * num_steps
        X = indices[:, i: i + num_steps]
        Y = indices[:, i + 1: i + num_steps + 1]
        yield X, Y
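# Toy demonstration of consecutive sampling on the sequence 0..29 (illustrative):
# each label Y is its input X shifted one position to the right, and successive
# batches continue where the previous one left off.
for X, Y in data_iter_consecutive(list(range(30)), batch_size=2, num_steps=6):
    print('X:', X, '\nY:', Y)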
'''Step 4: initialize the RNN parameters; the number of hidden units is the hyperparameter, and the input and output dimensions are assumed equal'''
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
# num_inputs: d
# num_hiddens: h, the number of hidden units, a hyperparameter
# num_outputs: q
def get_params():
    def _one(shape):
        param = torch.zeros(shape, device=device, dtype=torch.float32)
        nn.init.normal_(param, 0, 0.01)
        return torch.nn.Parameter(param)

    # Hidden-layer parameters
    W_xh = _one((num_inputs, num_hiddens))
    W_hh = _one((num_hiddens, num_hiddens))
    b_h = torch.nn.Parameter(torch.zeros(num_hiddens, device=device))
    # Output-layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device))
    return (W_xh, W_hh, b_h, W_hq, b_q)
'''Step 5: implement the code from the core RNN formulas'''
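# The recurrence implemented below is the standard RNN update, where H_t is the
# hidden state at time step t and O_t is the output:
#   H_t = tanh(X_t @ W_xh + H_{t-1} @ W_hh + b_h)
#   O_t = H_t @ W_hq + b_q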
def rnn(inputs, state, params):
    # inputs and outputs are both lists of num_steps matrices of shape (batch_size, vocab_size)
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = torch.tanh(torch.matmul(X, W_xh) + torch.matmul(H, W_hh) + b_h)
        Y = torch.matmul(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, (H,)

def init_rnn_state(batch_size, num_hiddens, device):  # initialize the hidden state; note the return value is a tuple
    return (torch.zeros((batch_size, num_hiddens), device=device),)
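# Forward-pass shape check (illustrative; params_demo/state_demo/X_demo2 are
# demo names, not from the textbook):
params_demo = get_params()
state_demo = init_rnn_state(2, num_hiddens, device)
X_demo2 = torch.arange(10, device=device).view(2, 5)
outputs_demo, state_new = rnn(to_onehot(X_demo2, vocab_size), state_demo, params_demo)
print(len(outputs_demo), outputs_demo[0].shape, state_new[0].shape)
# Expected: 5 torch.Size([2, vocab_size]) torch.Size([2, 256])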
'''Step 6: clip the gradients to guard against exploding gradients'''
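# Clipping rescales the concatenated gradient g of all parameters so that its
# L2 norm never exceeds theta:
#   g <- min(1, theta / ||g||) * g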
def grad_clipping(params, theta, device):
    norm = torch.tensor([0.0], device=device)
    for param in params:
        norm += (param.grad.data ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data *= (theta / norm)
'''Step 7: define the prediction function'''
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                num_hiddens, vocab_size, device, idx_to_char, char_to_idx):
    state = init_rnn_state(1, num_hiddens, device)
    output = [char_to_idx[prefix[0]]]  # output holds prefix plus the num_chars predicted characters
    for t in range(num_chars + len(prefix) - 1):
        # Use the output of the previous time step as the input of the current one
        X = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)
        # Compute the output and update the hidden state
        (Y, state) = rnn(X, state, params)
        # The next time step's input is the next character of prefix, or the current best prediction
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(Y[0].argmax(dim=1).item())
    return ''.join([idx_to_char[i] for i in output])
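# Sampling from untrained parameters just produces gibberish, but it verifies
# the plumbing end to end (illustrative; assumes both characters of '曹操'
# occur in the corpus slice, otherwise char_to_idx raises a KeyError):
print(predict_rnn('曹操', 10, rnn, get_params(), init_rnn_state,
                  num_hiddens, vocab_size, device, idx_to_char, char_to_idx))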
'''Step 8: define the model training function'''
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, is_random_iter, num_epochs, num_steps,
                          lr, clipping_theta, batch_size, pred_period,
                          pred_len, prefixes):
    if is_random_iter:
        data_iter_fn = data_iter_random
    else:
        data_iter_fn = data_iter_consecutive
    params = get_params()
    loss = nn.CrossEntropyLoss()

    def sgd(params, lr, batch_size):  # plain minibatch stochastic gradient descent
        for param in params:
            param.data -= lr * param.grad / batch_size

    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state at the start of each epoch
            state = init_rnn_state(batch_size, num_hiddens, device)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
        for X, Y in data_iter:
            if is_random_iter:  # with random sampling, initialize the hidden state before every minibatch
                state = init_rnn_state(batch_size, num_hiddens, device)
            else:  # otherwise detach the hidden state from the computation graph
                for s in state:
                    s.detach_()
            # inputs is a list of num_steps matrices of shape (batch_size, vocab_size)
            inputs = to_onehot(X, vocab_size)
            # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
            (outputs, state) = rnn(inputs, state, params)
            # After concatenation the shape is (num_steps * batch_size, vocab_size)
            outputs = torch.cat(outputs, dim=0)
            # Y has shape (batch_size, num_steps); transpose and flatten it into a
            # vector of shape (num_steps * batch_size,) so it lines up with the rows of outputs
            y = torch.flatten(Y.T)
            # Average classification error via the cross-entropy loss
            l = loss(outputs, y.long())
            # Zero the gradients
            if params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            grad_clipping(params, clipping_theta, device)  # clip the gradients
            sgd(params, lr, 1)  # the loss is already a mean, so no further averaging of the gradient
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,
                                        num_hiddens, vocab_size, device, idx_to_char, char_to_idx))
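# The perplexity printed above is the exponential of the mean cross-entropy
# loss, exp(l_sum / n). A model guessing uniformly at random would score a
# perplexity of vocab_size, so values far below that indicate learning.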
num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2
# The prefixes must consist of characters that occur in the corpus slice; the
# textbook's lyric prefixes ('分開', '不分開') are replaced here with names from the novel
pred_period, pred_len, prefixes = 50, 50, ['曹操', '玄德']
train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                      vocab_size, device, corpus_indices, idx_to_char,
                      char_to_idx, True, num_epochs, num_steps, lr,
                      clipping_theta, batch_size, pred_period, pred_len,
                      prefixes)
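# To train with consecutive sampling instead (a sketch, left commented out so
# the script trains only once), pass False for is_random_iter:
# train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
#                       vocab_size, device, corpus_indices, idx_to_char,
#                       char_to_idx, False, num_epochs, num_steps, lr,
#                       clipping_theta, batch_size, pred_period, pred_len,
#                       prefixes)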