1 Problem understanding
2 Data analysis
3 Word vectors + machine-learning models
4 Fasttext
5 Word2Vec + TextCNN model
This write-up is still unfinished; the missing parts will be filled in later.
Word2Vec
Two training algorithms:
Skip-gram (SG): predicts the context words from the target word
Continuous Bag of Words (CBOW): predicts the target word from its context
Two tricks that make training more efficient:
Hierarchical softmax
Negative sampling
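Before moving on, here is a minimal sketch of how these options map onto gensim's Word2Vec (assuming gensim 4.x; the toy sentences and the output path are my own, not the competition code):

from gensim.models import Word2Vec

# toy corpus: each document is a list of the anonymized word ids used in this competition
sentences = [['3750', '648', '900'], ['3750', '2465', '6122']]
model = Word2Vec(sentences,
                 vector_size=100,  # embedding dimension
                 window=5,
                 min_count=1,
                 sg=1,             # 1 = Skip-gram, 0 = CBOW
                 hs=0,             # 1 = hierarchical softmax
                 negative=5)       # number of negative samples (used when hs=0)
model.wv.save_word2vec_format('word2vec.txt', binary=False)  # text format loaded later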
P.S. I ran out of time, so a detailed introduction to Word2Vec and a walkthrough of its code will be added later.
This post mainly covers TextCNN.
TextCNN

(figures: TextCNN architecture diagrams, omitted)
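In place of the missing figures, the core TextCNN computation fits in a few lines: embed the tokens, run parallel convolutions over 2/3/4-gram windows, max-pool each feature map over time, and concatenate. A minimal sketch with made-up shapes (my own toy example, not the competition code):

import torch
import torch.nn as nn
import torch.nn.functional as F

emb = torch.randn(8, 1, 50, 100)  # batch x 1 channel x seq_len x emb_dim
convs = nn.ModuleList([nn.Conv2d(1, 100, (k, 100)) for k in (2, 3, 4)])
feats = []
for conv in convs:
    h = F.relu(conv(emb)).squeeze(3)  # batch x 100 x (seq_len - k + 1)
    feats.append(F.max_pool1d(h, h.size(2)).squeeze(2))  # batch x 100
rep = torch.cat(feats, dim=1)  # batch x 300, fed to a classifier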
然后是代碼部分,小白的我表示看不懂,只能一行一行的去看,補充了注釋和相關(guān)知識,在時間限時內(nèi)表示沒看完,就先放上看完的部分的理解了。
# split data to 10 fold
fold_num = 10
data_file = r'E:\jupyter_lab\TianChi\News_classifier\data\train\train_set.csv'
import logging
import numpy as np
import pandas as pd

def all_data2fold(fold_num, num=10000):
    fold_data = []
    f = pd.read_csv(data_file, sep='\t', encoding='UTF-8')
    texts = f['text'].tolist()[:num]    # keep only the first 10000 rows; the full file has shape (200000, 2)
    labels = f['label'].tolist()[:num]  # texts and labels are now lists with 10000 elements each

    total = len(labels)  # total sample count, equal to num
    index = list(range(total))
    np.random.shuffle(index)  # shuffle the list of indices

    all_texts = []
    all_labels = []
    for i in index:
        all_texts.append(texts[i])
        all_labels.append(labels[i])  # reorder texts and labels with the shuffled indices

    label2id = {}  # dict: key = label, value = list of positions of that label in all_labels
    for i in range(total):
        label = str(all_labels[i])
        if label not in label2id:
            label2id[label] = [i]
        else:
            label2id[label].append(i)  # each label occurs many times, so it collects many positions
    all_index = [[] for _ in range(fold_num)]  # one (initially empty) index list per fold
    for label, data in label2id.items():  # iterate label by label; data holds that label's positions
        # print(label, len(data))
        batch_size = int(len(data) / fold_num)  # occurrences of this label split evenly over fold_num folds
        other = len(data) - batch_size * fold_num  # leftover after the even split; always < fold_num
        offset = 0
        for i in range(fold_num):
            # the first `other` folds each take one extra sample to absorb the leftover
            cur_batch_size = batch_size + 1 if i < other else batch_size
            # walk through data with a running offset so every position lands in exactly one fold
            batch_data = [data[offset + b] for b in range(cur_batch_size)]
            offset += cur_batch_size
            all_index[i].extend(batch_data)  # append to the corresponding fold
    # all_index now looks like: [[label_0 fold 0, label_1 fold 0, ..., label_13 fold 0],
    #                            [label_0 fold 1, label_1 fold 1, ...], ...]
    # so every fold preserves the label distribution of the original data
    batch_size = int(total / fold_num)  # target size of each fold overall
    other_texts = []
    other_labels = []
    other_num = 0
    start = 0
    for fold in range(fold_num):
        num = len(all_index[fold])  # current number of samples in this fold
        texts = [all_texts[i] for i in all_index[fold]]  # all_texts holds the shuffled texts; all_index[fold] holds this fold's positions
        labels = [all_labels[i] for i in all_index[fold]]
        '''
        Taking fold = 0 as an example: all_index[0] = [label_0 fold 0, label_1 fold 0, ..., label_13 fold 0],
        i.e. the slice of every label that was assigned to fold 0, stored as positions
        into the shuffled data. Those positions are then used to pull the actual
        samples out of all_texts and all_labels.
        '''
        if num > batch_size:  # this fold has more samples than the target size
            fold_texts = texts[:batch_size]  # keep the first batch_size samples
            other_texts.extend(texts[batch_size:])  # park the overflow in other_texts
            fold_labels = labels[:batch_size]
            other_labels.extend(labels[batch_size:])  # same for the labels
            other_num += num - batch_size  # count the total overflow across folds
        elif num < batch_size:  # this fold has fewer samples than the target size
            end = start + batch_size - num  # how many samples this fold is short of
            fold_texts = texts + other_texts[start: end]  # top up from the parked overflow
            fold_labels = labels + other_labels[start: end]
            start = end  # advance the read position in the overflow lists
        else:
            fold_texts = texts
            fold_labels = labels

        assert batch_size == len(fold_labels)  # raises AssertionError if a fold ended up the wrong size
        # at this point every fold holds exactly batch_size samples in fold_texts/fold_labels

        # shuffle
        index = list(range(batch_size))
        np.random.shuffle(index)  # shuffle within the fold
        shuffle_fold_texts = []
        shuffle_fold_labels = []
        for i in index:
            shuffle_fold_texts.append(fold_texts[i])
            shuffle_fold_labels.append(fold_labels[i])  # reorder the fold with the shuffled indices

        data = {'label': shuffle_fold_labels, 'text': shuffle_fold_texts}  # one fold's data
        fold_data.append(data)  # collect each fold's dict into fold_data

    logging.info("Fold lens %s", str([len(data['label']) for data in fold_data]))  # log the size of every fold
    return fold_data
fold_data = all_data2fold(10)
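A quick sanity check of the stratification claim above (my own addition, not part of the baseline): every fold should show roughly the same label proportions.

from collections import Counter
for fold in fold_data[:2]:  # inspect the first two folds
    print(Counter(fold['label']).most_common(3))  # the top labels should match across folds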
# build train, dev, test data
fold_id = 9

# dev: the 10th fold (index 9) is the validation set
dev_data = fold_data[fold_id]

# train: the first 9 folds
train_texts = []
train_labels = []
for i in range(0, fold_id):
    data = fold_data[i]
    train_texts.extend(data['text'])
    train_labels.extend(data['label'])

train_data = {'label': train_labels, 'text': train_texts}

# test: unlabeled, so fill the labels with zeros
test_data_file = '../data/test_a.csv'
f = pd.read_csv(test_data_file, sep='\t', encoding='UTF-8')
texts = f['text'].tolist()
test_data = {'label': [0] * len(texts), 'text': texts}
How Counter works
from collections import Counter
C = Counter()  # {word: count}
data = ['1 2 3 4 5 1 2 3 4 1 2 3 1 2 1']  # a list of texts, mirroring data['text'] below
for text in data:
    words = text.split()  # split each text into words
    for word in words:
        C[word] += 1
C
# Counter({'1': 5, '2': 4, '3': 3, '4': 2, '5': 1})
C.most_common()
# [('1', 5), ('2', 4), ('3', 3), ('4', 2), ('5', 1)]
len(C)
# 5
C['1']
# 5
# build vocab
from collections import Counter
from transformers import BasicTokenizer

basic_tokenizer = BasicTokenizer()

class Vocab():
    def __init__(self, train_data):
        self.min_count = 5
        self.pad = 0
        self.unk = 1
        self._id2word = ['[PAD]', '[UNK]']
        self._id2extword = ['[PAD]', '[UNK]']  # vocabulary of the pretrained embedding file
        self._id2label = []
        self.target_names = []

        self.build_vocab(train_data)

        reverse = lambda x: dict(zip(x, range(len(x))))  # number the items of x from 0 to len(x)-1
        self._word2id = reverse(self._id2word)
        self._label2id = reverse(self._id2label)

        logging.info("Build vocab: words %d, labels %d." % (self.word_size, self.label_size))

    def build_vocab(self, data):
        self.word_counter = Counter()  # {word: count}
        for text in data['text']:
            words = text.split()
            for word in words:
                self.word_counter[word] += 1  # count how often each word occurs
        for word, count in self.word_counter.most_common():  # most_common() returns a list of (word, count) tuples
            if count >= self.min_count:  # keep only words above the frequency threshold
                self._id2word.append(word)  # append after the initial ['[PAD]', '[UNK]']

        label2name = {0: '科技', 1: '股票', 2: '體育', 3: '娛樂', 4: '時政', 5: '社會', 6: '教育', 7: '財經(jīng)',
                      8: '家居', 9: '游戲', 10: '房產(chǎn)', 11: '時尚', 12: '彩票', 13: '星座'}  # label id -> category name

        self.label_counter = Counter(data['label'])  # how often each label occurs
        for label in range(len(self.label_counter)):  # 14 labels; conveniently the ids are exactly 0-13, matching range(14)
            count = self.label_counter[label]  # frequency of this label
            self._id2label.append(label)  # append to self._id2label, initially []
            self.target_names.append(label2name[label])  # look up the readable name for this label
    '''
    build_vocab constructs:
        self._id2word: list of words, namely the words in data['text'] with frequency >= min_count
        self._id2label: list of labels, i.e. the distinct labels in data['label']; len(_id2label) = 14
        self.target_names: list of the labels' readable names
    '''
    def load_pretrained_embs(self, embfile):  # load the pretrained word vectors
        with open(embfile, encoding='utf-8') as f:
            lines = f.readlines()  # read every line into a list; for a huge file this is memory-hungry
            items = lines[0].split()  # the first line is a header: "word_count embedding_dim"
            word_count, embedding_dim = int(items[0]), int(items[1])

        index = len(self._id2extword)  # = 2: pretrained words are placed after ['[PAD]', '[UNK]']
        embeddings = np.zeros((word_count + index, embedding_dim))  # empty matrix: (pretrained vocab + [PAD]/[UNK], embedding dim)
        for line in lines[1:]:  # every line after the header holds one word vector
            values = line.split()  # values[0] is the token itself, values[1:] its vector
            self._id2extword.append(values[0])
            vector = np.array(values[1:], dtype='float64')
            embeddings[self.unk] += vector  # self.unk = 1: accumulate the sum of all vectors in the [UNK] row
            embeddings[index] = vector  # place the pretrained vectors starting at row index = 2
            index += 1  # move to the next row

        embeddings[self.unk] = embeddings[self.unk] / word_count  # [UNK] becomes the mean of all pretrained vectors
        embeddings = embeddings / np.std(embeddings)  # rescale the whole matrix to unit standard deviation

        reverse = lambda x: dict(zip(x, range(len(x))))  # number the items of x from 0; returns {item: id}
        self._extword2id = reverse(self._id2extword)  # dict: key = pretrained word, value = its row id

        assert len(set(self._id2extword)) == len(self._id2extword)  # make sure no pretrained word appears twice
        return embeddings  # return the embedding matrix
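    # A toy illustration (made-up numbers, my own addition) of what the loop above
    # computes for [UNK]:
    #     vectors in the file: [0.1, 0.2, 0.3] and [0.4, 0.5, 0.6]
    #     embeddings[unk] = (their sum) / word_count = [0.25, 0.35, 0.45]
    # i.e. the [UNK] row is the mean of all pretrained vectors, a common stand-in
    # for unseen words; the final division by np.std(embeddings) only rescales the matrix.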
    def word2id(self, xs):
        if isinstance(xs, list):
            return [self._word2id.get(x, self.unk) for x in xs]
            # _word2id is a dict: key = word from _id2word, value = its id;
            # a word that is not in the dict falls back to self.unk = 1
        return self._word2id.get(xs, self.unk)
        # word2id maps the words in xs to their ids in the training vocabulary

    def extword2id(self, xs):
        if isinstance(xs, list):
            return [self._extword2id.get(x, self.unk) for x in xs]
            # _extword2id is the same mapping for the pretrained vocabulary (_id2extword)
        return self._extword2id.get(xs, self.unk)
        # extword2id maps the words in xs to their ids in the pretrained vocabulary

    def label2id(self, xs):
        if isinstance(xs, list):
            return [self._label2id.get(x, self.unk) for x in xs]
        return self._label2id.get(xs, self.unk)
        # label2id maps labels to their ids

    @property
    def word_size(self):
        return len(self._id2word)  # number of words with frequency >= min_count, plus [PAD]/[UNK]

    @property
    def extword_size(self):
        return len(self._id2extword)  # size of the pretrained vocabulary, plus [PAD]/[UNK]

    @property
    def label_size(self):
        return len(self._id2label)  # number of classes
vocab = Vocab(train_data)
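A quick usage sketch (my own; the tokens are made up and the exact ids depend on the training data):

tokens = ['3750', '648', 'unseen_token']
print(vocab.word2id(tokens))  # e.g. [2, 7, 1]; unknown words map to unk = 1
print(vocab.label2id(0))      # 0; label ids are already 0-13
print(vocab.word_size, vocab.extword_size, vocab.label_size)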
Background reading (a short demo of the trickier ops follows this list):
- custom layers in PyTorch
- class inheritance
- parameter initialization
- outputs.masked_fill
- mm, bmm
- squeeze
- nn.Embedding
- requires_grad
- nn.ModuleList
- nn.Conv2d
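Here is that demo: tiny, self-contained examples (my own) of masked_fill, unsqueeze, bmm, and squeeze as they are used in the Attention layer below.

import torch

x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
mask = torch.tensor([[1, 1], [1, 0]])
print(x.masked_fill((1 - mask).bool(), -1e32))
# masked_fill writes the value wherever the boolean mask is True,
# i.e. here wherever the original mask is 0 (padding)

a = torch.randn(4, 5)
print(a.unsqueeze(1).shape)  # torch.Size([4, 1, 5]): insert a size-1 dim at position 1

b = torch.randn(4, 1, 5)  # batched matrix multiply: (4 x 1 x 5) @ (4 x 5 x 3)
c = torch.randn(4, 5, 3)
print(torch.bmm(b, c).squeeze(1).shape)  # bmm -> 4 x 1 x 3; squeeze(1) -> 4 x 3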
# build module
import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    """A custom layer has to subclass nn.Module."""
    def __init__(self, hidden_size):
        super().__init__()  # run nn.Module's own __init__ first
        # declare the learnable parameters (instances of nn.Parameter) in __init__
        self.weight = nn.Parameter(torch.Tensor(hidden_size, hidden_size))  # weight matrix (hidden_size, hidden_size)
        self.weight.data.normal_(mean=0.0, std=0.05)  # initialize the weights from N(0, 0.05)

        self.bias = nn.Parameter(torch.Tensor(hidden_size))  # bias vector (hidden_size,)
        b = np.zeros(hidden_size, dtype=np.float32)
        self.bias.data.copy_(torch.from_numpy(b))  # initialize the bias to zeros

        self.query = nn.Parameter(torch.Tensor(hidden_size))  # learned query vector
        self.query.data.normal_(mean=0.0, std=0.05)

    def forward(self, batch_hidden, batch_masks):
        # batch_hidden: b x len x hidden_size (2 * hidden_size of the LSTM)
        # batch_masks:  b x len, 1 for real tokens and 0 for padding

        # linear
        key = torch.matmul(batch_hidden, self.weight) + self.bias  # key: b x len x hidden

        # compute attention
        outputs = torch.matmul(key, self.query)  # outputs: b x len

        masked_outputs = outputs.masked_fill((1 - batch_masks).bool(), float(-1e32))
        # masked_fill(mask, value) writes value wherever mask is True;
        # (1 - batch_masks) is 1 exactly at the padded positions, so padding
        # gets the huge negative score -1e32 and receives ~0 weight after softmax

        attn_scores = F.softmax(masked_outputs, dim=1)  # attn_scores: b x len

        # for a row that is all padding, -1e32 still yields a uniform 1/len after
        # softmax (whereas -inf would produce NaN), so the padded positions are
        # explicitly zeroed out once more:
        masked_attn_scores = attn_scores.masked_fill((1 - batch_masks).bool(), 0.0)  # b x len

        # sum weighted sources
        batch_outputs = torch.bmm(masked_attn_scores.unsqueeze(1), key).squeeze(1)  # b x hidden
        # bmm is a batched matrix multiplication:
        #   unsqueeze(1) turns the scores into b x 1 x len,
        #   (b x 1 x len) @ (b x len x hidden) = b x 1 x hidden,
        #   squeeze(1) drops the size-1 dim, giving b x hidden
        return batch_outputs, attn_scores
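A quick shape check (my own example): two sequences of length 3, where the second has one padded position.

att = Attention(hidden_size=8)
hidden = torch.randn(2, 3, 8)                       # b x len x hidden
masks = torch.tensor([[1., 1., 1.], [1., 1., 0.]])  # 0 marks padding
out, scores = att(hidden, masks)
print(out.shape, scores.shape)  # torch.Size([2, 8]) torch.Size([2, 3])
print(scores[1])                # the padded position gets (almost) zero weight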
# build word encoder
word2vec_path = '../emb/word2vec.txt'
dropout = 0.15

class WordCNNEncoder(nn.Module):
    def __init__(self, vocab):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.word_dims = 100

        # trainable embedding layer over the training vocabulary
        self.word_embed = nn.Embedding(vocab.word_size, self.word_dims, padding_idx=0)

        extword_embed = vocab.load_pretrained_embs(word2vec_path)  # the pretrained embedding matrix built above
        extword_size, word_dims = extword_embed.shape
        logging.info("Load extword embed: words %d, dims %d." % (extword_size, word_dims))

        # frozen embedding layer initialized with the pretrained vectors
        self.extword_embed = nn.Embedding(extword_size, word_dims, padding_idx=0)
        self.extword_embed.weight.data.copy_(torch.from_numpy(extword_embed))
        self.extword_embed.weight.requires_grad = False  # do not train the pretrained vectors

        input_size = self.word_dims

        self.filter_sizes = [2, 3, 4]  # n-gram windows
        self.out_channel = 100
        self.convs = nn.ModuleList([nn.Conv2d(1, self.out_channel, (filter_size, input_size), bias=True)
                                    for filter_size in self.filter_sizes])
        # three convolution layers, one per n-gram size: each takes 1 input channel,
        # produces 100 output channels, and its kernel spans (filter_size, 100),
        # i.e. filter_size words times the full embedding dimension
    def forward(self, word_ids, extword_ids):
        # word_ids:    sen_num x sent_len
        # extword_ids: sen_num x sent_len
        sen_num, sent_len = word_ids.shape

        word_embed = self.word_embed(word_ids)  # sen_num x sent_len x 100, from the trainable embedding
        extword_embed = self.extword_embed(extword_ids)  # same shape, from the frozen pretrained embedding
        batch_embed = word_embed + extword_embed  # sum of the two: sen_num x sent_len x 100

        if self.training:
            batch_embed = self.dropout(batch_embed)

        batch_embed.unsqueeze_(1)  # add a channel dim: sen_num x 1 x sent_len x 100

        pooled_outputs = []
        for i in range(len(self.filter_sizes)):
            filter_height = sent_len - self.filter_sizes[i] + 1  # number of window positions
            conv = self.convs[i](batch_embed)
            hidden = F.relu(conv)  # sen_num x out_channel x filter_height x 1
            mp = nn.MaxPool2d((filter_height, 1))  # max over all window positions
            pooled = mp(hidden).reshape(sen_num,
                                        self.out_channel)  # sen_num x out_channel x 1 x 1 -> sen_num x out_channel
            pooled_outputs.append(pooled)

        reps = torch.cat(pooled_outputs, dim=1)  # sen_num x total_out_channel (= 300)

        if self.training:
            reps = self.dropout(reps)
        return reps
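A shape check of the encoder (my own example; it assumes vocab exists and the file at word2vec_path is in place, since the constructor loads it):

encoder = WordCNNEncoder(vocab)
word_ids = torch.randint(0, vocab.word_size, (16, 32))  # sen_num=16, sent_len=32
extword_ids = torch.randint(0, vocab.extword_size, (16, 32))
print(encoder(word_ids, extword_ids).shape)  # torch.Size([16, 300]) = 3 filter sizes x 100 channels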
# build sent encoder
sent_hidden_size = 256
sent_num_layers = 2

class SentEncoder(nn.Module):
    def __init__(self, sent_rep_size):
        super(SentEncoder, self).__init__()
        self.dropout = nn.Dropout(dropout)

        self.sent_lstm = nn.LSTM(
            input_size=sent_rep_size,
            hidden_size=sent_hidden_size,
            num_layers=sent_num_layers,
            batch_first=True,
            bidirectional=True
        )

    def forward(self, sent_reps, sent_masks):
        # sent_reps:  b x doc_len x sent_rep_size
        # sent_masks: b x doc_len
        sent_hiddens, _ = self.sent_lstm(sent_reps)  # b x doc_len x hidden*2 (bidirectional)
        sent_hiddens = sent_hiddens * sent_masks.unsqueeze(2)  # zero out the hidden states of padded sentences

        if self.training:
            sent_hiddens = self.dropout(sent_hiddens)
        return sent_hiddens
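And a matching shape check (my own example; sent_rep_size = 300 to match the WordCNNEncoder output):

sent_enc = SentEncoder(sent_rep_size=300)
sent_reps = torch.randn(2, 8, 300)  # 2 documents, 8 sentences each
sent_masks = torch.ones(2, 8)
print(sent_enc(sent_reps, sent_masks).shape)  # torch.Size([2, 8, 512]) = 2 * sent_hidden_size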
That's as far as I got in the time available: the Attention layer is annotated above, though the mask and query mechanics still deserve a closer look, and WordCNNEncoder and SentEncoder need a proper walkthrough of how they fit together. I'll stop here and fill in the rest later.
Next section preview: BERT