DQN-Family Algorithms

1. Double DQN


Rainbow DQN is, in effect, what you get by fusing the different DQN improvements into a single algorithm.

(Figure: Rainbow DQN)

DQN tends to overestimate itself: its Q-value estimates come out systematically too high.

As the game goes on, the estimated Q values keep growing, which is bad for network training. Comparing the target functions of DQN and Double DQN below, the only difference is the introduction of Q_{\phi_B}, i.e., a second network B that suppresses the overestimation problem.

  • DQN target value: y_j = r_j + \gamma \max_{\mathbf{a}'_j} Q_{\phi'}(\mathbf{s}'_j, \mathbf{a}'_j)
  • Double DQN target value: Q_{\phi_A}(\mathbf{s}, \mathbf{a}) \leftarrow r + \gamma Q_{\phi_B}\big(\mathbf{s}', \arg\max_{\mathbf{a}'} Q_{\phi_A}(\mathbf{s}', \mathbf{a}')\big)
    As for why the model overestimates in the first place, the figure below explains it clearly:
    (Figure: the cause of overestimation)
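To make the bias concrete, here is a minimal numerical sketch (an illustration, not from the original post): even when the true Q value of every action is 0, taking a max over noisy estimates is positive in expectation, while the Double-DQN-style decoupling of action selection from action evaluation is not.

import numpy as np

rng = np.random.default_rng(0)
n_actions, n_trials = 10, 10000
# Noisy Q estimates around a true value of 0 for every action
q_hat_1 = rng.normal(0.0, 1.0, size=(n_trials, n_actions))
q_hat_2 = rng.normal(0.0, 1.0, size=(n_trials, n_actions))
# DQN-style target: max over one set of noisy estimates -> biased upward (~1.5 here)
print("E[max_a Q1(s,a)] ~=", q_hat_1.max(axis=1).mean())
# Double-DQN-style target: select with Q1, evaluate with Q2 -> roughly unbiased (~0)
best = q_hat_1.argmax(axis=1)
print("E[Q2(s, argmax_a Q1)] ~=", q_hat_2[np.arange(n_trials), best].mean())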

    (Figure: Double DQN)

The code is shown below (source: 【強(qiáng)化學(xué)習(xí)】雙深度Q網(wǎng)絡(luò)(DDQN)求解倒立擺問題 + Pytorch代碼實(shí)戰(zhàn)):

import argparse
import copy
import datetime
import time
import math
import torch.optim as optim
import gym
from torch import nn

# Change this to the path of your own RL_Utils.py file
# (it is assumed to provide torch, numpy as np, random, os, Path, all_seed,
#  save_args, save_results and plot_rewards, which are used below)
from Python.ReinforcementLearning.EasyRL.RL_Utils import *


# Q network (a 3-layer fully connected network)
class MLP(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim=128):
        """ Initialize the Q network as a fully connected network
            input_dim: number of input features, i.e. the state dimension of the environment
            output_dim: dimension of the action output
        """
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)  # input layer
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # hidden layer
        self.fc3 = nn.Linear(hidden_dim, output_dim)  # output layer

    def forward(self, x):
        # ReLU activations for the hidden layers
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)


# Experience replay buffer
class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # capacity of the replay buffer
        self.buffer = []  # buffer storage
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        ''' The buffer acts as a queue: once capacity is exceeded, the oldest
            transition is overwritten
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done

    def __len__(self):
        ''' Return the number of transitions currently stored
        '''
        return len(self.buffer)


# DDQN agent
class DDQN:
    def __init__(self, model, memory, cfg):

        self.n_actions = cfg['n_actions']
        self.device = torch.device(cfg['device'])
        self.gamma = cfg['gamma']
        ## epsilon-greedy exploration parameters
        self.sample_count = 0  # number of samples taken so far
        self.epsilon = cfg['epsilon_start']
        self.epsilon_start = cfg['epsilon_start']
        self.epsilon_end = cfg['epsilon_end']
        self.epsilon_decay = cfg['epsilon_decay']
        self.batch_size = cfg['batch_size']
        self.policy_net = model.to(self.device)
        # The target network must be a separate copy of the model; assigning the
        # same object to both nets would make them share parameters permanently
        self.target_net = copy.deepcopy(model).to(self.device)
        # At initialization the target Q network equals the policy Q network:
        # copy the policy network's parameters into the target network
        self.target_net.load_state_dict(self.policy_net.state_dict())

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg['lr'])
        self.memory = memory
        self.update_flag = False

    # Action sampling during training: epsilon-greedy policy
    def sample_action(self, state):
        self.sample_count += 1
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
                       math.exp(-1. * self.sample_count / self.epsilon_decay)
        if random.random() > self.epsilon:
            action = self.predict_action(state)
        else:
            action = random.randrange(self.n_actions)
        return action

    # Action selection during testing: pick the action with the maximum Q value
    def predict_action(self, state):
        with torch.no_grad():
            state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
            q_values = self.policy_net(state)
            action = q_values.max(1)[1].item()
        return action

    def update(self):
        # Do not update until the replay buffer holds at least one batch of transitions
        if len(self.memory) < self.batch_size:
            return
        else:
            if not self.update_flag:
                print("Begin to update!")
                self.update_flag = True
        # Randomly draw a batch of transitions from the replay buffer
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        # Convert the data to tensors
        state_batch = torch.tensor(np.array(state_batch), device=self.device,
                                   dtype=torch.float)  # shape(batchsize,n_states)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)  # shape(batchsize,1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float).unsqueeze(
            1)  # shape(batchsize,1)
        next_state_batch = torch.tensor(np.array(next_state_batch), device=self.device,
                                        dtype=torch.float)  # shape(batchsize,n_states)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device).unsqueeze(1)  # shape(batchsize,1)
        # Compute the Q estimate
        q_value_batch = self.policy_net(state_batch).gather(dim=1,
                                                            index=action_batch)  # shape(batchsize,1),requires_grad=True
        # This is where DDQN differs from DQN: DDQN first uses policy_net to pick
        # the greedy action, then uses target_net to evaluate that action's Q value.
        # Vanilla DQN would instead use:
        # next_max_q_value_batch = self.policy_net(next_state_batch).max(1)[0].detach().unsqueeze(1)

        next_q_value_batch = self.policy_net(next_state_batch)
        next_target_value_batch = self.target_net(next_state_batch)  # type = Tensor , shape([batch_size, n_actions])

        # gather returns, for each row, the value at the position given by index;
        # dim=1 indexes along columns, dim=0 along rows
        # unsqueeze adds a dimension, e.g. torch.Size([6]):tensor([0, 1, 2, 3, 4, 5]).unsqueeze(0) => torch.Size([1, 6])
        # torch.max(tensor, dim) returns the maximum along the given dimension together with its index
        # .detach() returns a tensor that no longer carries gradients
        next_max_q_value_batch = next_target_value_batch.gather(1, torch.max(next_q_value_batch, 1)[1].unsqueeze(1))

        # Compute the Q target
        expected_q_value_batch = reward_batch + self.gamma * next_max_q_value_batch * (1 - done_batch)
        # MSE loss between the Q estimate and the Q target
        loss = nn.MSELoss()(q_value_batch, expected_q_value_batch)
        # Gradient descent step
        self.optimizer.zero_grad()
        loss.backward()
        # Clamp the gradients to avoid exploding gradients
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1.0, 1.0)
        self.optimizer.step()

    def save_model(self, path):
        Path(path).mkdir(parents=True, exist_ok=True)
        torch.save(self.target_net.state_dict(), f"{path}/checkpoint.pt")

    def load_model(self, path):
        self.target_net.load_state_dict(torch.load(f"{path}/checkpoint.pt"))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)


# Training function
def train(arg_dict, env, agent):
    # Start timing
    startTime = time.time()
    print(f"Env: {arg_dict['env_name']}, Algorithm: {arg_dict['algo_name']}, Device: {arg_dict['device']}")
    print("Starting to train the agent......")
    rewards = []
    steps = []
    for i_ep in range(arg_dict["train_eps"]):
        ep_reward = 0
        ep_step = 0
        state = env.reset()
        for _ in range(arg_dict['ep_max_steps']):
            # Render
            if arg_dict['train_render']:
                env.render()
            ep_step += 1
            action = agent.sample_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.memory.push(state, action, reward,
                              next_state, done)
            state = next_state
            agent.update()
            ep_reward += reward
            if done:
                break
        # Target network update
        if (i_ep + 1) % arg_dict["target_update"] == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        steps.append(ep_step)
        rewards.append(ep_reward)
        if (i_ep + 1) % 10 == 0:
            print(f'Episode: {i_ep + 1}/{arg_dict["train_eps"]}, Reward: {ep_reward:.2f}, Epsilon: {agent.epsilon:.3f}')
    print('Training finished, elapsed time: ' + str(time.time() - startTime) + " s")
    # Close the environment
    env.close()
    return {'episodes': range(len(rewards)), 'rewards': rewards}


# Test function
def test(arg_dict, env, agent):
    startTime = time.time()
    print("Starting to test the agent......")
    print(f"Env: {arg_dict['env_name']}, Algorithm: {arg_dict['algo_name']}, Device: {arg_dict['device']}")
    rewards = []
    steps = []
    for i_ep in range(arg_dict['test_eps']):
        ep_reward = 0
        ep_step = 0
        state = env.reset()
        for _ in range(arg_dict['ep_max_steps']):
            # Render
            if arg_dict['test_render']:
                env.render()
            ep_step += 1
            action = agent.predict_action(state)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            ep_reward += reward
            if done:
                break
        steps.append(ep_step)
        rewards.append(ep_reward)
        print(f"Episode: {i_ep + 1}/{arg_dict['test_eps']},Reward: {ep_reward:.2f}")
    print("測試結(jié)束 , 用時: " + str(time.time() - startTime) + " s")
    env.close()
    return {'episodes': range(len(rewards)), 'rewards': rewards}


# Create environment and agent
def create_env_agent(arg_dict):
    # Create the environment
    env = gym.make(arg_dict['env_name'])
    # Set random seeds
    all_seed(env, seed=arg_dict["seed"])
    # Get the state dimension
    try:
        n_states = env.observation_space.n
    except AttributeError:
        n_states = env.observation_space.shape[0]
    # Get the number of actions
    n_actions = env.action_space.n
    print(f"State dim: {n_states}, Number of actions: {n_actions}")
    # Add the state and action dimensions to the parameter dict
    arg_dict.update({"n_states": n_states, "n_actions": n_actions})
    # Instantiate the agent
    # Q network model
    model = MLP(n_states, n_actions, hidden_dim=arg_dict["hidden_dim"])
    # Replay buffer
    memory = ReplayBuffer(arg_dict["memory_capacity"])
    # Agent
    agent = DDQN(model, memory, arg_dict)
    # Return the environment and the agent
    return env, agent


if __name__ == '__main__':
    # Prevent the error: OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # Current path
    curr_path = os.path.dirname(os.path.abspath(__file__))
    # Current time
    curr_time = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    # Hyper-parameter settings
    parser = argparse.ArgumentParser(description="hyper parameters")
    parser.add_argument('--algo_name', default='DDQN', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='CartPole-v0', type=str, help="name of environment")
    parser.add_argument('--train_eps', default=200, type=int, help="episodes of training")
    parser.add_argument('--test_eps', default=20, type=int, help="episodes of testing")
    parser.add_argument('--ep_max_steps', default=100000, type=int,
                        help="steps per episode, much larger value can simulate infinite steps")
    parser.add_argument('--gamma', default=0.95, type=float, help="discounted factor")
    parser.add_argument('--epsilon_start', default=0.95, type=float, help="initial value of epsilon")
    parser.add_argument('--epsilon_end', default=0.01, type=float, help="final value of epsilon")
    parser.add_argument('--epsilon_decay', default=500, type=int,
                        help="decay rate of epsilon, the higher value, the slower decay")
    parser.add_argument('--lr', default=0.0001, type=float, help="learning rate")
    parser.add_argument('--memory_capacity', default=100000, type=int, help="memory capacity")
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--target_update', default=4, type=int)
    parser.add_argument('--hidden_dim', default=256, type=int)
    parser.add_argument('--device', default='cpu', type=str, help="cpu or cuda")
    parser.add_argument('--seed', default=520, type=int, help="seed")
    parser.add_argument('--show_fig', default=False, type=bool, help="if show figure or not")
    parser.add_argument('--save_fig', default=True, type=bool, help="if save figure or not")
    parser.add_argument('--train_render', default=False, type=bool,
                        help="Whether to render the environment during training")
    parser.add_argument('--test_render', default=True, type=bool,
                        help="Whether to render the environment during testing")
    args = parser.parse_args()
    default_args = {'result_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/results/",
                    'model_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/models/",
                    }
    # Merge the parsed arguments into a single dict
    arg_dict = {**vars(args), **default_args}
    print("Algorithm parameter dict:", arg_dict)

    # Create environment and agent
    env, agent = create_env_agent(arg_dict)
    # Pass in the parameters, environment and agent, then start training
    res_dic = train(arg_dict, env, agent)
    print("Training result dict:", res_dic)
    # Save results
    agent.save_model(path=arg_dict['model_path'])
    save_args(arg_dict, path=arg_dict['result_path'])
    save_results(res_dic, tag='train', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="train")

    # =================================================================================================
    # Create a fresh environment and agent for testing
    print("=" * 300)
    env, agent = create_env_agent(arg_dict)
    # Load the saved agent
    agent.load_model(path=arg_dict['model_path'])
    res_dic = test(arg_dict, env, agent)
    save_results(res_dic, tag='test', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="test")
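With the default arguments above, running the script trains the agent on CartPole-v0 for 200 episodes, saves the model, then reloads it and evaluates it for 20 test episodes (assuming RL_Utils.py from the source post is on the import path).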

2. Dueling DQN


The figure above shows that with the weights w·x alone, the network can only fit each input one by one; it cannot generalize from one case to others. The idea is that if there were a single bias-like term b that influences all of those outputs at once, learning would be much more efficient; in other words, one shared bias is enough. The state value V(s) below plays exactly that role of global control.
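In Dueling DQN, that shared term is the state value V(s), while the per-action residual is the advantage A(s, a). To keep the two identifiable, the mean advantage is subtracted, which is exactly what the VAnet code below does:

Q(s, a) = V(s) + \Big( A(s, a) - \frac{1}{|\mathcal{A}|} \sum_{a'} A(s, a') \Big)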







The code is shown below (source: 強(qiáng)化學(xué)習(xí)代碼實(shí)戰(zhàn)-06 Dueling DQN 算法):

import random
import gym
import torch
import numpy as np
from matplotlib import pyplot as plt
from IPython import display

env = gym.make("Pendulum-v0")
# Initial agent state
state = env.reset()
# Action space
actions = env.action_space
print(state, actions)
# Render the game
# plt.imshow(env.render(mode='rgb_array'))
# plt.show()


"""重新定義策略價值網(wǎng)絡(luò)Q, 比DQN性能更優(yōu)"""
class VAnet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        
        self.fc = torch.nn.Sequential(torch.nn.Linear(3, 128),
                                     torch.nn.ReLU())
        self.fc_A = torch.nn.Linear(128, 11)
        self.fc_V = torch.nn.Linear(128, 1)
        
    def forward(self, x):
        A = self.fc_A(self.fc(x))
        V = self.fc_V(self.fc(x))
        A_mean = A.mean(dim=1).reshape(-1, 1)
        A = A -  A_mean
        # Q值由A和V求和得到
        Q = A + V
        
        return Q
    
    
# Action model (policy network)
model = VAnet()

# Target network: evaluates the score of an action
next_model = VAnet()
# Copy model's parameters into next_model
next_model.load_state_dict(model.state_dict())

# Get an action
def get_action(state):
    """state: the agent's current state. Since the action space is continuous,
    we discretize it into 11 bins."""
    # Convert to a tensor and pick the highest-scoring action from the network
    state = torch.FloatTensor(state).reshape(1, 3)
    action = model(state).argmax().item()
    if random.random() < 0.01:
        action = random.choice(range(11))
    # Map the discrete action index {0, ..., 10} back to the continuous range [-2, 2]
    action_continuous = action / 10 * 4 - 2

    return action, action_continuous


# Data pool (replay buffer)
datas = []
def update_data():
    """Append roughly N new transitions and drop the oldest ones beyond capacity"""
    count = len(datas)
    while len(datas) - count < 200:
        # Keep appending data to cover as many environment states as possible
        state = env.reset()
        done = False
        while not done:
            # Get an action from the current state
            action, action_continuous = get_action(state)
            next_state, reward, done, _ = env.step([action_continuous])
            datas.append((state, action, reward, next_state, done))
            # Update the state
            state = next_state
    # About 200 new samples were just added; if the maximum capacity is
    # exceeded, delete the oldest data
    update_count = len(datas) - count
    while len(datas) > 5000:
        datas.pop(0)
    return update_count

# Sample from the data pool
def get_sample():
    # batch size = 64; convert the data to tensors
    samples = random.sample(datas, 64)
    state = torch.FloatTensor([i[0] for i in samples]).reshape(-1, 3)
    action = torch.LongTensor([i[1] for i in samples]).reshape(-1, 1)
    reward = torch.FloatTensor([i[2] for i in samples]).reshape(-1, 1)
    next_state = torch.FloatTensor([i[3] for i in samples]).reshape(-1, 3)
    done = torch.LongTensor([i[4] for i in samples]).reshape(-1, 1)
    
    return state, action, reward, next_state, done

# Get action values
def get_value(state, action):
    """Look up the score of the taken action in the network output; uses the policy network"""
    action_value = model(state)
    action_value = action_value.gather(dim=1, index=action)
    
    return action_value

# Get the learning target
def get_target(next_state, reward, done):
    """Compute the target score from next_state and reward; the value estimate uses the target network"""
    with torch.no_grad():
        target = next_model(next_state)

    target = target.max(dim=1)[0].reshape(-1, 1)
    target *= (1 - done)        # terminal states contribute no bootstrapped future value
    
    target = reward + target * 0.98
    
    return target

# Evaluate the total reward of one test episode
def test():
    reward_sum = 0
    
    state = env.reset()
    done = False
    
    while not done:
        _, action_continuous = get_action(state)
        next_state, reward, done, _ = env.step([action_continuous])
        reward_sum += reward
        state = next_state
        
    return reward_sum

def train():
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-3)
    loss_fn = torch.nn.MSELoss()
    
    for epoch in range(600):
        # Collect a new batch of data
        update_counter = update_data()
        
        # After refreshing the data, learn N times
        for i in range(200):
            state, action, reward, next_state, done = get_sample()
            # Compute the value and the target
            value = get_value(state, action)
            target = get_target(next_state, reward, done)
            
            # Parameter update
            loss = loss_fn(value, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            """周期性更新目標(biāo)網(wǎng)絡(luò)"""
            if (i + 1) % 10 == 0:
                next_model.load_state_dict(model.state_dict())
            
        if epoch % 50 == 0:
            test_score = sum([test() for i in range(50)]) / 50
            print(epoch, len(datas), update_counter, test_score)
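The excerpt above defines train() but never calls it; a minimal entry point (an assumption, not shown in the source) would be:

if __name__ == '__main__':
    train()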

3. Multi-Step DQN


Multi-step DQN simply uses several time steps of real rewards when computing the Q target, bootstrapping from the target network only after n steps: y_t = \sum_{k=0}^{n-1} \gamma^k r_{t+k} + \gamma^n \max_{\mathbf{a}'} Q_{\phi'}(\mathbf{s}_{t+n}, \mathbf{a}'). A sketch of this computation is shown below.
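A minimal sketch of computing that n-step target from a short reward window (an illustration; the names rewards, final_next_state, final_done and target_net are assumptions, not from the original post):

import torch

def n_step_target(rewards, final_next_state, final_done, target_net, gamma=0.95, n=3):
    """rewards: the n rewards r_t, ..., r_{t+n-1} (floats);
    final_next_state: s_{t+n} as a 1-D tensor; final_done: whether s_{t+n} is terminal."""
    g = 0.0
    for k in reversed(range(n)):  # accumulate sum_{k=0}^{n-1} gamma^k r_{t+k}
        g = rewards[k] + gamma * g
    with torch.no_grad():  # bootstrap with the target network after n steps
        bootstrap = target_net(final_next_state.unsqueeze(0)).max(1)[0].item()
    return g + (gamma ** n) * bootstrap * (1.0 - float(final_done))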


4. Handling Continuous Actions
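
The Dueling DQN example above already demonstrates the simplest approach: discretize the continuous action range into a fixed number of bins, let the Q network choose a bin, and map the chosen bin index back to a continuous value. Below is a minimal sketch of that mapping for Pendulum's torque range [-2, 2] (the function name to_continuous is hypothetical; the arithmetic mirrors get_action above):

# Map a discrete action index in {0, ..., n_bins-1} to a continuous value;
# equivalent to the action / 10 * 4 - 2 arithmetic in get_action above
def to_continuous(action_index, n_bins=11, low=-2.0, high=2.0):
    return low + (high - low) * action_index / (n_bins - 1)

assert to_continuous(0) == -2.0 and to_continuous(10) == 2.0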


