人工智能例子汇总：AI常见的算法和例子-CSDN博客
LSTM controls the flow of information through a memory cell and three gating mechanisms (the forget gate, the input gate, and the output gate):

Memory cell (Cell State): stores long-term information; the gates decide which information to keep and which to discard.
Forget gate (f_t)
Input gate (i_t)
Output gate (o_t)

Feature                      Traditional RNN      LSTM
Memory capability            Short-term memory    Short- and long-term memory
Computational complexity     Low                  High
Solves vanishing gradients   No                   Yes
Suitable scenarios           Short sequences      Long sequences
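For reference, the textbook formulation of the three gates and the state updates described above is as follows (this is the standard LSTM, not something specific to this article; σ is the sigmoid function, ⊙ is element-wise multiplication, and W and b are learned weights and biases):

f_t = σ(W_f · [h_{t-1}, x_t] + b_f)      (forget gate)
i_t = σ(W_i · [h_{t-1}, x_t] + b_i)      (input gate)
o_t = σ(W_o · [h_{t-1}, x_t] + b_o)      (output gate)
c̃_t = tanh(W_c · [h_{t-1}, x_t] + b_c)   (candidate cell state)
c_t = f_t ⊙ c_{t-1} + i_t ⊙ c̃_t          (cell state update)
h_t = o_t ⊙ tanh(c_t)                    (hidden state / output)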
LSTM application scenarios

Natural language processing (NLP): text generation, sentiment analysis, machine translation
Time-series forecasting: stock prediction, weather forecasting, sensor-data analysis
Speech recognition: automatic subtitle generation, speech-to-text (ASR)
Robotics and control systems: agent decision making, autonomous driving
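To make typical usage concrete, here is a minimal PyTorch sketch (not part of the original article) of an LSTM reading a batch of sequences, as in a time-series prediction task; every layer size and tensor shape below is illustrative:

import torch
import torch.nn as nn

# Illustrative only: an LSTM reading a batch of 8 sequences, each 20 steps of 5 features
lstm = nn.LSTM(input_size=5, hidden_size=32, batch_first=True)
head = nn.Linear(32, 1)           # e.g. predict the next value of the series

x = torch.randn(8, 20, 5)         # (batch, seq_len, input_size)
out, (h_n, c_n) = lstm(x)         # out: (8, 20, 32); h_n, c_n: (1, 8, 32)
prediction = head(out[:, -1, :])  # use the last time step -> shape (8, 1)
print(prediction.shape)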
Example
The example below implements an LSTM-based reinforcement-learning agent that moves in a 1D grid environment and learns an optimal path to the goal. At the end, 5 test paths are plotted and the best path is highlighted in red.
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt

# 1. Define the LSTM policy network
class LSTMPolicy(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMPolicy, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, hidden_state):
        batch_size = x.size(0)
        # Make sure the hidden state has the expected (num_layers, batch, hidden) shape
        if hidden_state[0].dim() == 2:
            hidden_state = (hidden_state[0].unsqueeze(1).repeat(1, batch_size, 1),
                            hidden_state[1].unsqueeze(1).repeat(1, batch_size, 1))
        out, hidden_state = self.lstm(x, hidden_state)
        out = self.fc(out[:, -1, :])     # take the output of the last time step
        action_prob = self.softmax(out)  # normalize the output into action probabilities (the policy)
        return action_prob, hidden_state

    def init_hidden(self, batch_size=1):
        return (torch.zeros(self.num_layers, batch_size, self.hidden_size),
                torch.zeros(self.num_layers, batch_size, self.hidden_size))
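As a quick sanity check on the expected tensor shapes (illustrative only; policy_demo and the sizes below are not part of the original script):

# Illustrative shape check: batch of 1, sequence length 1, one input feature
policy_demo = LSTMPolicy(input_size=1, hidden_size=64, output_size=2)
x_demo = torch.zeros(1, 1, 1)                  # (batch, seq_len, input_size)
h_demo = policy_demo.init_hidden(batch_size=1)
probs, h_demo = policy_demo(x_demo, h_demo)
print(probs.shape)                             # torch.Size([1, 2]) -> probabilities of the 2 actions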
# 2. Create the grid environment
class GridWorld:
    def __init__(self, grid_size=10, goal_position=9):
        self.grid_size = grid_size
        self.goal_position = goal_position
        self.reset()

    def reset(self):
        self.position = 0
        return self.position

    def step(self, action):
        if action == 0:
            self.position = max(0, self.position - 1)
        elif action == 1:
            self.position = min(self.grid_size - 1, self.position + 1)
        reward = 1 if self.position == self.goal_position else -0.1
        done = self.position == self.goal_position
        return self.position, reward, done
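The environment is a 1D line of 10 cells: action 0 moves the agent left, action 1 moves it right, every step costs -0.1, and reaching cell 9 gives a reward of +1 and ends the episode. A quick illustrative check (env_demo is not part of the original script):

# Illustrative only: two manual steps from the start cell
env_demo = GridWorld(grid_size=10, goal_position=9)
env_demo.reset()
print(env_demo.step(1))   # (1, -0.1, False): moved right, small step penalty
print(env_demo.step(0))   # (0, -0.1, False): moved back to the start cell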
# 3. Train the agent
def train(num_episodes=500, max_steps=50):
    env = GridWorld()
    input_size = 1
    hidden_size = 64
    output_size = 2
    num_layers = 1
    policy = LSTMPolicy(input_size, hidden_size, output_size, num_layers)
    optimizer = optim.Adam(policy.parameters(), lr=0.01)
    gamma = 0.99

    for episode in range(num_episodes):
        state = torch.tensor([[env.reset()]], dtype=torch.float32).unsqueeze(0)  # (1, 1, input_size)
        hidden_state = policy.init_hidden(batch_size=1)
        log_probs = []
        rewards = []

        for step in range(max_steps):
            action_probs, hidden_state = policy(state, hidden_state)
            action = torch.multinomial(action_probs, 1).item()
            log_prob = torch.log(action_probs.squeeze(0)[action])
            log_probs.append(log_prob)

            next_state, reward, done = env.step(action)
            rewards.append(reward)
            if done:
                break
            state = torch.tensor([[next_state]], dtype=torch.float32).unsqueeze(0)

        # Compute discounted returns and update the policy
        returns = []
        R = 0
        for r in reversed(rewards):
            R = r + gamma * R
            returns.insert(0, R)
        returns = torch.tensor(returns, dtype=torch.float32)
        returns = (returns - returns.mean()) / (returns.std() + 1e-9)

        loss = sum([-log_prob * R for log_prob, R in zip(log_probs, returns)])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (episode + 1) % 50 == 0:
            print(f"Episode {episode + 1}/{num_episodes}, Total Reward: {sum(rewards)}")

    torch.save(policy.state_dict(), "policy.pth")
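The update above is the REINFORCE policy-gradient rule applied to the discounted-return recursion R_t = r_t + gamma * R_{t+1}. A small hand-checkable example with illustrative values (not part of the original script):

# Illustrative only: discounted returns for a 3-step episode
gamma_demo = 0.99
rewards_demo = [-0.1, -0.1, 1.0]      # two step penalties, then the goal reward
returns_demo = []
R_demo = 0.0
for r in reversed(rewards_demo):
    R_demo = r + gamma_demo * R_demo
    returns_demo.insert(0, R_demo)
print(returns_demo)                   # [0.7811, 0.89, 1.0] (approximately)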
# Train the agent
train(500)

# 4. Test the agent and plot the best path
def test(num_episodes=5):
    env = GridWorld()
    input_size = 1
    hidden_size = 64
    output_size = 2
    num_layers = 1
    policy = LSTMPolicy(input_size, hidden_size, output_size, num_layers)
    policy.load_state_dict(torch.load("policy.pth"))

    plt.figure(figsize=(10, 5))
    best_path = None
    best_steps = float("inf")

    for episode in range(num_episodes):
        state = torch.tensor([[env.reset()]], dtype=torch.float32).unsqueeze(0)  # (1, 1, input_size)
        hidden_state = policy.init_hidden(batch_size=1)
        positions = [env.position]  # record the positions visited

        while True:
            action_probs, hidden_state = policy(state, hidden_state)
            action = torch.argmax(action_probs, dim=-1).item()
            next_state, reward, done = env.step(action)
            positions.append(next_state)
            if done:
                break
            state = torch.tensor([[next_state]], dtype=torch.float32).unsqueeze(0)

        # Record the best path (fewest steps)
        if len(positions) < best_steps:
            best_steps = len(positions)
            best_path = positions

        # Plot the ordinary paths in blue
        plt.plot(range(len(positions)), positions, marker="o", linestyle="-", color="blue", alpha=0.6,
                 label=f"Episode {episode + 1}" if episode == 0 else "")

    # Plot the best path in red
    if best_path:
        plt.plot(range(len(best_path)), best_path, marker="o", linestyle="-", color="red", linewidth=2,
                 label="Best Path")

    # Print the best path
    print(f"Best Path (steps={best_steps}): {best_path}")

    plt.xlabel("Time Steps")
    plt.ylabel("Agent Position")
    plt.title("Agent's Movement Path (Best Path in Red)")
    plt.legend()
    plt.grid(True)
    plt.show()

# Test and plot the agent's movement path
test(5)