{"id":1452,"date":"2024-09-27T07:32:00","date_gmt":"2024-09-26T23:32:00","guid":{"rendered":"https:\/\/blog.laoyulaoyu.top\/?p=1452"},"modified":"2024-09-10T09:42:19","modified_gmt":"2024-09-10T01:42:19","slug":"rlhf-%e7%9a%84%e5%90%af%e7%a4%ba%ef%bc%9a%e5%be%ae%e8%b0%83-lstm-%e8%83%bd%e6%9b%b4%e5%a5%bd%e9%a2%84%e6%b5%8b%e8%82%a1%e7%a5%a8%ef%bc%9f","status":"publish","type":"post","link":"https:\/\/laoyulaoyu.com\/index.php\/2024\/09\/27\/rlhf-%e7%9a%84%e5%90%af%e7%a4%ba%ef%bc%9a%e5%be%ae%e8%b0%83-lstm-%e8%83%bd%e6%9b%b4%e5%a5%bd%e9%a2%84%e6%b5%8b%e8%82%a1%e7%a5%a8%ef%bc%9f\/","title":{"rendered":"RLHF \u7684\u542f\u793a\uff1a\u5fae\u8c03 LSTM \u80fd\u66f4\u597d\u9884\u6d4b\u80a1\u7968\uff1f"},"content":{"rendered":"\n<p>\u4f5c\u8005\uff1a<a href=\"https:\/\/www.laoyulaoyu.com\/\" target=\"_blank\" rel=\"noreferrer noopener\">\u8001\u4f59\u635e\u9c7c<\/a><\/p>\n\n\n\n<p><strong><mark style=\"background-color:rgba(0, 0, 0, 0)\" class=\"has-inline-color has-cyan-bluish-gray-color\">\u539f\u521b\u4e0d\u6613\uff0c\u8f6c\u8f7d\u8bf7\u6807\u660e\u51fa\u5904\u53ca\u539f\u4f5c\u8005\u3002<\/mark><\/strong><\/p>\n\n\n\n<figure class=\"wp-block-image size-large\"><img decoding=\"async\" src=\"https:\/\/www.laoyulaoyu.com\/wp-content\/uploads\/2024\/09\/992bb9fda5bb644d1ae83fd579ee67d-1024x520.png\" alt=\"\" class=\"wp-image-1873\"\/><\/figure>\n\n\n\n<blockquote class=\"wp-block-quote is-layout-flow wp-block-quote-is-layout-flow\">\n<pre class=\"wp-block-verse\"><strong>\u5199\u5728\u524d\u9762\u7684\u8bdd\uff1a<\/strong>\u5728\u8d22\u52a1\u9884\u6d4b\u9886\u57df\uff0c\u51c6\u786e\u9884\u6d4b\u80a1\u7968\u4ef7\u683c\u662f\u4e00\u9879\u5177\u6709\u6311\u6218\u6027\u4f46\u81f3\u5173\u91cd\u8981\u7684\u4efb\u52a1\u3002\u4f20\u7edf\u65b9\u6cd5\u901a\u5e38\u96be\u4ee5\u5e94\u5bf9\u80a1\u7968\u5e02\u573a\u56fa\u6709\u7684\u6ce2\u52a8\u6027\u548c\u590d\u6742\u6027\u3002\u8fd9\u7bc7\u6587\u7ae0\u4ecb\u7ecd\u4e86\u4e00\u79cd\u521b\u65b0\u65b9\u6cd5\uff0c<mark 
style=\"background-color:rgba(0, 0, 0, 0)\" class=\"has-inline-color has-vivid-cyan-blue-color\">\u8be5\u65b9\u6cd5\u5c06\u957f\u77ed\u671f\u8bb0\u5fc6 \uff08LSTM\uff09 \u7f51\u7edc\u4e0e\u57fa\u4e8e\u8bc4\u5206\u7684\u5fae\u8c03\u673a\u5236\u76f8\u7ed3\u5408\uff0c\u4ee5\u589e\u5f3a\u80a1\u7968\u4ef7\u683c\u9884\u6d4b\u3002<\/mark>\u6211\u4eec\u5c06\u4ee5 Reliance Industries Limited \u7684\u80a1\u7968\u4f5c\u4e3a\u6211\u4eec\u7684\u6848\u4f8b\u7814\u7a76\uff0c\u5c55\u793a\u8fd9\u79cd\u65b9\u6cd5\u5982\u4f55\u6f5c\u5728\u5730\u63d0\u9ad8\u9884\u6d4b\u51c6\u786e\u6027\u3002<\/pre>\n<\/blockquote>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>\u4e00\u3001\u6838\u5fc3\u7406\u5ff5<\/strong><\/h3>\n\n\n\n<p>\u53d7 RLHF \u7684\u542f\u53d1\uff0c\u6211\u4eec\u5c1d\u8bd5\u5728\u65f6\u95f4\u5e8f\u5217\u9884\u6d4b\u4e2d\u5e94\u7528\u76f8\u540c\u7684\u6982\u5ff5\uff0cRLHF\u7684\u6982\u5ff5\u56e0\u4e3aChatGPT\u7684\u51fa\u73b0\uff0c\u53ef\u80fd\u7b2c\u4e00\u6b21\u51fa\u73b0\u5728\u5927\u591a\u6570\u4eba\u7684\u773c\u91cc\uff0cRLHF \u662f &#8220;Reinforcement Learning from Human Feedback&#8221; \u7684\u7f29\u5199\uff0c\u8fd9\u662f\u4e00\u79cd\u7ed3\u5408\u4e86\u5f3a\u5316\u5b66\u4e60\u548c\u4eba\u7c7b\u53cd\u9988\u7684\u673a\u5668\u5b66\u4e60\u65b9\u6cd5\u3002\u5728\u8fd9\u79cd\u65b9\u6cd5\u4e2d\uff0c\u4eba\u5de5\u667a\u80fd\uff08AI\uff09\u7cfb\u7edf\u901a\u8fc7\u6267\u884c\u4efb\u52a1\u5e76\u63a5\u6536\u4eba\u7c7b\u8bc4\u4f30\u8005\u5bf9\u5176\u884c\u4e3a\u7684\u53cd\u9988\u6765\u5b66\u4e60\u3002\u8fd9\u79cd\u65b9\u6cd5\u7279\u522b\u9002\u7528\u4e8e\u90a3\u4e9b\u96be\u4ee5\u7528\u4f20\u7edf\u5956\u52b1\u51fd\u6570\u660e\u786e\u5b9a\u4e49\u4efb\u52a1\u6210\u529f\u4e0e\u5426\u7684\u60c5\u51b5\u3002\u56de\u5230\u6b63\u9898\uff0c\u6211\u4eec\u7684\u65b9\u6cd5\u56f4\u7ed5\u4e09\u4e2a\u5173\u952e\u7ec4\u6210\u90e8\u5206\uff1a<\/p>\n\n\n\n<p>1. 
\u7528\u4e8e\u521d\u59cb\u80a1\u7968\u4ef7\u683c\u9884\u6d4b\u7684LSTM\u6a21\u578b<br>2.\u8bc4\u4f30\u8fd9\u4e9b\u9884\u6d4b\u8d28\u91cf\u7684\u8bc4\u5206\u6a21\u578b<br>3.\u4f7f\u7528\u8bc4\u5206\u6a21\u578b\u7684\u8f93\u51fa\u6765\u4f18\u5316 LSTM \u6027\u80fd\u7684\u5fae\u8c03\u8fc7\u7a0b<\/p>\n\n\n\n<p>\u901a\u8fc7\u96c6\u6210\u8fd9\u4e9b\u7ec4\u4ef6\uff0c\u6211\u4eec\u7684\u76ee\u6807\u662f\u521b\u5efa\u4e00\u4e2a\u66f4\u5177\u9002\u5e94\u6027\u548c\u51c6\u786e\u6027\u7684\u9884\u6d4b\u7cfb\u7edf\uff0c\u4ece\u800c\u66f4\u597d\u5730\u6355\u6349\u80a1\u4ef7\u53d8\u52a8\u7684\u7ec6\u5fae\u5dee\u522b\u3002<\/p>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>\u4e8c\u3001\u67b6\u6784\u6982\u8ff0<\/strong><\/h3>\n\n\n\n<p><strong>1. LSTM \u6a21\u578b\uff1a<\/strong><br>\u6211\u4eec\u7cfb\u7edf\u7684\u6838\u5fc3\u662f LSTM \u795e\u7ecf\u7f51\u7edc\u3002LSTM \u7279\u522b\u9002\u5408\u4e8e\u80a1\u7968\u4ef7\u683c\u7b49\u65f6\u95f4\u5e8f\u5217\u6570\u636e\uff0c\u56e0\u4e3a\u5b83\u4eec\u80fd\u591f\u6355\u83b7\u6570\u636e\u4e2d\u7684\u957f\u671f\u4f9d\u8d56\u5173\u7cfb\u3002\u6211\u4eec\u7684 LSTM \u6a21\u578b\u5c06\u4e00\u7cfb\u5217\u5386\u53f2\u80a1\u7968\u4ef7\u683c\u4f5c\u4e3a\u8f93\u5165\uff0c\u5e76\u9884\u6d4b\u5e8f\u5217\u4e2d\u7684\u4e0b\u4e00\u4e2a\u4ef7\u683c\u3002<\/p>\n\n\n\n<p><strong>2. \u8bc4\u5206\u6a21\u578b\uff1a<\/strong><br>\u8bc4\u5206\u6a21\u578b\u662f\u4e00\u4e2a\u5355\u72ec\u7684\u795e\u7ecf\u7f51\u7edc\uff0c\u65e8\u5728\u8bc4\u4f30 LSTM \u9884\u6d4b\u7684\u8d28\u91cf\u3002\u5b83\u91c7\u7528\u539f\u59cb\u4ef7\u683c\u5e8f\u5217\u548c LSTM \u7684\u9884\u6d4b\u4f5c\u4e3a\u8f93\u5165\uff0c\u8f93\u51fa\u4e00\u4e2a\u8868\u793a LSTM \u9884\u6d4b\u9884\u6d4b\u51c6\u786e\u6027\u7684\u5206\u6570\u3002<\/p>\n\n\n\n<p><strong>3. 
\u5fae\u8c03\u673a\u5236\uff1a<\/strong><br>\u8be5\u7ec4\u4ef6\u4f7f\u7528\u8bc4\u5206\u6a21\u578b\u751f\u6210\u7684\u5206\u6570\u6765\u8c03\u6574 LSTM \u7684\u8bad\u7ec3\u8fc7\u7a0b\u3002\u5728\u5fae\u8c03\u8fc7\u7a0b\u4e2d\uff0c\u4ece\u8bc4\u5206\u6a21\u578b\u83b7\u5f97\u8f83\u9ad8\u5206\u6570\u7684\u9884\u6d4b\u4f1a\u5f97\u5230\u66f4\u5927\u7684\u6743\u91cd\uff0c\u4ece\u800c\u9f13\u52b1 LSTM \u5b66\u4e60\u6a21\u5f0f\uff0c\u4ece\u800c\u83b7\u5f97\u66f4\u51c6\u786e\u7684\u9884\u6d4b\u3002<\/p>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>\u4e09\u3001\u5de5\u4f5c\u6d41\u7a0b<\/strong><\/h3>\n\n\n\n<p><strong>1. \u6570\u636e\u51c6\u5907\uff1a<\/strong><br>\u6211\u4eec\u9996\u5148\u4f7f\u7528 yfinance \u5e93\u83b7\u53d6 Reliance Industries Limited \u7684\u5386\u53f2\u80a1\u7968\u4ef7\u683c\u6570\u636e\u3002\u7136\u540e\uff0c\u8fd9\u4e9b\u6570\u636e\u88ab\u9884\u5904\u7406\u5e76\u62c6\u5206\u4e3a\u9002\u5408 LSTM \u8bad\u7ec3\u7684\u5e8f\u5217\u3002<\/p>\n\n\n\n<p><strong>2. \u521d\u59cb LSTM \u8bad\u7ec3\uff1a<\/strong><br>LSTM \u6a21\u578b\u5728\u90e8\u5206\u5386\u53f2\u6570\u636e\u4e0a\u8fdb\u884c\u8bad\u7ec3\u3002\u8fd9\u4e3a\u6211\u4eec\u63d0\u4f9b\u4e86\u4e00\u4e2a\u80fd\u591f\u505a\u51fa\u5408\u7406\u80a1\u7968\u4ef7\u683c\u9884\u6d4b\u7684\u57fa\u51c6\u6a21\u578b\u3002<\/p>\n\n\n\n<p><strong>3. \u8bc4\u5206\u6a21\u578b\u8bad\u7ec3\uff1a<\/strong><br>\u6211\u4eec\u4f7f\u7528\u53e6\u4e00\u90e8\u5206\u6570\u636e\u6765\u8bad\u7ec3\u8bc4\u5206\u6a21\u578b\u3002\u8be5\u6a21\u578b\u901a\u8fc7\u5c06 LSTM \u7684\u9884\u6d4b\u4e0e\u5b9e\u9645\u80a1\u7968\u4ef7\u683c\u8fdb\u884c\u6bd4\u8f83\u6765\u5b66\u4e60\u8bc4\u4f30 LSTM \u9884\u6d4b\u7684\u8d28\u91cf\u3002<\/p>\n\n\n\n<p><strong>4. 
\u5fae\u8c03\u8fc7\u7a0b\uff1a<\/strong><br>\u4f7f\u7528\u6570\u636e\u7684\u7b2c\u4e09\u90e8\u5206\uff0c\u6211\u4eec\u5bf9 LSTM \u6a21\u578b\u8fdb\u884c\u5fae\u8c03\u3002\u5728\u6b64\u8fc7\u7a0b\u4e2d\uff0c\u6211\u4eec\u4f7f\u7528\u8bc4\u5206\u6a21\u578b\u6765\u8bc4\u4f30\u6bcf\u4e2a\u9884\u6d4b\u3002LSTM \u7684\u5b66\u4e60\u7387\u4f1a\u6839\u636e\u8fd9\u4e9b\u5206\u6570\u8fdb\u884c\u8c03\u6574\uff0c\u4f7f\u5176\u80fd\u591f\u66f4\u4e13\u6ce8\u4e8e\u6539\u8fdb\u8bc4\u5206\u6a21\u578b\u8ba4\u4e3a\u4e0d\u592a\u51c6\u786e\u7684\u9884\u6d4b\u3002<\/p>\n\n\n\n<p><strong>5. \u8bc4\u4f30\uff1a<\/strong><br>\u6700\u540e\uff0c\u6211\u4eec\u5728\u6d4b\u8bd5\u96c6\u4e0a\u8bc4\u4f30\u539f\u59cb LSTM \u548c\u5fae\u8c03\u540e\u7684 LSTM \u7684\u6027\u80fd\uff0c\u6bd4\u8f83\u5b83\u4eec\u7684\u9884\u6d4b\u4ee5\u8bc4\u4f30\u5fae\u8c03\u65b9\u6cd5\u7684\u6709\u6548\u6027\u3002<\/p>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>\u56db\u3001\u4ee3\u7801\u5b9e\u73b0<\/strong><\/h3>\n\n\n\n<p>\u8ba9\u6211\u4eec\u5c06\u4ee3\u7801\u5206\u89e3\u4e3a\u591a\u4e2a\u90e8\u5206\u5e76\u8be6\u7ec6\u89e3\u91ca\u6bcf\u4e2a\u90e8\u5206\u3002<\/p>\n\n\n\n<p><strong>1. 
\u5bfc\u5165\u5e93\u5e76\u8bbe\u7f6e\u73af\u5883<\/strong><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code><code>import yfinance as yf<\/code><code>import numpy as np<\/code><code>import pandas as pd<\/code><code>from sklearn.preprocessing import MinMaxScaler<\/code><code>import torch<\/code><code>import torch.nn as nn<\/code><code>import torch.optim as optim<\/code><code>from torch.utils.data import TensorDataset, DataLoader<\/code><code>import matplotlib.pyplot as plt<\/code><code>device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")<\/code><code>print(f\"Using device: {device}\")<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u90e8\u5206\u5bfc\u5165\u6240\u6709\u5fc5\u8981\u7684\u5e93\u3002\u6211\u4eec\u4f7f\u7528 yfinance \u6765\u83b7\u53d6\u80a1\u7968\u6570\u636e\uff0c\u4f7f\u7528 numpy \u548c pandas \u8fdb\u884c\u6570\u636e\u64cd\u4f5c\uff0c\u4f7f\u7528 sklearn \u8fdb\u884c\u6570\u636e\u9884\u5904\u7406\uff0c\u4f7f\u7528 torch \u6784\u5efa\u548c\u8bad\u7ec3\u795e\u7ecf\u7f51\u7edc\uff0c\u4f7f\u7528 matplotlib \u8fdb\u884c\u53ef\u89c6\u5316\u3002\u6211\u4eec\u8fd8\u8bbe\u7f6e\u4e86 PyTorch \u5c06\u7528\u4e8e\u8ba1\u7b97\u7684\u8bbe\u5907\uff08CPU \u6216 GPU\uff09\u3002<\/p>\n\n\n\n<p><strong>2. 
\u6570\u636e\u83b7\u53d6\u548c\u9884\u5904\u7406<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>reliance = yf.Ticker(\u201cRELIANCE.NS\u201d)<\/code><code>data = reliance.history(period=\u201dmax\u201d)&#91;\u2018Close\u2019].values.reshape(-1, 1)<\/code><code><br><\/code><code>scaler = MinMaxScaler(feature_range=(0, 1))<\/code><code>data_normalized = scaler.fit_transform(data)<\/code><code><br><\/code><code>def create_sequences(data, seq_length):<\/code><code>sequences = &#91;]<\/code><code>targets = &#91;]<\/code><code>for i in range(len(data) \u2014 seq_length):<\/code><code>seq = data&#91;i:i+seq_length]<\/code><code>target = data&#91;i+seq_length]<\/code><code>sequences.append(seq)<\/code><code>targets.append(target)<\/code><code>return np.array(sequences), np.array(targets)<\/code><code><br><\/code><code>seq_length = 60 # 60 days of historical data<\/code><code>X, y = create_sequences(data_normalized, seq_length)<\/code><\/code><\/pre>\n\n\n\n<p>\u5728\u8fd9\u91cc\uff0c\u6211\u4eec\u83b7\u53d6 Reliance Industries Limited \u80a1\u7968\u7684\u5386\u53f2\u6536\u76d8\u4ef7\u3002\u6211\u4eec\u4f7f\u7528 MinMaxScaler \u5bf9\u6570\u636e\u8fdb\u884c\u5f52\u4e00\u5316\uff0c\u4ee5\u786e\u4fdd\u6240\u6709\u503c\u90fd\u5728 0 \u5230 1 \u4e4b\u95f4\uff0c\u8fd9\u6709\u52a9\u4e8e\u8bad\u7ec3\u795e\u7ecf\u7f51\u7edc\u3002<\/p>\n\n\n\n<p>\u201ccreate_sequences\u201d\u529f\u80fd\u81f3\u5173\u91cd\u8981\u3002\u5b83\u5c06\u6211\u4eec\u7684\u65f6\u95f4\u5e8f\u5217\u6570\u636e\u8f6c\u6362\u4e3a\u9002\u5408 LSTM \u8bad\u7ec3\u7684\u683c\u5f0f\u3002\u5bf9\u4e8e\u6bcf\u4e2a\u6570\u636e\u70b9\uff0c\u5b83\u4f1a\u521b\u5efa\u4e00\u4e2a\u524d 60 \u5929 \uff08seq_length\uff09 \u7684\u5e8f\u5217\u4f5c\u4e3a\u8f93\u5165\uff0c\u5e76\u4ee5\u7b2c\u4e8c\u5929\u7684\u4ef7\u683c\u4e3a\u76ee\u6807\u3002<\/p>\n\n\n\n<p><strong>3. 
\u6570\u636e\u5207\u5206<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>lstm_split = int(0.5 * len(X))<\/code><code>scoring_split = int(0.75 * len(X))<\/code><code><br><\/code><code>X_lstm, y_lstm = X&#91;:lstm_split], y&#91;:lstm_split]<\/code><code>X_scoring, y_scoring = X&#91;lstm_split:scoring_split], y&#91;lstm_split:scoring_split]<\/code><code>X_finetuning, y_finetuning = X&#91;scoring_split:], y&#91;scoring_split:]<\/code><code><br><\/code><code>lstm_train_split = int(0.8 * len(X_lstm))<\/code><code>X_lstm_train, y_lstm_train = X_lstm&#91;:lstm_train_split], y_lstm&#91;:lstm_train_split]<\/code><code>X_lstm_test, y_lstm_test = X_lstm&#91;lstm_train_split:], y_lstm&#91;lstm_train_split:]<\/code><\/code><\/pre>\n\n\n\n<p>\u6211\u4eec\u5c06\u6570\u636e\u5206\u4e3a\u4e09\u4e2a\u4e3b\u8981\u90e8\u5206\uff1a<\/p>\n\n\n\n<p>1. LSTM \u8bad\u7ec3\u548c\u6d4b\u8bd5<br>2.\u8bc4\u5206\u6a21\u578b\u8bad\u7ec3<br>3.\u5fae\u8c03<\/p>\n\n\n\n<p>\u8fd9\u786e\u4fdd\u4e86\u6211\u4eec\u6d41\u7a0b\u7684\u6bcf\u4e2a\u9636\u6bb5\u90fd\u4f7f\u7528\u5355\u72ec\u7684\u6570\u636e\uff0c\u9632\u6b62\u6570\u636e\u6cc4\u9732\uff0c\u5e76\u5bf9\u6211\u4eec\u7684\u65b9\u6cd5\u8fdb\u884c\u516c\u5e73\u8bc4\u4f30\u3002<\/p>\n\n\n\n<p><strong>4. 
LSTM \u6a21\u578b\u5b9a\u4e49<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>class LSTMModel(nn.Module):<\/code><code>  def __init__(self, input_size=1, hidden_size=50, num_layers=2, output_size=1):<\/code><code>    super(LSTMModel, self).__init__()<\/code><code>    self.hidden_size = hidden_size<\/code><code>    self.num_layers = num_layers<\/code><code>    self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)<\/code><code>    self.fc = nn.Linear(hidden_size, output_size)<\/code><code><br><\/code><code>  def forward(self, x):<\/code><code>    h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)<\/code><code>    c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)<\/code><code>    out, _ = self.lstm(x, (h0, c0))<\/code><code>    out = self.fc(out&#91;:, -1, :])<\/code><code>    return out<\/code><code><br><\/code><code>lstm_model = LSTMModel().to(device)<\/code><code>criterion = nn.MSELoss()<\/code><code>optimizer = optim.Adam(lstm_model.parameters(), lr=0.001)<\/code><\/code><\/pre>\n\n\n\n<p>\u8fd9\u5b9a\u4e49\u4e86\u6211\u4eec\u7684 LSTM \u6a21\u578b\u3002\u5b83\u7531\u4e00\u4e2a LSTM \u5c42\u548c\u4e00\u4e2a\u5168\u8fde\u63a5\u5c42\u7ec4\u6210\u3002\u8be5\u6a21\u578b\u91c7\u7528\u4e00\u7cfb\u5217\u80a1\u7968\u4ef7\u683c\u5e76\u8f93\u51fa\u5355\u4e2a\u9884\u6d4b\u4ef7\u683c\u3002<\/p>\n\n\n\n<p><strong>5. 
LSTM \u6a21\u578b\u8bad\u7ec3<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>def train_model(model, train_data, train_targets, epochs=50, batch_size=32):<\/code><code>  train_data = torch.FloatTensor(train_data).to(device)<\/code><code>  train_targets = torch.FloatTensor(train_targets).to(device)<\/code><code>  train_dataset = TensorDataset(train_data, train_targets)<\/code><code>  train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)<\/code><code><br><\/code><code>  model.train()<\/code><code>  for epoch in range(epochs):<\/code><code>  for batch_X, batch_y in train_loader:<\/code><code>  optimizer.zero_grad()<\/code><code>  outputs = model(batch_X)<\/code><code>  loss = criterion(outputs, batch_y)<\/code><code>  loss.backward()<\/code><code>  optimizer.step()<\/code><code><br><\/code><code>  if (epoch + 1) % 10 == 0:<\/code><code>  print(f\u2019Epoch &#91;{epoch+1}\/{epochs}], Loss: {loss.item():.4f}\u2019)<\/code><code><br><\/code><code>train_model(lstm_model, X_lstm_train, y_lstm_train)<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u51fd\u6570\u5904\u7406 LSTM \u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u3002\u5b83\u4f7f\u7528 DataLoader \u5bf9\u6211\u4eec\u7684\u6570\u636e\u8fdb\u884c\u6279\u5904\u7406\uff0c\u8fd9\u6709\u52a9\u4e8e\u9ad8\u6548\u8bad\u7ec3\uff0c\u5c24\u5176\u662f\u5bf9\u4e8e\u5927\u578b\u6570\u636e\u96c6\u3002\u8be5\u6a21\u578b\u9488\u5bf9\u6307\u5b9a\u6570\u91cf\u7684 epoch \u8fdb\u884c\u8bad\u7ec3\uff0c\u6bcf 10 \u4e2a epoch \u6253\u5370\u4e00\u6b21\u635f\u5931\u4ee5\u76d1\u63a7\u8fdb\u5ea6\u3002<\/p>\n\n\n\n<p><strong>6. 
LSTM\u8bc4\u4f30\u51fd\u6570<\/strong><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code><code>def predict_and_evaluate(model, X, y):<\/code><code>  model.eval()<\/code><code>  with torch.no_grad():<\/code><code>  X = torch.FloatTensor(X).to(device)<\/code><code>  predictions = model(X).cpu().numpy()<\/code><code><br><\/code><code>  y = scaler.inverse_transform(y.reshape(-1, 1))<\/code><code>  predictions = scaler.inverse_transform(predictions.reshape(-1, 1))<\/code><code><br><\/code><code>  mae = np.mean(np.abs(y \u2014 predictions))<\/code><code>  mape = np.mean(np.abs((y \u2014 predictions) \/ y)) * 100<\/code><code><br><\/code><code>  return predictions, mae, mape<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u51fd\u6570\u7528\u4e8e\u8fdb\u884c\u9884\u6d4b\u548c\u8bc4\u4f30\u6a21\u578b\u7684\u6027\u80fd\u3002\u5b83\u8ba1\u7b97\u4e24\u4e2a\u91cd\u8981\u6307\u6807\uff1a<br>1. \u5e73\u5747\u7edd\u5bf9\u8bef\u5dee \uff08MAE\uff09\uff1a\u8fd9\u4e3a\u6211\u4eec\u63d0\u4f9b\u4e86\u9884\u6d4b\u8bef\u5dee\u7684\u5e73\u5747\u5e45\u5ea6\u3002<br>2. \u5e73\u5747\u7edd\u5bf9\u767e\u5206\u6bd4\u8bef\u5dee \uff08MAPE\uff09\uff1a\u8fd9\u63d0\u4f9b\u4e86\u9884\u6d4b\u51c6\u786e\u6027\u7684\u767e\u5206\u6bd4\u5ea6\u91cf<\/p>\n\n\n\n<p><strong>7. 
\u8bc4\u5206\u6a21\u578b\u5b9e\u73b0<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>class ScoringModel(nn.Module):<\/code><code>def __init__(self, input_size=seq_length+1, hidden_size=32, output_size=1):<\/code><code>  super(ScoringModel, self).__init__()<\/code><code>  self.fc1 = nn.Linear(input_size, hidden_size)<\/code><code>  self.fc2 = nn.Linear(hidden_size, output_size)<\/code><code>  self.relu = nn.ReLU()<\/code><code>  self.sigmoid = nn.Sigmoid()<\/code><code>def forward(self, x):<\/code><code>  out = self.relu(self.fc1(x))<\/code><code>  out = self.sigmoid(self.fc2(out))<\/code><code>  return out<\/code><code>  scoring_model = ScoringModel().to(device)<\/code><code>  scoring_criterion = nn.MSELoss()<\/code><code>  scoring_optimizer = optim.Adam(scoring_model.parameters(), lr=0.001)<\/code><\/code><\/pre>\n\n\n\n<p>\u8bc4\u5206\u6a21\u578b\u662f\u4e00\u4e2a\u7b80\u5355\u7684\u524d\u9988\u795e\u7ecf\u7f51\u7edc\u3002\u5b83\u91c7\u7528\u4e0e LSTM \u7684\u9884\u6d4b\u8fde\u63a5\u7684\u539f\u59cb\u4ef7\u683c\u5e8f\u5217\u4f5c\u4e3a\u8f93\u5165\u3002\u8f93\u51fa\u662f\u4ecb\u4e8e 0 \u548c 1 \u4e4b\u95f4\u7684\u5206\u6570\uff0c\u8868\u793a LSTM \u9884\u6d4b\u7684\u9884\u6d4b\u51c6\u786e\u6027\u3002<\/p>\n\n\n\n<p><strong>8. 
\u4e3a\u8bc4\u5206\u6a21\u578b\u51c6\u5907\u6570\u636e<\/strong><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code><code>def prepare_scoring_data(X, y):<\/code><code>  lstm_model.eval()<\/code><code>  with torch.no_grad():<\/code><code>    X_tensor = torch.FloatTensor(X).to(device)<\/code><code>    predictions = lstm_model(X_tensor).cpu().numpy()<\/code><code><br><\/code><code>  scoring_X = np.concatenate(&#91;X.reshape(X.shape&#91;0], -1), predictions], axis=1)<\/code><code>  scoring_y = np.abs(y \u2014 predictions.reshape(-1, 1)) # Use absolute error as the score<\/code><code>  return scoring_X, scoring_y<\/code><code><br><\/code><code>X_scoring_train, y_scoring_train = prepare_scoring_data(X_scoring&#91;:int(0.8*len(X_scoring))], y_scoring&#91;:int(0.8*len(X_scoring))])<\/code><code>X_scoring_test, y_scoring_test = prepare_scoring_data(X_scoring&#91;int(0.8*len(X_scoring)):], y_scoring&#91;int(0.8*len(X_scoring)):])<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u51fd\u6570\u51c6\u5907\u7528\u4e8e\u8bad\u7ec3\u8bc4\u5206\u6a21\u578b\u7684\u6570\u636e\u3002\u5b83\u4f7f\u7528 LSTM \u5bf9\u8bc4\u5206\u6570\u636e\u96c6\u8fdb\u884c\u9884\u6d4b\uff0c\u7136\u540e\u5c06\u8fd9\u4e9b\u9884\u6d4b\u4e0e\u539f\u59cb\u8f93\u5165\u5e8f\u5217\u8fde\u63a5\u8d77\u6765\u3002\u8bc4\u5206\u6a21\u578b\u7684\u76ee\u6807\u662f LSTM \u9884\u6d4b\u7684\u7edd\u5bf9\u8bef\u5dee\u3002<\/p>\n\n\n\n<p><strong>9. 
\u5fae\u8c03\u8fc7\u7a0b<\/strong><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code><code>def fine_tune_lstm(lstm_model, scoring_model, X, y, epochs=10, lr=0.0001):<\/code><code>  fine_tune_optimizer = optim.Adam(lstm_model.parameters(), lr=lr)<\/code><code>  X_tensor = torch.FloatTensor(X).to(device)<\/code><code>  y_tensor = torch.FloatTensor(y).to(device)<\/code><code><br><\/code><code>  for epoch in range(epochs):<\/code><code>    lstm_model.train()<\/code><code>    total_loss = 0<\/code><code>    for i in range(len(X)):<\/code><code>      fine_tune_optimizer.zero_grad()<\/code><code>      lstm_output = lstm_model(X_tensor&#91;i].unsqueeze(0))<\/code><code>      loss = criterion(lstm_output, y_tensor&#91;i].unsqueeze(0))<\/code><code><br><\/code><code>      # Get score from scoring model<\/code><code>      scoring_input = torch.cat(&#91;X_tensor&#91;i].reshape(1, -1), lstm_output.detach()], dim=1)<\/code><code>      score = scoring_model(scoring_input)<\/code><code><br><\/code><code>    # Adjust loss based on score<\/code><code>    adjusted_loss = loss * (1 + score.item())<\/code><code>    adjusted_loss.backward()<\/code><code>    fine_tune_optimizer.step()<\/code><code>    total_loss += adjusted_loss.item()<\/code><code><br><\/code><code>    if (epoch + 1) % 5 == 0:<\/code><code>      print(f\u2019Fine-tuning Epoch &#91;{epoch+1}\/{epochs}], Avg Loss: {total_loss\/len(X):.4f}\u2019)<\/code><code><br><\/code><code># Fine-tune LSTM<\/code><code>fine_tune_lstm(lstm_model, scoring_model, X_finetune_train, y_finetune_train)<\/code><\/code><\/pre>\n\n\n\n<p><strong>\u8fd9\u5c31\u662f\u672c\u6587\u63d0\u5230\u7684\u5173\u952e\u3002\u5fae\u8c03\u8fc7\u7a0b\u4f7f\u7528\u8bc4\u5206\u6a21\u578b\u6765\u8c03\u6574 LSTM 
\u7684\u5b66\u4e60\u3002<\/strong>\u5bf9\u4e8e\u6bcf\u4e2a\u9884\u6d4b\uff0c\u6211\u4eec\u4f7f\u7528\u8bc4\u5206\u6a21\u578b\u8ba1\u7b97\u5206\u6570\u3002\u7136\u540e\uff0c\u6b64\u5206\u6570\u7528\u4e8e\u8c03\u6574\u635f\u5931\uff1a\u5206\u6570\u8f83\u9ad8\uff08\u8868\u793a\u9884\u6d4b\u8bef\u5dee\u8f83\u5927\uff09\u7684\u9884\u6d4b\u4f1a\u5bfc\u81f4\u8f83\u9ad8\u7684\u635f\u5931\uff0c\u4ece\u800c\u9f13\u52b1\u6a21\u578b\u66f4\u591a\u5730\u5173\u6ce8\u6539\u8fdb\u8fd9\u4e9b\u9884\u6d4b\u3002<\/p>\n\n\n\n<p><strong>10. \u8bc4\u4f30\u548c\u6bd4\u8f83<\/strong><\/p>\n\n\n\n<p>\u73b0\u5728\u6211\u4eec\u65e2\u6709\u4e86\u539f\u59cb LSTM \u6a21\u578b\uff0c\u53c8\u6709\u4e86\u5fae\u8c03\u6a21\u578b\uff0c\u6211\u4eec\u53ef\u4ee5\u6bd4\u8f83\u5b83\u4eec\u5728\u5fae\u8c03\u6570\u636e\u96c6\u4e0a\u7684\u6027\u80fd\u3002\u8fd9\u79cd\u6bd4\u8f83\u5c06\u6709\u52a9\u4e8e\u6211\u4eec\u4e86\u89e3\u6211\u4eec\u57fa\u4e8e\u8bc4\u5206\u7684\u5fae\u8c03\u65b9\u6cd5\u662f\u5426\u786e\u5b9e\u6539\u8fdb\u4e86\u6a21\u578b\u7684\u9884\u6d4b\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code># Predictions with original LSTM on fine-tuning dataset<\/code><code>original_finetune_train_preds, original_finetune_train_mae, original_finetune_train_mape = predict_and_evaluate(lstm_model, X_finetune_train, y_finetune_train)<\/code><code>original_finetune_test_preds, original_finetune_test_mae, original_finetune_test_mape = predict_and_evaluate(lstm_model, X_finetune_test, y_finetune_test)<\/code><code><br><\/code><code>print(\u201c\\nOriginal LSTM on Fine-tuning Dataset:\u201d)<\/code><code>print(f\u201dTrain MAE: {original_finetune_train_mae:.2f}, MAPE: {original_finetune_train_mape:.2f}%\u201d)<\/code><code>print(f\u201dTest MAE: {original_finetune_test_mae:.2f}, MAPE: {original_finetune_test_mape:.2f}%\u201d)<\/code><code><br><\/code><code># Evaluate fine-tuned model<\/code><code>finetuned_train_preds, finetuned_train_mae, finetuned_train_mape 
= predict_and_evaluate(lstm_model, X_finetune_train, y_finetune_train)<\/code><code>finetuned_test_preds, finetuned_test_mae, finetuned_test_mape = predict_and_evaluate(lstm_model, X_finetune_test, y_finetune_test)<\/code><code><br><\/code><code>print(\u201c\\nFine-tuned LSTM on Fine-tuning Dataset:\u201d)<\/code><code>print(f\u201dTrain MAE: {finetuned_train_mae:.2f}, MAPE: {finetuned_train_mape:.2f}%\u201d)<\/code><code>print(f\u201dTest MAE: {finetuned_test_mae:.2f}, MAPE: {finetuned_test_mape:.2f}%\u201d)<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u4ee3\u7801\u5757\u5728\u5fae\u8c03\u6570\u636e\u96c6\uff08\u8bad\u7ec3\u548c\u6d4b\u8bd5\u62c6\u5206\uff09\u4e0a\u8bc4\u4f30\u539f\u59cb\u6a21\u578b\u548c\u5fae\u8c03\u7684 LSTM \u6a21\u578b\u3002\u6211\u4eec\u8ba1\u7b97\u6bcf\u79cd\u60c5\u51b5\u7684\u5e73\u5747\u7edd\u5bf9\u8bef\u5dee \uff08MAE\uff09 \u548c\u5e73\u5747\u7edd\u5bf9\u767e\u5206\u6bd4\u8bef\u5dee \uff08MAPE\uff09\u3002<\/p>\n\n\n\n<p><strong>11. \u53ef\u89c6\u5316<\/strong><\/p>\n\n\n\n<p>\u4e3a\u4e86\u66f4\u597d\u5730\u4e86\u89e3\u8fd9\u4e9b\u6539\u8fdb\uff0c\u6211\u4eec\u53ef\u4ee5\u5c06\u4e24\u4e2a\u6a21\u578b\u7684\u9884\u6d4b\u4e0e\u5b9e\u9645\u80a1\u7968\u4ef7\u683c\u4e00\u8d77\u53ef\u89c6\u5316\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>plt.figure(figsize=(20, 15))<\/code><code><br><\/code><code># Training set comparison<\/code><code>plt.subplot(2, 1, 1)<\/code><code>plt.plot(scaler.inverse_transform(y_finetune_train), label=\u2019Actual\u2019, color=\u2019black\u2019)<\/code><code>plt.plot(original_finetune_train_preds, label=\u2019Original LSTM\u2019, color=\u2019blue\u2019, alpha=0.7)<\/code><code>plt.plot(finetuned_train_preds, label=\u2019Fine-tuned LSTM\u2019, color=\u2019red\u2019, alpha=0.7)<\/code><code>plt.title(\u2018Predictions on Fine-tuning Training Set\u2019)<\/code><code>plt.legend()<\/code><code><br><\/code><code># Test set 
comparison<\/code><code>plt.subplot(2, 1, 2)<\/code><code>plt.plot(scaler.inverse_transform(y_finetune_test), label=\u2019Actual\u2019, color=\u2019black\u2019)<\/code><code>plt.plot(original_finetune_test_preds, label=\u2019Original LSTM\u2019, color=\u2019blue\u2019, alpha=0.7)<\/code><code>plt.plot(finetuned_test_preds, label=\u2019Fine-tuned LSTM\u2019, color=\u2019red\u2019, alpha=0.7)<\/code><code>plt.title(\u2018Predictions on Fine-tuning Test Set\u2019)<\/code><code>plt.legend()<\/code><code><br><\/code><code>plt.tight_layout()<\/code><code>plt.show()<\/code><\/code><\/pre>\n\n\n\n<p>\u8fd9\u5c06\u521b\u5efa\u4e00\u4e2a\u56fe\uff0c\u5c06\u5b9e\u9645\u80a1\u7968\u4ef7\u683c\u4e0e\u539f\u59cb LSTM \u6a21\u578b\u548c\u5fae\u8c03 LSTM \u6a21\u578b\u7684\u9884\u6d4b\u8fdb\u884c\u6bd4\u8f83\u3002\u8fd9\u79cd\u53ef\u89c6\u5316\u8868\u793a\u53ef\u4ee5\u5e2e\u52a9\u6211\u4eec\u786e\u5b9a\u5fae\u8c03\u540e\u7684\u6a21\u578b\u5728\u54ea\u4e9b\u65b9\u9762\u8fdb\u884c\u4e86\u6539\u8fdb\uff0c\u54ea\u4e9b\u65b9\u9762\u53ef\u80fd\u4ecd\u5728\u82e6\u82e6\u6323\u624e\u3002<\/p>\n\n\n\n<p><strong>12. 
\u91cf\u5316\u6539\u8fdb<\/strong><\/p>\n\n\n\n<p>\u4e3a\u4e86\u6e05\u695a\u5730\u4e86\u89e3\u6211\u4eec\u7684\u5fae\u8c03\u8fc7\u7a0b\u5bf9\u6a21\u578b\u7684\u6539\u8fdb\u7a0b\u5ea6\uff0c\u6211\u4eec\u53ef\u4ee5\u8ba1\u7b97\u8bc4\u4f30\u6307\u6807\u4e2d\u7684\u6539\u8fdb\u767e\u5206\u6bd4\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code># Calculate improvement percentages<\/code><code>train_mae_improvement = (original_finetune_train_mae \u2014 finetuned_train_mae) \/ original_finetune_train_mae * 100<\/code><code>train_mape_improvement = (original_finetune_train_mape \u2014 finetuned_train_mape) \/ original_finetune_train_mape * 100<\/code><code>test_mae_improvement = (original_finetune_test_mae \u2014 finetuned_test_mae) \/ original_finetune_test_mae * 100<\/code><code>test_mape_improvement = (original_finetune_test_mape \u2014 finetuned_test_mape) \/ original_finetune_test_mape * 100<\/code><code><br><\/code><code>print(\u201c\\nImprovement after Fine-tuning:\u201d)<\/code><code>print(f\u201dTrain MAE Improvement: {train_mae_improvement:.2f}%\u201d)<\/code><code>print(f\u201dTrain MAPE Improvement: {train_mape_improvement:.2f}%\u201d)<\/code><code>print(f\u201dTest MAE Improvement: {test_mae_improvement:.2f}%\u201d)<\/code><code>print(f\u201dTest MAPE Improvement: {test_mape_improvement:.2f}%\u201d)<\/code><\/code><\/pre>\n\n\n\n<p>\u6b64\u4ee3\u7801\u8ba1\u7b97\u5e76\u6253\u5370\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u7684 MAE \u548c MAPE \u6539\u8fdb\u767e\u5206\u6bd4\u3002<\/p>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>\u4e94\u3001\u7ed3\u679c\u5206\u6790<\/strong><\/h3>\n\n\n\n<p>\u57fa\u4e8e\u5fae\u8c03\u6570\u636e\u96c6\u4e0a\u7684\u539f\u59cb<br>LSTM \u8f93\u51fa\uff1a\u8bad\u7ec3 MAE\uff1a326.92\uff0cMAPE\uff1a17.98%\uff0c\u6d4b\u8bd5 MAE\uff1a860.84\uff0cMAPE\uff1a32.48%<\/p>\n\n\n\n<p>\u5fae\u8c03\u6570\u636e\u96c6\u4e0a\u7684\u5fae\u8c03 LSTM\uff1a\u8bad\u7ec3 
MAE\uff1a246.51\uff0cMAPE\uff1a21.79%\uff0c\u6d4b\u8bd5 MAE\uff1a340.44\uff0cMAPE\uff1a12.20%<\/p>\n\n\n\n<figure class=\"wp-block-image size-full\"><img decoding=\"async\" src=\"https:\/\/www.laoyulaoyu.com\/wp-content\/uploads\/2024\/09\/426838f547360fdacf0ed240b6b6d25a.jpeg\" alt=\"\" class=\"wp-image-1874\"\/><\/figure>\n\n\n\n<p>SBI<\/p>\n\n\n\n<p>\u5fae\u8c03\u6570\u636e\u96c6\u4e0a\u7684\u539f\u59cb LSTM\uff1a\u8bad\u7ec3 MAE\uff1a14.49\uff0cMAPE\uff1a3.88% \u6d4b\u8bd5 MAE\uff1a67.14\uff0cMAPE\uff1a9.62%.<\/p>\n\n\n\n<p>\u5fae\u8c03\u6570\u636e\u96c6\u4e0a\u7684\u5fae\u8c03 LSTM\uff1a\u8bad\u7ec3 MAE\uff1a71.53\uff0cMAPE\uff1a27.42%\u6d4b\u8bd5 MAE\uff1a86.50\uff0cMAPE\uff1a11.62%.<\/p>\n\n\n\n<figure class=\"wp-block-image size-full\"><img decoding=\"async\" src=\"https:\/\/www.laoyulaoyu.com\/wp-content\/uploads\/2024\/09\/7dd475856fc2917117975a0db673c422.jpeg\" alt=\"\" class=\"wp-image-1875\"\/><\/figure>\n\n\n\n<p>\u603b\u4e4b\uff0c\u6211\u4eec\u53ef\u4ee5\u770b\u5230 \u7ed3\u679c\u559c\u5fe7\u53c2\u534a \u3002\u5bf9\u4e8e Reliance\uff0c\u6211\u4eec\u57fa\u4e8e\u8bc4\u5206\u7684\u5fae\u8c03\u65b9\u6cd5\u663e\u793a\u51fa\u63d0\u9ad8 LSTM \u80a1\u7968\u4ef7\u683c\u9884\u6d4b\u6027\u80fd\u7684\u524d\u666f\uff0c\u4f46 SBI \u5e76\u975e\u5982\u6b64\u3002\u56e0\u6b64\uff0c\u4e0e\u4efb\u4f55\u673a\u5668\u5b66\u4e60\u6a21\u578b\u4e00\u6837\uff0c\u5c24\u5176\u662f\u5728\u91d1\u878d\u9886\u57df\uff0c\u5e94\u8c28\u614e\u4f7f\u7528\u5e76\u4e0e\u5176\u4ed6\u5206\u6790\u5de5\u5177\u548c\u4e13\u4e1a\u77e5\u8bc6\u7ed3\u5408\u4f7f\u7528\u3002\u6b64\u5916\uff0c\u5b83\u662f\u4e00\u4e2a\u5b9e\u9a8c\u6027\u67b6\u6784\uff0c\u53ef\u80fd\u65e0\u6cd5\u5728\u6240\u6709\u573a\u666f\u4e2d\u90fd\u6709\u6548\u3002<\/p>\n\n\n\n<h3 
class=\"wp-block-heading\"><strong>\u516d\u3001\u4ee3\u7801<\/strong><\/h3>\n\n\n\n<p>\u4ee3\u7801\u662f\u5916\u7f51\u76f4\u63a5\u5f00\u6e90\u5728colab\u4e0a\u7684\uff0c\u611f\u5174\u8da3\u7684\u670b\u53cb\u5c31\u4e0d\u9700\u8981\u6587\u4e2d\u4e00\u70b9\u70b9\u7684\u590d\u5236\u9ecf\u8d34\u4e86\uff0c\u611f\u8c22\u5f00\u6e90\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><\/li>\n<\/ul>\n\n\n\n<pre class=\"wp-block-code\"><code><code>import yfinance as yf<\/code><code>import numpy as np<\/code><code>import pandas as pd<\/code><code>from sklearn.preprocessing import MinMaxScaler<\/code><code>import torch<\/code><code>import torch.nn as nn<\/code><code>import torch.optim as optim<\/code><code>from torch.utils.data import TensorDataset, DataLoader<\/code><code>import matplotlib.pyplot as plt<\/code><code><br><\/code><code># Check if GPU is available<\/code><code>device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")<\/code><code>print(f\"Using device: {device}\")<\/code><code><br><\/code><code># Fetch Reliance stock data<\/code><code>reliance = yf.Ticker(\"SBIN.NS\")<\/code><code>data = reliance.history(period=\"max\")&#91;'Close'].values.reshape(-1, 1)<\/code><code><br><\/code><code># Normalize the data<\/code><code>scaler = MinMaxScaler(feature_range=(0, 1))<\/code><code>data_normalized = scaler.fit_transform(data)<\/code><code><br><\/code><code>def create_sequences(data, seq_length):<\/code><code>    sequences = &#91;]<\/code><code>    targets = &#91;]<\/code><code>    for i in range(len(data) - seq_length):<\/code><code>        seq = data&#91;i:i+seq_length]<\/code><code>        target = data&#91;i+seq_length]<\/code><code>        sequences.append(seq)<\/code><code>        targets.append(target)<\/code><code>    return np.array(sequences), np.array(targets)<\/code><code><br><\/code><code># Parameters<\/code><code>seq_length = 60  # 60 days of historical data<\/code><code><br><\/code><code># Prepare data with sliding window<\/code><code>X, y = 
create_sequences(data_normalized, seq_length)<\/code><code><br><\/code><code># Split data for LSTM, Scoring, and Fine-tuning<\/code><code>lstm_split = int(0.5 * len(X))<\/code><code>scoring_split = int(0.75 * len(X))<\/code><code><br><\/code><code>X_lstm, y_lstm = X&#91;:lstm_split], y&#91;:lstm_split]<\/code><code>X_scoring, y_scoring = X&#91;lstm_split:scoring_split], y&#91;lstm_split:scoring_split]<\/code><code>X_finetuning, y_finetuning = X&#91;scoring_split:], y&#91;scoring_split:]<\/code><code><br><\/code><code># Further split LSTM data into train and test<\/code><code>lstm_train_split = int(0.8 * len(X_lstm))<\/code><code>X_lstm_train, y_lstm_train = X_lstm&#91;:lstm_train_split], y_lstm&#91;:lstm_train_split]<\/code><code>X_lstm_test, y_lstm_test = X_lstm&#91;lstm_train_split:], y_lstm&#91;lstm_train_split:]<\/code><code><br><\/code><code># LSTM Model<\/code><code>class LSTMModel(nn.Module):<\/code><code>    def __init__(self, input_size=1, hidden_size=50, num_layers=2, output_size=1):<\/code><code>        super(LSTMModel, self).__init__()<\/code><code>        self.hidden_size = hidden_size<\/code><code>        self.num_layers = num_layers<\/code><code>        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)<\/code><code>        self.fc = nn.Linear(hidden_size, output_size)<\/code><code><br><\/code><code>    def forward(self, x):<\/code><code>        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)<\/code><code>        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)<\/code><code>        out, _ = self.lstm(x, (h0, c0))<\/code><code>        out = self.fc(out&#91;:, -1, :])<\/code><code>        return out<\/code><code><br><\/code><code>lstm_model = LSTMModel().to(device)<\/code><code>criterion = nn.MSELoss()<\/code><code>optimizer = optim.Adam(lstm_model.parameters(), lr=0.001)<\/code><code><br><\/code><code># Train LSTM model<\/code><code>def train_model(model, train_data, 
train_targets, epochs=50, batch_size=32):<\/code><code>    train_data = torch.FloatTensor(train_data).to(device)<\/code><code>    train_targets = torch.FloatTensor(train_targets).to(device)<\/code><code>    train_dataset = TensorDataset(train_data, train_targets)<\/code><code>    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)<\/code><code><br><\/code><code>    model.train()<\/code><code>    for epoch in range(epochs):<\/code><code>        for batch_X, batch_y in train_loader:<\/code><code>            optimizer.zero_grad()<\/code><code>            outputs = model(batch_X)<\/code><code>            loss = criterion(outputs, batch_y)<\/code><code>            loss.backward()<\/code><code>            optimizer.step()<\/code><code><br><\/code><code>        if (epoch + 1) % 10 == 0:<\/code><code>            print(f'Epoch &#91;{epoch+1}\/{epochs}], Loss: {loss.item():.4f}')<\/code><code><br><\/code><code>train_model(lstm_model, X_lstm_train, y_lstm_train)<\/code><code><br><\/code><code># LSTM Predictions and Evaluation<\/code><code>def predict_and_evaluate(model, X, y):<\/code><code>    model.eval()<\/code><code>    with torch.no_grad():<\/code><code>        X = torch.FloatTensor(X).to(device)<\/code><code>        predictions = model(X).cpu().numpy()<\/code><code><br><\/code><code>    y = scaler.inverse_transform(y.reshape(-1, 1))<\/code><code>    predictions = scaler.inverse_transform(predictions.reshape(-1, 1))<\/code><code><br><\/code><code>    mae = np.mean(np.abs(y - predictions))<\/code><code>    mape = np.mean(np.abs((y - predictions) \/ y)) * 100<\/code><code><br><\/code><code>    return predictions, mae, mape<\/code><code><br><\/code><code>lstm_train_preds, lstm_train_mae, lstm_train_mape = predict_and_evaluate(lstm_model, X_lstm_train, y_lstm_train)<\/code><code>lstm_test_preds, lstm_test_mae, lstm_test_mape = predict_and_evaluate(lstm_model, X_lstm_test, y_lstm_test)<\/code><code><br><\/code><code>print(f\"LSTM Train MAE: 
{lstm_train_mae:.2f}, MAPE: {lstm_train_mape:.2f}%\")<\/code><code>print(f\"LSTM Test MAE: {lstm_test_mae:.2f}, MAPE: {lstm_test_mape:.2f}%\")<\/code><code><br><\/code><code># Scoring Model<\/code><code>class ScoringModel(nn.Module):<\/code><code>    def __init__(self, input_size=seq_length+1, hidden_size=32, output_size=1):<\/code><code>        super(ScoringModel, self).__init__()<\/code><code>        self.fc1 = nn.Linear(input_size, hidden_size)<\/code><code>        self.fc2 = nn.Linear(hidden_size, output_size)<\/code><code>        self.relu = nn.ReLU()<\/code><code>        self.sigmoid = nn.Sigmoid()<\/code><code><br><\/code><code>    def forward(self, x):<\/code><code>        out = self.relu(self.fc1(x))<\/code><code>        out = self.sigmoid(self.fc2(out))<\/code><code>        return out<\/code><code><br><\/code><code>scoring_model = ScoringModel().to(device)<\/code><code>scoring_criterion = nn.MSELoss()<\/code><code>scoring_optimizer = optim.Adam(scoring_model.parameters(), lr=0.001)<\/code><code><br><\/code><code># Prepare scoring data<\/code><code>def prepare_scoring_data(X, y):<\/code><code>    lstm_model.eval()<\/code><code>    with torch.no_grad():<\/code><code>        X_tensor = torch.FloatTensor(X).to(device)<\/code><code>        predictions = lstm_model(X_tensor).cpu().numpy()<\/code><code><br><\/code><code>    scoring_X = np.concatenate(&#91;X.reshape(X.shape&#91;0], -1), predictions], axis=1)<\/code><code>    scoring_y = np.abs(y - predictions.reshape(-1, 1))  # Use absolute error as the score<\/code><code>    return scoring_X, scoring_y<\/code><code><br><\/code><code>X_scoring_train, y_scoring_train = prepare_scoring_data(X_scoring&#91;:int(0.8*len(X_scoring))], y_scoring&#91;:int(0.8*len(X_scoring))])<\/code><code>X_scoring_test, y_scoring_test = prepare_scoring_data(X_scoring&#91;int(0.8*len(X_scoring)):], y_scoring&#91;int(0.8*len(X_scoring)):])<\/code><code><br><\/code><code># Train scoring model<\/code><code>train_model(scoring_model, 
X_scoring_train, y_scoring_train, epochs=30)<\/code><code><br><\/code><code># Fine-tuning function<\/code><code>def fine_tune_lstm(lstm_model, scoring_model, X, y, epochs=10, lr=0.0001):<\/code><code>    fine_tune_optimizer = optim.Adam(lstm_model.parameters(), lr=lr)<\/code><code>    X_tensor = torch.FloatTensor(X).to(device)<\/code><code>    y_tensor = torch.FloatTensor(y).to(device)<\/code><code><br><\/code><code>    for epoch in range(epochs):<\/code><code>        lstm_model.train()<\/code><code>        total_loss = 0<\/code><code>        for i in range(len(X)):<\/code><code>            fine_tune_optimizer.zero_grad()<\/code><code>            lstm_output = lstm_model(X_tensor&#91;i].unsqueeze(0))<\/code><code>            loss = criterion(lstm_output, y_tensor&#91;i].unsqueeze(0))<\/code><code><br><\/code><code>            # Get score from scoring model<\/code><code>            scoring_input = torch.cat(&#91;X_tensor&#91;i].reshape(1, -1), lstm_output.detach()], dim=1)<\/code><code>            score = scoring_model(scoring_input)<\/code><code><br><\/code><code>            # Adjust loss based on score<\/code><code>            adjusted_loss = loss * (1 + score.item())<\/code><code>            adjusted_loss.backward()<\/code><code>            fine_tune_optimizer.step()<\/code><code>            total_loss += adjusted_loss.item()<\/code><code><br><\/code><code>        if (epoch + 1) % 5 == 0:<\/code><code>            print(f'Fine-tuning Epoch &#91;{epoch+1}\/{epochs}], Avg Loss: {total_loss\/len(X):.4f}')<\/code><code><br><\/code><code># Split fine-tuning data<\/code><code>X_finetune_train, y_finetune_train = X_finetuning&#91;:int(0.8*len(X_finetuning))], y_finetuning&#91;:int(0.8*len(X_finetuning))]<\/code><code>X_finetune_test, y_finetune_test = X_finetuning&#91;int(0.8*len(X_finetuning)):], y_finetuning&#91;int(0.8*len(X_finetuning)):]<\/code><code><br><\/code><code># Predictions with original LSTM on fine-tuning dataset<\/code><code>original_finetune_train_preds, 
original_finetune_train_mae, original_finetune_train_mape = predict_and_evaluate(lstm_model, X_finetune_train, y_finetune_train)<\/code><code>original_finetune_test_preds, original_finetune_test_mae, original_finetune_test_mape = predict_and_evaluate(lstm_model, X_finetune_test, y_finetune_test)<\/code><code><br><\/code><code>print(\"\\nOriginal LSTM on Fine-tuning Dataset:\")<\/code><code>print(f\"Train MAE: {original_finetune_train_mae:.2f}, MAPE: {original_finetune_train_mape:.2f}%\")<\/code><code>print(f\"Test MAE: {original_finetune_test_mae:.2f}, MAPE: {original_finetune_test_mape:.2f}%\")<\/code><code><br><\/code><code># Fine-tune LSTM<\/code><code>fine_tune_lstm(lstm_model, scoring_model, X_finetune_train, y_finetune_train)<\/code><code><br><\/code><code># Evaluate fine-tuned model<\/code><code>finetuned_train_preds, finetuned_train_mae, finetuned_train_mape = predict_and_evaluate(lstm_model, X_finetune_train, y_finetune_train)<\/code><code>finetuned_test_preds, finetuned_test_mae, finetuned_test_mape = predict_and_evaluate(lstm_model, X_finetune_test, y_finetune_test)<\/code><code><br><\/code><code>print(\"\\nFine-tuned LSTM on Fine-tuning Dataset:\")<\/code><code>print(f\"Train MAE: {finetuned_train_mae:.2f}, MAPE: {finetuned_train_mape:.2f}%\")<\/code><code>print(f\"Test MAE: {finetuned_test_mae:.2f}, MAPE: {finetuned_test_mape:.2f}%\")<\/code><code><br><\/code><code># Plot results<\/code><code>plt.figure(figsize=(20, 15))<\/code><code><br><\/code><code># Training set comparison<\/code><code>plt.subplot(2, 1, 1)<\/code><code>plt.plot(scaler.inverse_transform(y_finetune_train), label='Actual', color='black')<\/code><code>plt.plot(original_finetune_train_preds, label='Original LSTM', color='blue', alpha=0.7)<\/code><code>plt.plot(finetuned_train_preds, label='Fine-tuned LSTM', color='red', alpha=0.7)<\/code><code>plt.title('Predictions on Fine-tuning Training Set')<\/code><code>plt.legend()<\/code><code><br><\/code><code># Test set 
comparison<\/code><code>plt.subplot(2, 1, 2)<\/code><code>plt.plot(scaler.inverse_transform(y_finetune_test), label='Actual', color='black')<\/code><code>plt.plot(original_finetune_test_preds, label='Original LSTM', color='blue', alpha=0.7)<\/code><code>plt.plot(finetuned_test_preds, label='Fine-tuned LSTM', color='red', alpha=0.7)<\/code><code>plt.title('Predictions on Fine-tuning Test Set')<\/code><code>plt.legend()<\/code><code><br><\/code><code>plt.tight_layout()<\/code><code>plt.show()<\/code><code><br><\/code><code># Calculate improvement percentages<\/code><code>train_mae_improvement = (original_finetune_train_mae - finetuned_train_mae) \/ original_finetune_train_mae * 100<\/code><code>train_mape_improvement = (original_finetune_train_mape - finetuned_train_mape) \/ original_finetune_train_mape * 100<\/code><code>test_mae_improvement = (original_finetune_test_mae - finetuned_test_mae) \/ original_finetune_test_mae * 100<\/code><code>test_mape_improvement = (original_finetune_test_mape - finetuned_test_mape) \/ original_finetune_test_mape * 100<\/code><code><br><\/code><code>print(\"\\nImprovement after Fine-tuning:\")<\/code><code>print(f\"Train MAE Improvement: {train_mae_improvement:.2f}%\")<\/code><code>print(f\"Train MAPE Improvement: {train_mape_improvement:.2f}%\")<\/code><code>print(f\"Test MAE Improvement: {test_mae_improvement:.2f}%\")<\/code><code>print(f\"Test MAPE Improvement: {test_mape_improvement:.2f}%\")<\/code><\/code><\/pre>\n\n\n\n<hr class=\"wp-block-separator has-alpha-channel-opacity\"\/>\n\n\n\n<p><\/p>\n\n\n\n<p><\/p>\n\n\n\n<p><\/p>\n\n\n\n<p class=\"has-text-align-center\"><strong><mark style=\"background-color:#ffffff\" class=\"has-inline-color has-cyan-bluish-gray-color\">\u672c<\/mark><\/strong><strong><mark style=\"background-color:#ffffff\" class=\"has-inline-color 
has-cyan-bluish-gray-color\">\u6587\u5185\u5bb9\u4ec5\u4ec5\u662f\u6280\u672f\u63a2\u8ba8\u548c\u5b66\u4e60\uff0c\u5e76\u4e0d\u6784\u6210\u4efb\u4f55\u6295\u8d44\u5efa\u8bae\u3002<\/mark><\/strong><\/p>\n\n\n\n<p class=\"has-text-align-center\"><strong><mark style=\"background-color:#ffffff\" class=\"has-inline-color has-cyan-bluish-gray-color\">\u8f6c\u53d1\u8bf7\u6ce8\u660e\u539f\u4f5c\u8005\u548c\u51fa\u5904\u3002<\/mark><\/strong><\/p>\n","protected":false},"excerpt":{"rendered":"<p>\u4f5c\u8005\uff1a\u8001\u4f59\u635e\u9c7c \u539f\u521b\u4e0d\u6613\uff0c\u8f6c\u8f7d\u8bf7\u6807\u660e\u51fa\u5904\u53ca\u539f\u4f5c\u8005\u3002&#8230;<\/p>\n<div class=\"more-link-wrapper\"><a class=\"more-link\" href=\"https:\/\/laoyulaoyu.com\/index.php\/2024\/09\/27\/rlhf-%e7%9a%84%e5%90%af%e7%a4%ba%ef%bc%9a%e5%be%ae%e8%b0%83-lstm-%e8%83%bd%e6%9b%b4%e5%a5%bd%e9%a2%84%e6%b5%8b%e8%82%a1%e7%a5%a8%ef%bc%9f\/\">Continue reading<span class=\"screen-reader-text\">RLHF \u7684\u542f\u793a\uff1a\u5fae\u8c03 LSTM 
\u80fd\u66f4\u597d\u9884\u6d4b\u80a1\u7968\uff1f<\/span><\/a><\/div>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[2],"tags":[5,6],"class_list":["post-1452","post","type-post","status-publish","format-standard","hentry","category-aiinvest","tag-ai","tag-6","entry"],"_links":{"self":[{"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/posts\/1452","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/comments?post=1452"}],"version-history":[{"count":2,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/posts\/1452\/revisions"}],"predecessor-version":[{"id":1454,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/posts\/1452\/revisions\/1454"}],"wp:attachment":[{"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/media?parent=1452"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/categories?post=1452"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/laoyulaoyu.com\/index.php\/wp-json\/wp\/v2\/tags?post=1452"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}