[Help] How do I replace the code in py1 with the code from py2?

jtwc posted on 2023-9-4 15:51
Dear experts, how do I replace the code in py1 with the code from py2? Thank you!
[Python] code:
import numpy as np
import pandas as pd
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

import matplotlib.pyplot as plt
import seaborn as sns
from collections import deque
import random

df = pd.read_csv('300565.csv')
# columns: timestamp, open, high, low, close
print(df.head())

class Agent:
    def __init__(self, state_size, window_size, trend, skip, batch_size):
        self.state_size = state_size  # size of the state vector
        self.window_size = window_size  # sliding-window length
        self.half_window = window_size // 2
        self.trend = trend  # the price series (data)
        self.skip = skip  # step size between actions; 1 means act at every time step
        self.action_size = 3  # action space: buy, sell, hold
        self.batch_size = batch_size
        self.memory = deque(maxlen=1000)  # replay buffer (double-ended queue)
        self.inventory = []  # current holdings

        self.gamma = 0.95  # reward discount factor
        self.epsilon = 0.5  # exploration rate (epsilon-greedy)
        self.epsilon_min = 0.01  # lower bound for epsilon
        self.epsilon_decay = 0.999  # multiplicative decay applied while above the lower bound

        tf.reset_default_graph()
        self.sess = tf.InteractiveSession()  # interactive session
        self.X = tf.placeholder(tf.float32, [None, self.state_size])  # state input
        self.Y = tf.placeholder(tf.float32, [None, self.action_size])  # target values per action
        feed = tf.layers.dense(self.X, 256, activation=tf.nn.relu)
        self.logits = tf.layers.dense(feed, self.action_size)  # value estimates for the 3 actions
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))  # mean-squared-error loss
        self.optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize(self.cost)  # optimizer
        self.sess.run(tf.global_variables_initializer())

    def act(self, state):  # choose an action
        if random.random() <= self.epsilon:  # with probability epsilon, explore at random
            return random.randrange(self.action_size)
        # otherwise take the action with the highest predicted value
        return np.argmax(
            self.sess.run(self.logits, feed_dict={self.X: state})[0]
        )

    def get_state(self, t):  # state at time step t
        window_size = self.window_size + 1
        d = t - window_size + 1
        # in the early days there is not enough history to fill the window,
        # so pad the front with the value at time 0
        block = self.trend[d: t + 1] if d >= 0 else -d * [self.trend[0]] + self.trend[0: t + 1]
        res = []
        for i in range(window_size - 1):
            res.append(block[i + 1] - block[i])  # price change at each step
        return np.array([res])  # used as the state encoding

    def replay(self, batch_size):
        mini_batch = []
        l = len(self.memory)
        for i in range(l - batch_size, l):
            mini_batch.append(self.memory[i])  # take the most recent transitions from memory
        replay_size = len(mini_batch)
        X = np.empty((replay_size, self.state_size))
        Y = np.empty((replay_size, self.action_size))

        # compute Q-values for the old and new states;
        # each transition is [state, action, reward, next_state, done],
        # so indices 0 and 3 hold the old and new states respectively
        states = np.array([a[0][0] for a in mini_batch])
        new_states = np.array([a[3][0] for a in mini_batch])
        Q = self.sess.run(self.logits, feed_dict={self.X: states})
        Q_new = self.sess.run(self.logits, feed_dict={self.X: new_states})
        # build the Q-learning targets
        for i in range(len(mini_batch)):
            state, action, reward, next_state, done = mini_batch[i]
            target = Q[i]
            target[action] = reward
            if not done:  # episode not finished: add the discounted value of the next state
                target[action] += self.gamma * np.amax(Q_new[i])
            # if it finished, there is no follow-up action, so the target is just the reward
            X[i] = state
            Y[i] = target
        cost, _ = self.sess.run(
            [self.cost, self.optimizer], feed_dict={self.X: X, self.Y: Y}
        )
        # decay the exploration rate
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        return cost

    def buy(self, initial_money):
        starting_money = initial_money  # starting capital
        states_sell = []
        states_buy = []
        inventory = []  # current holdings
        state = self.get_state(0)  # initial state
        for t in range(0, len(self.trend) - 1, self.skip):
            action = self.act(state)  # choose an action from the current state
            next_state = self.get_state(t + 1)  # state at the next time step
            # action == 1 means buy, provided there is enough money and enough data left
            if action == 1 and initial_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
                inventory.append(self.trend[t])  # buy one unit
                initial_money -= self.trend[t]  # pay the current price
                states_buy.append(t)  # record the buy
                print('day %d: buy 1 unit at price %f, total balance %f' % (t, self.trend[t], initial_money))

            # action == 2 means sell (only if something is held)
            elif action == 2 and len(inventory):
                bought_price = inventory.pop(0)  # sell the oldest unit
                initial_money += self.trend[t]  # receive the current price
                states_sell.append(t)  # record the sell
                print('day %d: sell 1 unit at price %f (bought at %f), total balance %f'
                      % (t, self.trend[t], bought_price, initial_money))

            state = next_state  # advance to the next state

        # overall return of the test run, computed after the whole series has been traversed
        invest = ((initial_money - starting_money) / starting_money) * 100
        total_gains = initial_money - starting_money
        return states_buy, states_sell, total_gains, invest

    def train(self, iterations, checkpoint, initial_money):
        # run the whole price series for several iterations
        for i in range(iterations):
            total_profit = 0  # accumulated profit
            inventory = []
            state = self.get_state(0)
            starting_money = initial_money
            for t in range(0, len(self.trend) - 1, self.skip):
                action = self.act(state)
                next_state = self.get_state(t + 1)

                if action == 1 and starting_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
                    inventory.append(self.trend[t])
                    starting_money -= self.trend[t]

                elif action == 2 and len(inventory) > 0:
                    bought_price = inventory.pop(0)
                    total_profit += self.trend[t] - bought_price
                    starting_money += self.trend[t]

                invest = ((starting_money - initial_money) / initial_money)
                self.memory.append((state, action, invest,
                                    next_state, starting_money < initial_money))
                state = next_state
                batch_size = min(self.batch_size, len(self.memory))
                cost = self.replay(batch_size)
            if (i + 1) % checkpoint == 0:
                print('epoch: %d, total rewards: %.3f, cost: %f, total money: %f' % (
                    i + 1, total_profit, cost, starting_money))

close = df.close.values.tolist()  # use the closing prices as the test series
initial_money = 10000
window_size = 30
skip = 1
batch_size = 32

agent = Agent(state_size = window_size,
              window_size = window_size,
              trend = close,
              skip = skip,
              batch_size = batch_size)
#"""
agent.train(iterations = 200, checkpoint = 10, initial_money = initial_money)
#"""

states_buy, states_sell, total_gains, invest = agent.buy( initial_money = initial_money)

fig = plt.figure(figsize = (15,5))
plt.plot(close, color='r', lw=2.)
plt.plot(close, '^', markersize=10, color='m', label = 'buying signal', markevery = states_buy)
plt.plot(close, 'v', markersize=10, color='k', label = 'selling signal', markevery = states_sell)
plt.title('total gains %f, total investment %f%%'%(total_gains, invest))
plt.legend()
plt.savefig('tmp.png')
plt.show()
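
A side note on the replay() method above: the target it fits is the standard one-step Q-learning target, i.e. the observed reward plus the discounted best value of the next state. A tiny standalone example with made-up numbers (illustrative only, not taken from the data above):

[Python] code:
import numpy as np

gamma = 0.95                        # discount factor, same value as in the Agent
reward = 1.0                        # reward observed for the chosen action (toy value)
Q_next = np.array([0.2, 0.7, 0.1])  # network estimates for the next state (toy values)

# if the episode is not done: target = reward + gamma * max over a' of Q(next_state, a')
target = reward + gamma * np.amax(Q_next)
print(target)  # 1.665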

jtwc (OP) posted on 2023-9-4 15:52
[Python] code:
# define the two network classes: Actor and Critic
class Actor:
    def __init__(self, name, input_size, output_size, size_layer):
        with tf.variable_scope(name):
            self.X = tf.placeholder(tf.float32, (None, input_size))
            feed_actor = tf.layers.dense(self.X, size_layer, activation = tf.nn.relu)
            self.logits = tf.layers.dense(feed_actor, output_size)

class Critic:
    def __init__(self, name, input_size, output_size, size_layer, learning_rate):
        with tf.variable_scope(name):
            self.X = tf.placeholder(tf.float32, (None, input_size))
            self.Y = tf.placeholder(tf.float32, (None, output_size))
            self.REWARD = tf.placeholder(tf.float32, (None, 1))
            feed_critic = tf.layers.dense(self.X, size_layer, activation = tf.nn.relu)
            feed_critic = tf.layers.dense(feed_critic, output_size, activation = tf.nn.relu) + self.Y
            feed_critic = tf.layers.dense(feed_critic, size_layer//2, activation = tf.nn.relu)
            self.logits = tf.layers.dense(feed_critic, 1)
            self.cost = tf.reduce_mean(tf.square(self.REWARD - self.logits))
            self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)

# copy trainable weights from one network scope to another
def _assign(self, from_name, to_name):
        from_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=from_name)
        to_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=to_name)
        for i in range(len(from_w)):
            assign_op = to_w[i].assign(from_w[i])
            self.sess.run(assign_op)

def _construct_memories_and_train(self, replay):
        states = np.array([a[0] for a in replay])
        new_states = np.array([a[3] for a in replay])
        Q = self.sess.run(self.actor.logits, feed_dict={self.actor.X: states})
        Q_target = self.sess.run(self.actor_target.logits, feed_dict={self.actor_target.X: states})
        grads = self.sess.run(self.grad_critic, feed_dict={self.critic.X:states, self.critic.Y:Q})[0]
        self.sess.run(self.optimizer, feed_dict={self.actor.X:states, self.actor_critic_grad:grads})
        
        # compute the reward targets
        rewards = np.array([a[2] for a in replay]).reshape((-1, 1))
        rewards_target = self.sess.run(self.critic_target.logits, 
                                       feed_dict={self.critic_target.X:new_states,self.critic_target.Y:Q_target})
        for i in range(len(replay)):
            if not replay[i][-1]:  # the done flag of the i-th transition
                rewards[i] += self.GAMMA * rewards_target[i]
        cost, _ = self.sess.run([self.critic.cost, self.critic.optimizer], 
                                feed_dict={self.critic.X:states, self.critic.Y:Q, self.critic.REWARD:rewards})
        return cost
py2 source code
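
Since py2 as posted only contains the two network classes and the two stand-alone helper functions, the usual way to move them into py1 is to replace the single dense Q-network built in py1's Agent.__init__ with the Actor/Critic pairs and turn the helpers into methods of the class. Below is a minimal sketch of the missing glue code; the class name ActorCriticAgent, the scope names, and the hyper-parameters are my own assumptions, not something taken from the posts above.

[Python] code:
import tensorflow.compat.v1 as tf
from collections import deque

tf.disable_v2_behavior()


class ActorCriticAgent:
    """Sketch only: reuses the Actor and Critic classes posted above."""

    def __init__(self, state_size, window_size, trend, skip,
                 layer_size=256, learning_rate=1e-3, gamma=0.95):
        self.state_size = state_size
        self.window_size = window_size
        self.half_window = window_size // 2
        self.trend = trend
        self.skip = skip
        self.action_size = 3              # buy / sell / hold, as in py1
        self.GAMMA = gamma                # read by _construct_memories_and_train
        self.memory = deque(maxlen=1000)

        tf.reset_default_graph()
        # one online/target pair for the actor and one for the critic
        self.actor = Actor('actor-online', state_size, self.action_size, layer_size)
        self.actor_target = Actor('actor-target', state_size, self.action_size, layer_size)
        self.critic = Critic('critic-online', state_size, self.action_size,
                             layer_size, learning_rate)
        self.critic_target = Critic('critic-target', state_size, self.action_size,
                                    layer_size, learning_rate)

        # gradient of the critic's output with respect to its action input;
        # it is fed back to the actor through the actor_critic_grad placeholder
        self.grad_critic = tf.gradients(self.critic.logits, self.critic.Y)
        self.actor_critic_grad = tf.placeholder(tf.float32, [None, self.action_size])
        actor_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='actor-online')
        actor_grads = tf.gradients(self.actor.logits, actor_weights,
                                   -self.actor_critic_grad)
        self.optimizer = tf.train.AdamOptimizer(learning_rate).apply_gradients(
            list(zip(actor_grads, actor_weights)))

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

    # Paste _assign and _construct_memories_and_train from the post above into the
    # class body here (indented one level) so they become methods; they only rely
    # on the attributes created in __init__. get_state, buy, train and act can then
    # be copied over from py1's Agent, with self.logits replaced by self.actor.logits
    # and replay() replaced by _construct_memories_and_train (using _assign to
    # refresh the target networks every so often).

With that in place, the rest of py1 (loading the CSV and the plotting code at the end) does not need to change; only the network construction and the update step differ.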