
Applying Q-learning in a custom environment (python, reinforcement-learning, openai)


I am trying to apply Q-learning to my custom reinforcement-learning environment, which represents energy-storage arbitrage (trading electricity with a battery: charging when the price is low and discharging when the price rises). The environment works, but I have not been able to apply Q-learning to it. Below the environment is a script that runs it, but I am not sure what I should use as the state variable. Any ideas on how to apply Q-learning to optimize the charge/discharge cycles? The reset function starts on the next day of a dataset that contains hourly electricity prices. A picture of the dataframe is shown below.
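For reference, the generic tabular Q-learning update that has to be applied on top of whatever state representation is chosen looks like the following minimal sketch. All names here (q_table, alpha, gamma, the toy transition) are placeholders and not part of the environment code below:

import numpy as np

# Generic tabular Q-learning update, placeholder names and toy sizes only:
# Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a'))
q_table = np.zeros((24, 3))                       # e.g. 24 hourly states x 3 actions
alpha, gamma = 0.1, 0.95                          # learning rate and discount factor
state, action, next_state, reward = 0, 1, 1, -1   # one made-up transition

best_next_q = np.max(q_table[next_state])
q_table[state, action] = (1 - alpha) * q_table[state, action] + alpha * (reward + gamma * best_next_q)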

import gym
import numpy as np
from gym import spaces


class BatteryEnv(gym.Env):

    def __init__(self, df):
        self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}
        self.df = df
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))

        self.reward_list = []
        self.actual_load_list = []  # observations
        self.soE_list = []  # state of energy

        self.state_idx = 0  # iteration (hour of the day)
        self.soE = 0  # state of energy (SOE)
        self.MAX_charge = 20  # C-rate, roughly
        self.Capacity = 100

    def step(self, action):
        # map the integer action to its label for the actual load calculation
        str_action = self.dict_actions[action]

        # advance the state index within the episode (1 step = 1 hour)
        self.state_idx += 1

        # calculate our actual load
        if str_action == 'charge' and self.soE < self.Capacity:
            SOE_charge = min(self.Capacity - self.soE, self.MAX_charge)
            self.soE += SOE_charge
            obs = SOE_charge * self.df['prices'][self.state_idx]

        elif str_action == 'discharge' and self.soE > 0:
            SOE_discharge = min(self.soE, self.MAX_charge)
            self.soE -= SOE_discharge
            obs = -SOE_discharge * self.df['prices'][self.state_idx]

        else:
            obs = 0 * self.df['prices'][self.state_idx]

        # append the actual load for monitoring and comparison purposes
        self.actual_load_list.append(obs)
        self.soE_list.append(self.soE)

        # reward system: a positive obs means we are spending money, a negative obs means we are earning
        if obs < 0:
            reward = 1
        else:
            reward = -1

        # append the current reward for monitoring and comparison purposes
        self.reward_list.append(reward)

        # check whether our episode (one-day interval) ends
        if self.df.iloc[self.state_idx, :].Daynum != self.df.iloc[self.state_idx - 1, :].Daynum:
            done = True
        else:
            done = False

        return obs, reward, done

    def reset(self):
        return self.df.iloc[self.state_idx, :]

    def render(self):
        pass

The following code shows that the environment runs.

# assumes the environment was created beforehand, e.g. env = BatteryEnv(df)
for episode in range(7):
    observation = env.reset()
    for t in range(24):  # can't be smaller than 24, as 24 time points equal 1 episode (1 day)
        # print(observation)
        action = env.action_space.sample()  # random actions
        observation, reward, done = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            print(observation)
            print(reward)
            break

Each timestep is one hour, prices is the electricity price for that hour, and Daynum is the day number out of 365.
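The picture of the dataframe is not reproduced here, but a rough stand-in with made-up numbers, assuming only the two columns the code actually uses (prices and Daynum), would look like this:

import numpy as np
import pandas as pd

# Hypothetical stand-in for the real dataset: one week of hourly prices.
# Column names match what BatteryEnv expects: 'prices' and 'Daynum'.
hours = 24 * 7
df = pd.DataFrame({
    'prices': np.random.uniform(10, 60, size=hours),  # made-up hourly prices
    'Daynum': np.repeat(np.arange(1, 8), 24),         # day index for each hour
})

env = BatteryEnv(df)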

Solution

I think I was able to get the code working with Q-learning. However, the reward and reset functions still need some work to perform better.
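One possible direction for the reward, sketched here purely as an illustration and not implemented in the class below, is to reward the actual cash flow of each hour instead of a flat ±1:

# Illustrative alternative reward (not used in the class below): reward the hour's cash flow itself.
# obs is positive when buying energy and negative when selling, so the negated value is the profit.
obs = -12.5      # e.g. 25 kWh sold at 0.5 per kWh during this hour (made-up numbers)
reward = -obs    # profit-based reward instead of a flat +1 / -1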

class BatteryEnv(gym.Env):

    def __init__(self, prices=np.array(df.prices), daynum=np.array(df.Daynum)):
        self.prices = prices
        self.daynum = daynum

        self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}

        self.action_space = spaces.Discrete(3)

        # our observation space is just one float value - our load
        self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))

        # reward list for monitoring
        self.reward_list = []

        # lists for monitoring
        self.actual_load_list = []
        self.SOE_list = []  # state of energy
        self.chargio = []   # charge & discharge
        self.SOEe = []      # state of energy

        # index of the current state within the current episode
        self.state_idx = 0  # iteration
        self.SOE = 0  # state of energy
        self.MAX_charge = 20  # C-rate, roughly
        self.Capacity = 100

        self.state = 0

    def step(self, action):
        # map the integer action to its label for the actual load calculation
        str_action = self.dict_actions[action]

        # advance the state index within the episode (1 step = 1 hour)
        self.state_idx += 1

        # calculate our actual load, keeping self.SOE and self.state in sync
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = min(self.Capacity - self.SOE, self.MAX_charge)
            self.SOE += SOE_charge
            self.state = self.SOE
            self.SOEe.append(self.SOE)
            self.chargio.append(SOE_charge)
            obs = SOE_charge * self.prices[self.state_idx]

        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = min(self.SOE, self.MAX_charge)
            self.SOE -= SOE_discharge
            self.state = self.SOE
            self.SOEe.append(self.SOE)
            self.chargio.append(-SOE_discharge)
            obs = -SOE_discharge * self.prices[self.state_idx]

        else:
            self.chargio.append(0)
            self.SOEe.append(self.SOE)
            obs = 0

        # append the actual load for monitoring and comparison purposes
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)

        # reward system: a positive obs means we are spending money, a negative obs means we are earning
        if obs < 0:
            reward = 1
        else:
            reward = -1

        # append the current reward for monitoring and comparison purposes
        self.reward_list.append(reward)

        # check whether our episode (one-day interval) ends
        if self.daynum[self.state_idx] != self.daynum[self.state_idx - 1]:
            done = True
        else:
            done = False

        info = {
            # 'step': self.state_idx, 'SOE': self.SOE, 'reward': reward, 'chargio': self.chargio
        }

        return obs, reward, done, info

    def reset(self):
        self.state = 0
        self.SOE = 0
        return self.state

    def render(self):
        pass

Applying Q-learning:

env = BatteryEnv()
env.reset()

# discretize the one-dimensional observation into 20 buckets
discrete_os_size = [20] * len(env.observation_space.high)
discrete_os_win_size = (env.observation_space.high -
                        env.observation_space.low) / discrete_os_size
discrete_os_win_size  # 20 buckets of width 5

learning_rate = 0.1
discount = 0.95  # measure of how important future rewards are
episodes = 25000

q_table = np.random.uniform(low=-2, high=2, size=(discrete_os_size + [env.action_space.n]))


def get_discrete_state(state):  # change SOE for other states
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # clip so that observations outside [low, high] still map to a valid bucket
    discrete_state = np.clip(discrete_state.astype(int), 0, np.array(discrete_os_size) - 1)
    return tuple(discrete_state.flatten())


discrete_state = get_discrete_state(env.reset())

SOE = []

for episode in range(episodes):
    if episode % 5000 == 0:
        print(episode)

    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        action = np.argmax(q_table[discrete_state])  # greedy action selection
        new_state, reward, done, info = env.step(action)

        new_discrete_state = get_discrete_state(new_state)

        if not done:
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action,)]
            new_q = (1 - learning_rate) * current_q + learning_rate * (reward + discount * max_future_q)
            q_table[discrete_state + (action,)] = new_q

        discrete_state = new_discrete_state

        SOE.append(new_state)

        print(reward, new_state)
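As a quick sanity check (a sketch of my own, not part of the original answer, reusing the env, q_table and get_discrete_state defined above), the learned table can be rolled out greedily for one episode to inspect the decisions and the total reward:

# Hypothetical evaluation rollout using the objects defined above.
state = get_discrete_state(env.reset())
total_reward = 0
done = False
while not done:
    action = np.argmax(q_table[state])  # always exploit the learned Q-values
    obs, reward, done, info = env.step(action)
    total_reward += reward
    state = get_discrete_state(obs)

print('greedy episode reward:', total_reward)
print('last 24 charge/discharge decisions:', env.chargio[-24:])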
