Strange uninitialized errors: "is not initialized"
xtl8541 · posted 2020-07 · views: 3858 · replies: 14

I keep running into all kinds of "is not initialized" errors. Doesn't running fluid.default_startup_program() initialize all the variables??
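For context, a minimal sketch of the usual fluid 1.x pattern: exe.run(fluid.default_startup_program()) initializes learnable parameters (e.g. fc weights), while fluid.data placeholders only receive values through feed at run time.

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.layers.fc(input=x, size=1)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())  # initializes the fc parameters only

# every fluid.data var consumed by the program must be fed, otherwise the
# executor raises "is not initialized"
out = exe.run(fluid.default_main_program(),
              feed={'x': np.ones([1, 4], dtype=np.float32)},
              fetch_list=[y])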

14 replies in total · last reply by 鹿鼎记肯定, 2020-08
#15 鹿鼎记肯定 replied 2020-08
#13 xtl8541 replied:
[code]

This one is simple: your fluid.data definitions include three vars that the program actually uses, state, reward, and next_reward_, but the first exe.run feeds only state; it's only the second exe.run that fills all three.

So the first exe.run is missing two variables and cannot compute self.td_error = reward + self.gamma * next_reward_ - self.value.

I don't know what task you're working on, but the code below may serve as a reference:

import numpy as np
import paddle.fluid as fluid
import gym


class Critic(object):
    def __init__(self, exe, gamma, state_dim, lr):
        # self.exe = fluid.Executor(fluid.CPUPlace())
        self.exe = exe
        self.state_dim = state_dim
        self.gamma = gamma
        self.lr = lr
        self._built_net()

    def _built_net(self):
        self.critic_program = fluid.Program()
        with fluid.program_guard(self.critic_program):
            state = fluid.data(name='state', shape=[None, self.state_dim], dtype='float32')
            reward = fluid.data(name='reward', shape=[None, 1], dtype='float32')
            next_reward = fluid.data(name='next_reward', shape=[None, 1], dtype='float32')  # declared but never used below
            next_reward_ = fluid.data(name='next_reward_', shape=[None, 1], dtype='float32')
            # build the network
            h1 = fluid.layers.fc(input=state, size=24, act='relu')
            self.value = fluid.layers.fc(input=h1, size=1)
            self.td_error = reward + self.gamma * next_reward_ - self.value
            cost = fluid.layers.reduce_mean(fluid.layers.square(self.td_error))

            self.critic_program_test = self.critic_program.clone(for_test=True)
            fluid.optimizer.AdamOptimizer(learning_rate=self.lr).minimize(cost)

    def train(self, state, reward, next_state):
        # every fluid.data var consumed by the program must be fed, even though
        # only 'state' affects self.value, so pass dummy values for the other two
        next_reward = self.exe.run(self.critic_program_test,
                                   feed={'state': next_state,
                                         'reward': np.zeros([1, 1], dtype=np.float32),  # added var, dummy feed
                                         'next_reward_': np.zeros([1, 1], dtype=np.float32)},  # added var, dummy feed
                                   fetch_list=[self.value])[0]
        print(next_reward.shape)
        td_error = self.exe.run(self.critic_program,
                                feed={'state': state,
                                      'reward': reward,
                                      'next_reward_': next_reward},
                                fetch_list=[self.td_error])[0]


exe = fluid.Executor(fluid.CPUPlace())
env = gym.make('CartPole-v1')
critic = Critic(exe, gamma=0.9, state_dim=4, lr=0.005)
exe.run(fluid.default_startup_program())

state = env.reset()
next_state, reward, done, info = env.step(0)


def dat(data):
    return np.expand_dims(data, axis=0).astype(np.float32)


critic.train(dat(state), dat(reward), dat(next_state))
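A note on the dummy feeds above: the executor runs every op in critic_program_test, so each fluid.data var an op consumes must be fed even when it cannot affect the fetched self.value. If your Paddle build's Executor.run supports the use_prune flag (it appears in the 1.8 signature), pruning to the fetch list is an alternative sketch that avoids the dummies; it would replace the first exe.run inside train():

# hedged alternative (assumes Executor.run accepts use_prune, as in Paddle 1.8):
# pruning drops the td_error ops, so only 'state' has to be fed here
next_reward = self.exe.run(self.critic_program_test,
                           feed={'state': next_state},
                           fetch_list=[self.value],
                           use_prune=True)[0]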

Hope this helps.

#14 xtl8541 replied 2020-08
#12 鹿鼎记肯定 replied:
Could you post the error traceback?

D:\kj7541\Anaconda3\envs\pdl\python.exe F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py
D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\executor.py:1070: UserWarning: The following exception is not an EOF exception.
"The following exception is not an EOF exception.")
Traceback (most recent call last):
  File "F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py", line 56, in <module>
    critic.train(dat(state), dat(reward), dat(next_state))
  File "F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py", line 34, in train
    fetch_list=[self.value])[0]
  File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\executor.py", line 1071, in run
    six.reraise(*sys.exc_info())
  File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\six.py", line 703, in reraise
    raise value
  File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\executor.py", line 1066, in run
    return_merged=return_merged)
  File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\executor.py", line 1154, in _run_impl
    use_program_cache=use_program_cache)
  File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\executor.py", line 1229, in _run_program
    fetch_var_name)
paddle.fluid.core_noavx.EnforceNotMet:

--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
Windows not support stack backtrace yet.

------------------------------------------
Python Call Stacks (More useful to users):
------------------------------------------
File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\framework.py", line 2610, in append_op
attrs=kwargs.get("attrs", None))
File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\layers\math_op_patch.py", line 158, in _scalar_elementwise_op_
"bias": bias})
File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\layers\math_op_patch.py", line 174, in _scalar_elementwise_mul_
return _scalar_elementwise_op_(var, value, 0.0)
File "D:\kj7541\Anaconda3\envs\pdl\lib\site-packages\paddle\fluid\layers\math_op_patch.py", line 193, in __impl__
return scalar_method(self, other_var)
File "F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py", line 25, in _built_net
self.td_error = reward + self.gamma * next_reward_ - self.value
File "F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py", line 13, in __init__
self._built_net()
File "F:/kj7541/AI/RL/MyStudy/AC_PDL/MY_AC_dygraph.py", line 45, in
critic = Critic(exe, gamma=0.9, state_dim=4, lr=0.005)

----------------------
Error Message Summary:
----------------------
InvalidArgumentError: The Tensor in the scale Op's Input Variable X(next_reward_) is not initialized.
[Hint: Expected t->IsInitialized() == true, but received t->IsInitialized():0 != true:1.] at (D:\1.8.1\paddle\paddle\fluid\framework\operator.cc:1289)
[operator < scale > error]

#13 xtl8541 replied 2020-08
#12 鹿鼎记肯定 replied:
Could you post the error traceback?
import numpy as np
import paddle.fluid as fluid
import gym


class Critic(object):
    def __init__(self, exe, gamma, state_dim, lr):
        # self.exe = fluid.Executor(fluid.CPUPlace())
        self.exe = exe
        self.state_dim = state_dim
        self.gamma = gamma
        self.lr = lr
        self._built_net()

    def _built_net(self):
        self.critic_program = fluid.Program()
        with fluid.program_guard(self.critic_program):
            state = fluid.data(name='state', shape=[None, self.state_dim], dtype='float32')
            reward = fluid.data(name='reward', shape=[None, 1], dtype='float32')
            next_reward = fluid.data(name='next_reward', shape=[None, 1], dtype='float32')
            next_reward_ = fluid.data(name='next_reward_', shape=[None, 1], dtype='float32')
            # build the network
            h1 = fluid.layers.fc(input=state, size=24, act='relu')
            self.value = fluid.layers.fc(input=h1, size=1)
            self.td_error = reward + self.gamma * next_reward_ - self.value
            cost = fluid.layers.reduce_mean(fluid.layers.square(self.td_error))

            self.critic_program_test = self.critic_program.clone(for_test=True)
            fluid.optimizer.AdamOptimizer(learning_rate=self.lr).minimize(cost)

    def train(self, state, reward, next_state):
        # only 'state' is fed here, so 'reward' and 'next_reward_' never get
        # values -- this is the run that raises "is not initialized"
        next_reward = self.exe.run(self.critic_program_test,
                                   feed={'state': next_state},
                                   fetch_list=[self.value])[0]
        print(next_reward.shape)
        td_error = self.exe.run(self.critic_program,
                                feed={'state': state,
                                      'reward': reward,
                                      'next_reward_': next_reward},
                                fetch_list=[self.td_error])[0]


exe = fluid.Executor(fluid.CPUPlace())
env = gym.make('CartPole-v1')
critic = Critic(exe, gamma=0.9, state_dim=4, lr=0.005)
exe.run(fluid.default_startup_program())

state = env.reset()
next_state, reward, done, info = env.step(0)


def dat(data):
    return np.expand_dims(data, axis=0).astype(np.float32)


critic.train(dat(state), dat(reward), dat(next_state))
#12 鹿鼎记肯定 replied 2020-08

Could you post the error traceback?

#11 xtl8541 replied 2020-08
#10 鹿鼎记肯定 replied:
Please file an issue at https://github.com/PaddlePaddle/Paddle/issues

I did. The official team hasn't sorted it out yet, and I just hit the same error again. Paddle suddenly feels pretty shoddy, but it's homegrown, so I'll keep supporting it anyway.

#10 鹿鼎记肯定 replied 2020-08
#9 xtl8541 replied:
Tried it, didn't help. So frustrating.

Please file an issue at https://github.com/PaddlePaddle/Paddle/issues

#9 xtl8541 replied 2020-08
#8 鹿鼎记肯定 replied:
Try running it on CPU; running out of GPU memory, or running multi-card in a project, can both trigger this.

Tried it, didn't help. So frustrating.

#8 鹿鼎记肯定 replied 2020-07
#7 xtl8541 replied:
exe.run(fluid.default_startup_program())

Try running it on CPU and see; if GPU memory runs short, or you run multi-card in a project, this kind of error can appear.
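For example, switching the executor to CPU is a small change in the setup (a sketch; fluid.CUDAPlace(0) stands in for whatever GPU place the original code used):

# run on CPU instead of GPU to rule out GPU-memory problems
place = fluid.CPUPlace()  # instead of fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())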

#7 xtl8541 replied 2020-07
#6 鹿鼎记肯定 replied:
Are you running fluid.default_startup_program(), or exe.run(fluid.default_startup_program())?

exe.run(fluid.default_startup_program())

#6 鹿鼎记肯定 replied 2020-07

Are you running fluid.default_startup_program(), or exe.run(fluid.default_startup_program())?
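The distinction matters: fluid.default_startup_program() by itself only returns the startup Program object and initializes nothing; the executor has to run it. A minimal sketch:

prog = fluid.default_startup_program()  # just returns the Program; runs nothing
exe = fluid.Executor(fluid.CPUPlace())
exe.run(prog)  # this call actually executes the initializer ops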

#5 xtl8541 replied 2020-07

And here's another one that is not initialized.

#4 xtl8541 replied 2020-07

These initialization errors are grinding me into the ground.

#3 xtl8541 replied 2020-07

Couldn't find a relevant answer in the community either.

#2 xtl8541 replied 2020-07

It's not just fluid.data that shows up as uninitialized; some intermediate results of other computations have hit the "not initialized" error as well.
