batch_norm的输入能直接是数据输入层吗
徒尔为 发布于2018-11-08 19:57 浏览:18 回复:0
0
收藏
最后编辑于2018-11-08
    data = fluid.layers.data('data',shape=[1,250,3],lod_level=1)
    data_norm = fluid.layers.batch_norm(data)

这样写会报错,错误信息和调用栈如下:

---------------------------------------------------------------------------
EnforceNotMet                             Traceback (most recent call last)
<ipython-input-11-183a8536b4c4> in <module>
----> 1 trainer = Trainer.Trainer(pp,optimizer,place=place)

/opt/conda/envs/py35-paddle1.0.0/lib/python3.5/site-packages/paddle/fluid/contrib/trainer.py in __init__(self, train_func, optimizer_func, param_path, place, parallel, checkpoint_config)
    269                 raise TypeError(
    270                     "The optimizer should be an instance of Optimizer")
--> 271             optimize_ops, params_grads = optimizer.minimize(loss)
    272 
    273         self.place = check_and_get_place(place)

/opt/conda/envs/py35-paddle1.0.0/lib/python3.5/site-packages/paddle/fluid/optimizer.py in minimize(self, loss, startup_program, parameter_list, no_grad_set)
    253         """
    254         params_grads = append_backward(loss, parameter_list, no_grad_set,
--> 255                                        [error_clip_callback])
    256 
    257         params_grads = sorted(params_grads, key=lambda x: x[0].name)

/opt/conda/envs/py35-paddle1.0.0/lib/python3.5/site-packages/paddle/fluid/backward.py in append_backward(loss, parameter_list, no_grad_set, callbacks)
    588     _rename_grad_(root_block, fwd_op_num, grad_to_var, {})
    589 
--> 590     _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map)
    591 
    592     program.current_block_idx = current_block_idx

/opt/conda/envs/py35-paddle1.0.0/lib/python3.5/site-packages/paddle/fluid/backward.py in _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map)
    424         # infer_shape and infer_type
    425         op_desc.infer_var_type(block.desc)
--> 426         op_desc.infer_shape(block.desc)
    427         # ncclInit dones't need to set data_type
    428         if op_desc.type() == 'ncclInit':

EnforceNotMet:  at [/paddle/paddle/fluid/operators/batch_norm_op.cc:335]
PaddlePaddle Call Stacks: 
0       0x7f638808c426p paddle::platform::EnforceNotMet::EnforceNotMet(std::__exception_ptr::exception_ptr, char const*, int) + 486
1       0x7f6388270065p paddle::operators::BatchNormGradOp::InferShape(paddle::framework::InferShapeContext*) const + 1717
2       0x7f6388134d86p paddle::framework::OpDesc::InferShape(paddle::framework::BlockDesc const&) const + 902
3       0x7f63880e1755p void pybind11::cpp_function::initialize<pybind11::cpp_function::initialize<void, paddle::framework::OpDesc, paddle::framework::BlockDesc const&, pybind11::name, pybind11::is_method, pybind11::sibling>(void (paddle::framework::OpDesc::*)(paddle::framework::BlockDesc const&) const, pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(paddle::framework::OpDesc const*, paddle::framework::BlockDesc const&)#1}, void, paddle::framework::OpDesc const*, paddle::framework::BlockDesc const&, pybind11::name, pybind11::is_method, pybind11::sibling>(pybind11::cpp_function::initialize<void, paddle::framework::OpDesc, paddle::framework::BlockDesc const&, pybind11::name, pybind11::is_method, pybind11::sibling>(void (paddle::framework::OpDesc::*)(paddle::framework::BlockDesc const&) const, pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(paddle::framework::OpDesc const*, paddle::framework::BlockDesc const&)#1}&&, void (*)(paddle::framework::OpDesc const*, paddle::framework::BlockDesc const&), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::{lambda(pybind11::detail::function_call&)#3}::_FUN(pybind11::detail::function_call) + 213
4       0x7f63880bebe4p pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 2596
5       0x7f63c4eae199p PyCFunction_Call + 233
6       0x7f63c4f493f9p PyEval_EvalFrameEx + 33545
7       0x7f63c4f491d0p PyEval_EvalFrameEx + 32992
8       0x7f63c4f4b4b6p
9       0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
10      0x7f63c4f4b4b6p
11      0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
12      0x7f63c4f4b4b6p
13      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
14      0x7f63c4e8ac33p
15      0x7f63c4e5933ap PyObject_Call + 106
16      0x7f63c4e71d7dp
17      0x7f63c4e5933ap PyObject_Call + 106
18      0x7f63c4eca289p
19      0x7f63c4ec4fd6p
20      0x7f63c4e5933ap PyObject_Call + 106
21      0x7f63c4f454c5p PyEval_EvalFrameEx + 17365
22      0x7f63c4f4b4b6p
23      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
24      0x7f63c4f4b5ebp PyEval_EvalCode + 59
25      0x7f63c4f3ec5dp
26      0x7f63c4eae179p PyCFunction_Call + 201
27      0x7f63c4f48dbep PyEval_EvalFrameEx + 31950
28      0x7f63c4e82410p _PyGen_Send + 128
29      0x7f63c4f47953p PyEval_EvalFrameEx + 26723
30      0x7f63c4e82410p _PyGen_Send + 128
31      0x7f63c4f47953p PyEval_EvalFrameEx + 26723
32      0x7f63c4e82410p _PyGen_Send + 128
33      0x7f63c4f48d60p PyEval_EvalFrameEx + 31856
34      0x7f63c4f491d0p PyEval_EvalFrameEx + 32992
35      0x7f63c4f491d0p PyEval_EvalFrameEx + 32992
36      0x7f63c4f4b4b6p
37      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
38      0x7f63c4e8ac33p
39      0x7f63c4e5933ap PyObject_Call + 106
40      0x7f63c4f436eep PyEval_EvalFrameEx + 9726
41      0x7f63c4f4b4b6p
42      0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
43      0x7f63c4e816bap
44      0x7f63c4f3caf6p
45      0x7f63c4eae179p PyCFunction_Call + 201
46      0x7f63c4f48dbep PyEval_EvalFrameEx + 31950
47      0x7f63c4f4b4b6p
48      0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
49      0x7f63c4e816bap
50      0x7f63c4f3caf6p
51      0x7f63c4eae179p PyCFunction_Call + 201
52      0x7f63c4f48dbep PyEval_EvalFrameEx + 31950
53      0x7f63c4f4b4b6p
54      0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
55      0x7f63c4e816bap
56      0x7f63c4f3caf6p
57      0x7f63c4eae179p PyCFunction_Call + 201
58      0x7f63c4f48dbep PyEval_EvalFrameEx + 31950
59      0x7f63c4f4b4b6p
60      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
61      0x7f63c4e8ab56p
62      0x7f63c4e5933ap PyObject_Call + 106
63      0x7f63c4f436eep PyEval_EvalFrameEx + 9726
64      0x7f63c4e82410p _PyGen_Send + 128
65      0x7f63c4f48d60p PyEval_EvalFrameEx + 31856
66      0x7f63c4f491d0p PyEval_EvalFrameEx + 32992
67      0x7f63c4f4b4b6p
68      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
69      0x7f63c4e8ab56p
70      0x7f63c4e5933ap PyObject_Call + 106
71      0x7f63c4e71d7dp
72      0x7f63c4e5933ap PyObject_Call + 106
73      0x7f63c4eca289p
74      0x7f63c4ec4fd6p
75      0x7f63c4e5933ap PyObject_Call + 106
76      0x7f63c4f454c5p PyEval_EvalFrameEx + 17365
77      0x7f63c4f4b4b6p
78      0x7f63c4f485b5p PyEval_EvalFrameEx + 29893
79      0x7f63c4e82410p _PyGen_Send + 128
80      0x7f63c4f48d60p PyEval_EvalFrameEx + 31856
81      0x7f63c4f491d0p PyEval_EvalFrameEx + 32992
82      0x7f63c4f4b4b6p
83      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
84      0x7f63c4e8ac33p
85      0x7f63c4e5933ap PyObject_Call + 106
86      0x7f63c4f436eep PyEval_EvalFrameEx + 9726
87      0x7f63c4f4b4b6p
88      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
89      0x7f63c4e8ab56p
90      0x7f63c4e5933ap PyObject_Call + 106
91      0x7f63c4fbeccap
92      0x7f63c4e5933ap PyObject_Call + 106
93      0x7f63c4f454c5p PyEval_EvalFrameEx + 17365
94      0x7f63c4f4b4b6p
95      0x7f63c4f4b5a8p PyEval_EvalCodeEx + 72
96      0x7f63c4e8ab56p
97      0x7f63c4e5933ap PyObject_Call + 106
98      0x7f63c4f436eep PyEval_EvalFrameEx + 9726
99      0x7f63c4f491d0p PyEval_EvalFrameEx + 32992

是不是原理上 batch_norm 层就不能直接接在数据输入层(尤其是带 lod_level=1 的序列数据)后面这样用?如果可以,应该怎样设置输入的 shape 或先经过什么层?

收藏
点赞
0
个赞
TOP
切换版块