在ubuntu下再次运行CNN图像分类的问题
asdfvgbnm3 发布于2019-03-04 10:19 浏览:361 回复:2
0
收藏

在ubuntu下运行教程中的CNN图片分类程序,打印出了这个消息:

Process finished with exit code 137 (interrupted by signal 9: SIGKILL)

然后过一会程序就停止了

背景是我先运行了一遍,在[===================================]结束之后,报错我没有装IPython

然后我安装了最新的IPython,再运行,就报了这个错误。

ubuntu版本:18.04.2   虚拟机版本:VM14  paddlepaddle版本:paddlepaddle.fluid.1.3(cpu) IPython7.3.0  python版本:3.6

代码如下:

import paddle
import paddle.fluid as fluid
import numpy
import sys
# from __future__ import print_function

# Build the VGG-style training network.
def vgg_bn_drop(input):
    """Build a VGG network with batch norm and dropout on top of `input`.

    Args:
        input: input image tensor variable (CHW image batch).

    Returns:
        Softmax prediction tensor over 10 classes.
    """
    def conv_block(ipt, num_filter, groups, dropouts):
        # One VGG stage: `groups` 3x3 convs with BN, then a 2x2 max-pool.
        return fluid.nets.img_conv_group(
            input=ipt,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    # Stage configs: (filters, number of convs, per-conv dropout rates).
    stages = [
        (64, 2, [0.3, 0]),
        (128, 2, [0.4, 0]),
        (256, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
    ]
    feat = input
    for num_filter, groups, dropouts in stages:
        feat = conv_block(feat, num_filter, groups, dropouts)

    # Classifier head: dropout -> FC -> BN(relu) -> dropout -> FC -> softmax.
    feat = fluid.layers.dropout(x=feat, dropout_prob=0.5)
    feat = fluid.layers.fc(input=feat, size=512, act=None)
    feat = fluid.layers.batch_norm(input=feat, act='relu')
    feat = fluid.layers.dropout(x=feat, dropout_prob=0.5)
    feat = fluid.layers.fc(input=feat, size=512, act=None)
    return fluid.layers.fc(input=feat, size=10, act='softmax')

# Define the inference network (input layer + VGG backbone).
def inference_program():
    """Declare the image input and return the VGG softmax prediction.

    Returns:
        Softmax prediction tensor for CIFAR-10 (10 classes).
    """
    # CIFAR-10 images are 3-channel RGB, 32 x 32 pixels.
    images = fluid.layers.data(
        name='pixel', shape=[3, 32, 32], dtype='float32')
    # predict = resnet_cifar10(images, 32)  # alternative backbone
    return vgg_bn_drop(images)  # un-comment the line above to use resnet

# Define the loss (cross-entropy) and accuracy metrics.
def train_program(predict=None):
    """Attach label input, cross-entropy loss and accuracy to `predict`.

    Args:
        predict: prediction tensor from `inference_program()`; if None,
            a fresh network is built lazily.

    Returns:
        [avg_cost, accuracy] tensor variables.
    """
    # BUG FIX: the original default `predict=inference_program()` was
    # evaluated once at `def` time, silently building an extra copy of the
    # whole VGG network in the default main program on import (wasted
    # memory — a likely contributor to the reported SIGKILL/OOM).
    # Build the default lazily instead; explicit callers are unaffected.
    if predict is None:
        predict = inference_program()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return [avg_cost, accuracy]

# Define the optimizer used for training.
def optimizer_program():
    """Return an Adam optimizer with a fixed learning rate of 1e-3."""
    return fluid.optimizer.Adam(learning_rate=0.001)

# Data readers for training and testing.
BATCH_SIZE = 128
# Reader for training (shuffled).
train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=50000),
    batch_size=BATCH_SIZE)
# Reader for testing. A separated data set for testing.
test_reader = paddle.batch(
    paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

# Names of the feed variables, in feed order.
feed_order = ['pixel', 'label']

main_program = fluid.default_main_program()
star_program = fluid.default_startup_program()

# Build the forward network and the loss/metric ops first.
predict = inference_program()
avg_cost, acc = train_program(predict)

# BUG FIX: the original called main_program.clone(for_test=True) BEFORE
# any layer was built, so test_program was a clone of an empty program and
# could not resolve the feed/fetch variables.  Clone AFTER the forward and
# loss graph exists but BEFORE minimize() appends backward/optimizer ops.
test_program = main_program.clone(for_test=True)

optimizer = optimizer_program()
optimizer.minimize(avg_cost)

use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)

EPOCH_NUM = 2

# Compute average test cost/accuracy over the whole test reader.
def train_test(program, reader):
    """Run `program` over every batch from `reader` and average metrics.

    Args:
        program: the (test) Program to execute.
        reader: a batched data reader callable.

    Returns:
        [mean_avg_cost, mean_accuracy] averaged over all batches.
    """
    feed_vars = [
        program.global_block().var(name) for name in feed_order
    ]
    feeder = fluid.DataFeeder(feed_list=feed_vars, place=place)
    executor = fluid.Executor(place)

    totals = [0, 0]  # running sums for [avg_cost, acc]
    batches = 0
    for batch in reader():
        metrics = executor.run(program=program,
                               feed=feeder.feed(batch),
                               fetch_list=[avg_cost, acc])
        totals = [total + metric[0] for total, metric in zip(totals, metrics)]
        batches += 1
    return [total / batches for total in totals]

# Directory where the trained inference model will be saved.
params_dirname = "image_classification_resnet.inference.model"

# Live cost-curve plotting helper (requires IPython/matplotlib at runtime).
from paddle.utils.plot import Ploter

# Legend labels for the two curves on the shared plot.
train_prompt = "Train cost"
test_prompt = "Test cost"
plot_cost = Ploter(test_prompt,train_prompt)

# main train loop.
def train_loop():
    """Train for EPOCH_NUM epochs, plotting cost and saving the model.

    Side effects: runs the startup program, updates the live plot, and
    writes an inference model snapshot to `params_dirname` each epoch.
    """
    feed_var_list_loop = [
        main_program.global_block().var(var_name) for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(
        feed_list=feed_var_list_loop, place=place)
    exe.run(star_program)  # initialize parameters once before training

    step = 0
    for pass_id in range(EPOCH_NUM):
        for step_id, data_train in enumerate(train_reader()):
            avg_loss_value = exe.run(main_program,
                                     feed=feeder.feed(data_train),
                                     fetch_list=[avg_cost, acc])
            # BUG FIX: the original guarded this with `if step % 1 == 0`,
            # which is always true — a dead condition.  Behavior unchanged:
            # plot every batch.  (Redrawing every batch is also slow; raise
            # the interval here if training needs to be faster.)
            plot_cost.append(train_prompt, step, avg_loss_value[0])
            plot_cost.plot()
            step += 1

        # Evaluate on the held-out test set after each epoch.
        avg_cost_test, accuracy_test = train_test(test_program,
                                                  reader=test_reader)
        plot_cost.append(test_prompt, step, avg_cost_test)

        # save parameters (an inference-ready snapshot) each epoch
        if params_dirname is not None:
            fluid.io.save_inference_model(params_dirname, ["pixel"],
                                          [predict], exe)

# Entry point: only start training when executed as a script, not on import.
if __name__ == '__main__':
    train_loop()
收藏
点赞
0
个赞
共2条回复 最后由asdfvgbnm3回复于2019-03-07 08:05
#3asdfvgbnm3回复于2019-03-07 08:05:46
#2 天子骄子fly回复
你电脑支持AVX指令集吗?

支持的

0
#2天子骄子fly回复于2019-03-06 17:06:35

你电脑支持AVX指令集吗?

0
TOP
切换版块