保存模型报错
w不将就6 发布于2020-06 浏览:1110 回复:1
0
收藏
快速回复

按照官方文档操作的,还是出错了 ValueError: The target variable must have an associated operator that generates it.

 

收藏
点赞
0
个赞
共1条回复 最后由w不将就6回复于2020-06
#2w不将就6回复于2020-06

# NOTE: the original error
#   ValueError: The target variable must have an associated operator that generates it.
# came from calling the STATIC-graph API fluid.io.save_inference_model on a
# variable (`logits`) created in DYGRAPH mode. In dygraph, save parameters
# with fluid.dygraph.save_dygraph instead (see fix at the bottom of train()).

# Define a CPU executor (legacy static-graph setup; no longer needed for
# saving once save_dygraph is used, kept so the rest of the script is intact).
# place = fluid.CUDAPlace(0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Initialize parameters of the (static) startup program.
exe.run(fluid.default_startup_program())


def train(model):
    """Train `model` in dygraph mode, plot loss/accuracy curves, and save
    the trained parameters.

    Args:
        model: a dygraph Layer (here an AlexNet instance) with .train(),
               .parameters(), .state_dict() and .clear_gradients().
    """
    with fluid.dygraph.guard():
        print("---- start training ----")
        model.train()
        epoch_num = 2
        iter = 0
        train_loader = data_loader(DATADIR, batch_size=10, mode='train')
        iters = []
        losses = []
        accuracy = []
        # FIX: build the optimizer ONCE, outside the loops. The original
        # recreated AdamOptimizer every batch, which resets Adam's moment
        # estimates on each step and wastes work.
        opt = fluid.optimizer.AdamOptimizer(
            learning_rate=0.001, parameter_list=model.parameters())
        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_loader()):
                x_data, y_data = data
                img = fluid.dygraph.to_variable(x_data)
                label = fluid.dygraph.to_variable(y_data)
                logits = model(img)
                # NOTE(review): cross_entropy expects a probability
                # distribution; confirm the model's last layer applies softmax.
                cost = fluid.layers.cross_entropy(logits, label)
                avg_cost = fluid.layers.mean(cost)
                acc = fluid.layers.accuracy(input=logits, label=label)
                if batch_id % 5 == 0:
                    print("epoch: {}, batch_id: {}, loss is: {},acc is:{}".format(
                        epoch, batch_id, avg_cost.numpy(), acc.numpy()))
                    iters.append(iter)
                    losses.append(avg_cost.numpy())
                    accuracy.append(acc.numpy())
                    iter = iter + 10
                avg_cost.backward()
                opt.minimize(avg_cost)
                model.clear_gradients()

        # Plot training curves: loss on the left axis, accuracy on the right.
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        lns1 = ax1.plot(losses, label="Loss")
        lns2 = ax2.plot(accuracy, 'r', label="Accuracy")
        ax1.set_xlabel('iteration')
        ax1.set_ylabel('training loss')
        ax2.set_ylabel('training accuracy')
        # Merge the legends of both axes into one.
        lns = lns1 + lns2
        labels = ["Loss", "Accuracy"]
        plt.legend(lns, labels, loc=0)
        plt.show()

        # Save the trained parameters.
        # NOTE(review): '.model/...' looks like a typo for './model/...' — confirm.
        save_path = '.model/eye_classification'
        # FIX: fluid.io.save_inference_model is a static-graph API and raises
        # "The target variable must have an associated operator that generates
        # it" when handed dygraph variables. Under dygraph.guard(), save the
        # state dict with save_dygraph (writes save_path + '.pdparams').
        fluid.dygraph.save_dygraph(model.state_dict(), save_path)


if __name__ == "__main__":
    with fluid.dygraph.guard():
        model = AlexNet("AlexNet")
        train(model)

0
TOP
切换版块