[Cambricon hardware product model] Required*:
MLU370
[MagicMind version] Required*:
0.14.1
[Error message] Required*:
2023-09-16 02:50:17.790200: ERROR: magicmind/runtime/core/runtime_tensor.cc:276] Not found: Failed to find IRTTensor with name: input
2023-09-16 02:50:17.790233: ERROR: magicmind/runtime/core/runtime_tensor.cc:276] Not found: Failed to find IRTTensor with name: output
[Steps to reproduce] Optional:
[Related logs/documents] Optional:
Attach as a file if available.
[Error code] Optional:
#include <memory>
#include <dlfcn.h>
#include "cnrt.h"
#include "common/device.h"
#include "common/data.h"
#include "common/logger.h"
#include "mm_common.h"
#include "mm_builder.h"
#include "basic_samples/sample_runtime/sample_runtime.h"
using namespace magicmind;
int main()
{
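  // Parse the ONNX model into a MagicMind network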
  IParser<ModelKind::kOnnx, std::string>* parser = CreateIParser<ModelKind::kOnnx, std::string>();
  INetwork* network = CreateINetwork();
  parser->Parse(network, "/root/resnet50-v1-7.onnx");
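  // Build the offline model with the JSON build config and serialize it to resnet50.mm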
  IBuilder* builder = CreateIBuilder();
  IBuilderConfig* config = CreateIBuilderConfig();
  config->ParseFromFile("/root/MLU370/build_config.json");
  IModel* model_out = builder->BuildModel("/root/resnet50-v1-7.onnx", network, config);
  std::cout << model_out << std::endl;
  model_out->SerializeToFile("resnet50.mm");
  model_out->Destroy();
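  // Deserialize the model from disk and bind the current MLU device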
  IModel* model = CreateIModel();
  model->DeserializeFromFile("/root/resnet50.mm");
  cnrtSetDevice(0);
  // Create the IEngine and IContext
  IEngine* engine = model->CreateIEngine();
  IContext* context = engine->CreateIContext();
  // Set input/output addresses, input shapes, and other information for the IContext, as shown below:
  std::vector<IRTTensor*> inputs, outputs;
  // Get handles to the input and output tensors from the IModel
  context->CreateInputTensors(&inputs);
  context->CreateOutputTensors(&outputs);
  // Look up tensor handles by tensor name
  IRTTensor* input_tensor = FindIRTTensorByName(inputs, "input");
  IRTTensor* output_tensor = FindIRTTensorByName(outputs, "output");
  if (input_tensor == nullptr)
    std::cout << "input_tensor empty" << std::endl;
  if (output_tensor == nullptr)
    std::cout << "output_tensor empty" << std::endl;
  // Set the shape and data address of the input tensor
  input_tensor->SetDimensions(Dims({1, 224, 224, 3}));
  // Use the TensorLocation to decide whether the input tensor needs a device-side or a host-side address
  // mlu_input_addr is MLU device-side input memory pre-allocated by the user;
  // host_input_addr is host-side input memory pre-allocated by the user
  if ((input_tensor->GetMemoryLocation() == magicmind::TensorLocation::kMLU) ||
      (input_tensor->GetMemoryLocation() == magicmind::TensorLocation::kRemoteMLU)) {
    input_tensor->SetData(const_cast<char*>("/root/image.jpg"));
  } else {
    input_tensor->SetData(const_cast<char*>("host_input_addr"));
  }
  // Use the IContext to infer the output shapes
  context->InferOutputShape(inputs, outputs);
  // Use the TensorLocation to decide whether the output tensor needs a device-side or a host-side address
  // mlu_output_addr is MLU device-side output memory pre-allocated by the user;
  // host_output_addr is host-side output memory pre-allocated by the user
  if ((output_tensor->GetMemoryLocation() == magicmind::TensorLocation::kMLU) ||
      (output_tensor->GetMemoryLocation() == magicmind::TensorLocation::kRemoteMLU)) {
    output_tensor->SetData(const_cast<char*>("./output"));
  } else {
    output_tensor->SetData(const_cast<char*>(""));
  }
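  // The RPC-device queue code below is left commented out, so queue stays nullptr and the first Enqueue call is not executed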
  // magicmind::IRpcDevice *dev_ = nullptr;
  // magicmind::IRpcQueue *queue_rpc_ = dev_->CreateQueue();
  cnrtQueue_t queue = nullptr;
  // queue_rpc_ = dev_->CreateQueue();
  // queue_ = queue_rpc_->get();
  // context->Enqueue(inputs, outputs, queue);
  // When capacity is used, shape inference does not need to be called
  std::vector<IRTTensor*> inputs1, outputs1;
  context->CreateInputTensors(&inputs1);
  context->CreateOutputTensors(&outputs1);
  FindIRTTensorByName(inputs1, "input")->SetDimensions(Dims({1, 224, 224, 3}));
  FindIRTTensorByName(inputs1, "input")->SetData(const_cast<char*>("./output"));
  // The output shape passed in by the user is only a hint; it may differ from the actual shape but must fit within the capacity
  FindIRTTensorByName(outputs1, "output")->SetDimensions(Dims({1, 224, 224, 3}));
  // Set the capacity and pass in an address space large enough for it
  FindIRTTensorByName(outputs1, "output")->SetCapacity(100);
  FindIRTTensorByName(outputs1, "output")->SetData(const_cast<char*>(""));
  context->Enqueue(inputs1, outputs1, queue);
  const auto output_shape = FindIRTTensorByName(outputs1, "output")->GetDimensions();
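  // Release the tensors, context, engine, and model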
  for (auto tensor : inputs) {
    tensor->Destroy();
  }
  for (auto tensor : outputs) {
    tensor->Destroy();
  }
  context->Destroy();
  engine->Destroy();
  model->Destroy();
  return 0;
}