Can't solve linker error in building S-Function Builder

54 ビュー (過去 30 日間)
翼
2025 年 1 月 26 日 16:19
編集済み: 2025 年 1 月 30 日 9:21
I got this kind of model with S-Function Builder as shown in this image.
But When I build it, I got this error.
#'smex_builder.cpp'
was created successfully
###'smex_builder_wrapper.cpp'
was created successfully
###'smex_builder.tlc'
was created successfully
ライブラリ smex_builder.lib とオブジェクト smex_builder.exp を作成中 smex_builder_wrapper.obj : error LNK2019: 未解決の外部シンボル OrtGetApiBase が関数 "void __cdecl `dynamic initializer for 'public: static struct OrtApi const * const Ort::Global::api_''(void)" (??__E?api_@?$Global@X@Ort@@2PEBUOrtApi@@EB@@YAXXZ) で参照されました smex_builder.mexw64 : fatal error LNK1120: 1 件の未解決の外部参照
Also, I set parameter tab and library tab like this.
[Port and parameter] tab
[library] tab
I run " mex -setup C++" command and it shows Visual Studio 2022.
I use attached ONNX model as zip file.(out_modified_empty_model.onnx)
Structure of my onnx is like this.
Do you happen to know the solution?
I'm not sure about why this error... I'd be happy if you could give me any advice.
For your information,
I tried this C++ code in S-Function Builder Editor, but I can not figure out what is the cause and where I am missing.
#include "mex.h"
#include <math.h>
#include <onnxruntime_cxx_api.h>
#include <vector>
#include <memory>
extern std::unique_ptrOrt::Env g_env;
extern std::unique_ptrOrt::Session g_session;
extern std::vector<const char> g_input_node_names;
extern std::vector<const char*> g_output_node_names;
void smex_builder_Start_wrapper(void)
{
try {
g_env = std::make_uniqueOrt::Env(ORT_LOGGING_LEVEL_WARNING, "test");
Ort::SessionOptions session_options;
session_options.SetIntraOpNumThreads(1);
session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
const wchar_t model_path = L"out_modified_empty_model.onnx";
g_session = std::make_uniqueOrt::Session(g_env, model_path, session_options);
Ort::AllocatorWithDefaultOptions allocator;
size_t num_input_nodes = g_session->GetInputCount();
g_input_node_names.resize(num_input_nodes);
for (size_t i = 0; i < num_input_nodes; i++) {
auto input_name = g_session->GetInputNameAllocated(i, allocator);
g_input_node_names[i] = input_name.get();
}
size_t num_output_nodes = g_session->GetOutputCount();
g_output_node_names.resize(num_output_nodes);
for (size_t i = 0; i < num_output_nodes; i++) {
auto output_name = g_session->GetOutputNameAllocated(i, allocator);
g_output_node_names[i] = output_name.get();
}
}
catch (const Ort::Exception& ex) {
mexErrMsgIdAndTxt("myOnnxSfunc:InitError", "初期化エラー: %s", ex.what());
}
}
void smex_builder_Outputs_wrapper(const real_T *u0,
                                  const real_T *u1,
                                  const real_T *u2,
                                  const real_T *u3,
                                  const real_T *u4,
                                  const real_T *u5,
                                  const real_T *u6,
                                  real_T *y0,
                                  real_T *y1)  // restored missing '*' (cf. the builder-generated signature)
{
    // Debug probe from the original post: it unconditionally aborts the
    // callback, making everything below unreachable.  Disabled here.
    // mexErrMsgIdAndTxt("mySfunc:TestError", "mexErrMsgIdAndTxt のテストエラー");
    try {
        // Gather the seven scalar input ports into one float vector.
        // Restored the missing dereferences on u4-u6: the original cast
        // the pointers themselves to float.
        std::vector<float> input_data(7);
        input_data[0] = static_cast<float>(*u0);
        input_data[1] = static_cast<float>(*u1);
        input_data[2] = static_cast<float>(*u2);
        input_data[3] = static_cast<float>(*u3);
        input_data[4] = static_cast<float>(*u4);
        input_data[5] = static_cast<float>(*u5);
        input_data[6] = static_cast<float>(*u6);

        // Single rank-2 tensor of shape [1, 7] (batch of one, 7 features).
        std::vector<int64_t> input_shape = { 1, 7 };
        Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
        Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
            memory_info,
            input_data.data(),
            input_data.size(),
            input_shape.data(),
            input_shape.size()
        );

        auto output_tensors = g_session->Run(
            Ort::RunOptions{ nullptr },
            g_input_node_names.data(),
            &input_tensor,
            1,
            g_output_node_names.data(),
            g_output_node_names.size()
        );

        // The model is expected to produce exactly two tensors.
        if (output_tensors.size() != 2) {
            mexErrMsgIdAndTxt("myOnnxSfunc:OutputError",
                "モデルは2つのテンソルを出力する必要があります。");
        }

        // Check the shape of the first output tensor; expected (1, 1).
        // Restored the missing '*': GetTensorMutableData returns float*.
        float* output_data0 = output_tensors[0].GetTensorMutableData<float>();
        auto type_info0 = output_tensors[0].GetTensorTypeAndShapeInfo();
        auto output_shape0 = type_info0.GetShape();
        if (output_shape0.size() != 2 || output_shape0[0] != 1 || output_shape0[1] != 1) {
            mexErrMsgIdAndTxt("myOnnxSfunc:OutputError",
                "出力テンソル0の形状が不正です。期待される形状: (1, 1)");
        }

        // Check the shape of the second output tensor; expected (1, 1).
        float* output_data1 = output_tensors[1].GetTensorMutableData<float>();
        auto type_info1 = output_tensors[1].GetTensorTypeAndShapeInfo();
        auto output_shape1 = type_info1.GetShape();
        if (output_shape1.size() != 2 || output_shape1[0] != 1 || output_shape1[1] != 1) {
            mexErrMsgIdAndTxt("myOnnxSfunc:OutputError",
                "出力テンソル1の形状が不正です。期待される形状: (1, 1)");
        }

        // Write the results to the Simulink output ports (first output to
        // y0, second output to y1).
        y0[0] = static_cast<double>(output_data0[0]);
        y1[0] = static_cast<double>(output_data1[0]);
    }
    catch (const Ort::Exception& ex) {
        mexErrMsgIdAndTxt("myOnnxSfunc:RuntimeError", "実行時エラー: %s", ex.what());
    }
}
void smex_builder_Terminate_wrapper(void)
{
    // Drop the cached node-name pointers first (they do not own the
    // strings), then tear down the runtime objects in reverse order of
    // creation: the session must go before the environment it came from.
    g_input_node_names.clear();
    g_output_node_names.clear();
    g_session.reset();
    g_env.reset();
}

回答 (1 件)

埃博拉酱
埃博拉酱 2025 年 1 月 27 日 1:32
What is <onnxruntime_cxx_api.h> and is it provided by MATLAB or a third-party library?
If it's a third-party library, the link error is usually because you didn't link to the lib static library provided by the third party. You'll need to look for the presence of onnxruntime-related lib files, and then add #pragma comment(lib,"onnxruntime_related_lib_file_path") to your code to include the static library in the link.
  11 件のコメント
埃博拉酱
埃博拉酱 2025 年 1 月 29 日 23:26

It's also a good way to comment out some code as you did to exclude unrelated considerations. You may continue to comment out more lines (even all lines except for those required for successful compilation) until something different happens. Pure revision is often of little help.

翼
2025 年 1 月 30 日 8:58
編集済み: 2025 年 1 月 30 日 9:21
I modified the C++ code and the Python code as below, because I realized there may be a problem with the difference in input and output shapes between Simulink and the S-Function Builder.
I was just wondering but ,am I right in thinking that there is a problem with the slash string in the file path on MATLAB?
Also, I'm using Windows environment.
I'm sure there is no problem with each Data Type on Simulink side and on C++ code, but I can not solve MATLAB crash....
I am still stuck on this issue..
I'm getting "Access violation detected" error on MATLAB crash log file....
I did debug on Visual Studio and it shows this Exception...
Exception: Invalid rank for input: input1 Got: 2 Expected: 1 Please fix either the inputs/outputs or the model
But I have no idea where exactly I am going wrong... In the first place, I wonder if my approach is wrong...
Is it impossible to run an ONNX model with an S-Function?
File path on "Library" tab
・C++(S-Function Builder)
/* Includes_BEGIN */
#include "mex.h"
#pragma comment(lib, "C:\\Users\\Administrator\\Documents\\Cplusplus\\ConsoleApplication1\\onnxruntime\\lib\\onnxruntime.lib")
#include <math.h>
#include <onnxruntime_cxx_api.h>
#include <vector>
#include <memory>
/* Includes_END */
// Global variable definitions: one ONNX Runtime environment/session pair
// shared by the Start/Outputs/Terminate wrappers, plus the model's
// input/output node names.
std::unique_ptr<Ort::Env> g_env;
std::unique_ptr<Ort::Session> g_session;
std::vector<char*> g_input_node_names; // owns _strdup'ed strings, freed in Terminate (hence char*, not const char*)
std::vector<char*> g_output_node_names; // owns _strdup'ed strings, freed in Terminate
/* Externs_BEGIN */
// Re-declared as extern for the generated wrapper scaffolding.
extern std::unique_ptr<Ort::Env> g_env;
extern std::unique_ptr<Ort::Session> g_session;
extern std::vector<char*> g_input_node_names;
extern std::vector<char*> g_output_node_names;
/* Externs_END */
void smex_builder_Start_wrapper(void)
{
/* Start_BEGIN */
try {
// Initialize ONNX Runtime environment
g_env = std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "test");
if (!g_env) {
mexErrMsgIdAndTxt("myOnnxSfunc:InitError", "Failed to initialize ONNX Runtime environment.");
return;
}
Ort::SessionOptions session_options;
session_options.SetIntraOpNumThreads(1);
session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
// Load the ONNX model (It is recommended to be able to set the model path in the S-Function Builder dialog)
const wchar_t* model_path = L"out_modified_empty_model.onnx";
g_session = std::make_unique<Ort::Session>(*g_env, model_path, session_options);
if (!g_session) {
mexErrMsgIdAndTxt("myOnnxSfunc:InitError", "Failed to load ONNX model: %s", model_path);
return;
}
// Get input and output node information
Ort::AllocatorWithDefaultOptions allocator;
// Get input node names
size_t num_input_nodes = g_session->GetInputCount();
g_input_node_names.resize(num_input_nodes);
for (size_t i = 0; i < num_input_nodes; i++) {
Ort::AllocatedStringPtr input_name = g_session->GetInputNameAllocated(i, allocator);
g_input_node_names[i] = _strdup(input_name.get()); // Duplicate the string and store it as char*
}
// Get output node names
size_t num_output_nodes = g_session->GetOutputCount();
g_output_node_names.resize(num_output_nodes);
for (size_t i = 0; i < num_output_nodes; i++) {
Ort::AllocatedStringPtr output_name = g_session->GetOutputNameAllocated(i, allocator);
g_output_node_names[i] = _strdup(output_name.get()); // Duplicate the string and store it as char*
}
}
catch (const Ort::Exception& ex) {
mexErrMsgIdAndTxt("myOnnxSfunc:InitError", "Initialization error: %s", ex.what());
return;
}
/* Start_END */
}
void smex_builder_Outputs_wrapper(const real_T *u0,
const real_T *u1,
const real_T *u2,
const real_T *u3,
const real_T *u4,
const real_T *u5,
const real_T *u6,
real_T *y0,
real_T *y1)
{
/* Output_BEGIN */
try {
// Get input data (fixed batch_size = 1)
std::vector<float> input_data = {
static_cast<float>(*u0),
static_cast<float>(*u1),
static_cast<float>(*u2),
static_cast<float>(*u3),
static_cast<float>(*u4),
static_cast<float>(*u5),
static_cast<float>(*u6)
};
// Create input tensor (set shape to [7])
std::vector<int64_t> input_shape = {7};
Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
memory_info,
input_data.data(),
input_data.size(),
input_shape.data(),
input_shape.size()
);
// Run inference
auto output_tensors = g_session->Run(
Ort::RunOptions{ nullptr },
g_input_node_names.data(),
&input_tensor,
1,
g_output_node_names.data(),
g_output_node_names.size()
);
// Get output data and check shape
if (output_tensors.size() != 2) {
mexErrMsgIdAndTxt("myOnnxSfunc:OutputError", "The model must output two tensors.");
return;
}
for (size_t i = 0; i < 2; ++i) {
float* output_data = output_tensors[i].GetTensorMutableData<float>();
auto type_info = output_tensors[i].GetTensorTypeAndShapeInfo();
auto output_shape = type_info.GetShape();
// Check output shape: size is 1 and the number of elements is 1
if (output_shape.size() != 1 || output_shape[0] != 1) {
mexErrMsgIdAndTxt("myOnnxSfunc:OutputError", "The shape of output tensor %zu is invalid. Expected shape: [1]", i);
return;
}
if (i == 0) {
y0[0] = static_cast<double>(output_data[0]);
} else {
y1[0] = static_cast<double>(output_data[0]);
}
}
}
catch (const Ort::Exception& ex) {
mexErrMsgIdAndTxt("myOnnxSfunc:RuntimeError", "Runtime error: %s", ex.what());
return;
}
/* Output_END */
}
void smex_builder_Terminate_wrapper(void)
{
/* Terminate_BEGIN */
    // Destroy the session before the environment it was created from.
    g_session.reset();
    g_env.reset();
    // Release the _strdup'ed node-name strings and empty both vectors.
    auto release_names = [](std::vector<char*>& names) {
        for (char* name : names) {
            free(name);
        }
        names.clear();
    };
    release_names(g_input_node_names);
    release_names(g_output_node_names);
/* Terminate_END */
}
・Python(to create ONNX model)
import torch
import torch.nn as nn
class ModifiedEmptyModel(nn.Module):
    """Toy 7-input / 2-output model used to exercise the S-Function.

    output1 and output2 are pass-throughs of input1 and input2; inputs
    3-7 are accepted but ignored.  The original version also built a
    zero-filled, bias-free ``nn.Linear(7, 2)`` that ``forward`` never
    used; it was dead code (and would not appear in the traced ONNX
    graph), so it has been removed.  Indentation, which the forum paste
    had stripped, is restored.
    """

    def forward(self, input1, input2, input3, input4, input5, input6, input7):
        # Stack the seven (batch,) tensors into a (batch, 7) matrix, then
        # return the first two columns, i.e. input1 and input2 unchanged.
        x = torch.stack([input1, input2, input3, input4, input5, input6, input7], dim=1)
        return x[:, 0], x[:, 1]
# Instantiate the model and build one dummy (batch,) tensor per input so
# torch.onnx.export can trace the forward pass.
model = ModifiedEmptyModel()
batch_size = 1
dummy_input1 = torch.randn(batch_size, dtype=torch.float32)
dummy_input2 = torch.randn(batch_size, dtype=torch.float32)
dummy_input3 = torch.randn(batch_size, dtype=torch.float32)
dummy_input4 = torch.randn(batch_size, dtype=torch.float32)
dummy_input5 = torch.randn(batch_size, dtype=torch.float32)
dummy_input6 = torch.randn(batch_size, dtype=torch.float32)
dummy_input7 = torch.randn(batch_size, dtype=torch.float32)
# Export to ONNX.  Note: every input and output here is rank-1 (shape
# [batch_size], batch dimension dynamic) -- the C++ caller must therefore
# feed seven separate rank-1 tensors, or ONNX Runtime reports
# "Invalid rank for input: input1" as seen in the thread above.
torch.onnx.export(
model,
(dummy_input1, dummy_input2, dummy_input3, dummy_input4, dummy_input5, dummy_input6, dummy_input7),
"out_modified_empty_model.onnx",
export_params=True,
opset_version=11,
do_constant_folding=True,
input_names=["input1", "input2", "input3", "input4", "input5", "input6", "input7"],
output_names=["output1", "output2"],
dynamic_axes={
"input1": {0: "batch_size"},
"input2": {0: "batch_size"},
"input3": {0: "batch_size"},
"input4": {0: "batch_size"},
"input5": {0: "batch_size"},
"input6": {0: "batch_size"},
"input7": {0: "batch_size"},
"output1": {0: "batch_size"},
"output2": {0: "batch_size"},
},
)

サインインしてコメントする。

カテゴリ

Help Center および File Exchange で「Data Exchange and Mapping with C++ Applications」についてさらに検索

タグ

製品


リリース

R2024b

Community Treasure Hunt

Find the treasures in MATLAB Central and discover how the community can help you!

Start Hunting!

Translated by