** "mindspore\lite\examples\transfer_learning\src\net_runner.cc"注释1**
一、代码用处
这段代码主要用于对数据的一些执行操作函数
二、代码注释
#include "src/net_runner.h"//导入头文件
#include <math.h>
#include <getopt.h>
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include "include/context.h"
#include "src/utils.h"
static unsigned int seed = time(NULL);  // seed for rand_r, used when sampling the dataset randomly
// Callback executed after each node runs; prints the node name and the first
// elements of every input and output tensor, then lets execution continue.
bool after_callback(const std::vector<mindspore::tensor::MSTensor *> &after_inputs,
                    const std::vector<mindspore::tensor::MSTensor *> &after_outputs,
                    const mindspore::CallBackParam &call_param) {
  std::cout << call_param.node_name << std::endl;  // print the name of the node that just ran
  for (size_t i = 0; i < after_inputs.size(); i++) {
    int num2p = (after_inputs.at(i)->ElementsNum());
    std::cout << "in" << i << "(" << num2p << "): ";  // print the element count of this input
    if (num2p > 10) num2p = 10;  // print at most the first 10 elements
    if (after_inputs.at(i)->data_type() == mindspore::kNumberTypeInt32) {
      auto d = reinterpret_cast<int *>(after_inputs.at(i)->MutableData());  // view the tensor data as int32
      for (int j = 0; j < num2p; j++) {
        std::cout << d[j] << ", ";  // print the elements one by one
      }
    } else {
      auto d = reinterpret_cast<float *>(after_inputs.at(i)->MutableData());  // otherwise treat it as float32
      for (int j = 0; j < num2p; j++) {
        std::cout << d[j] << ", ";
      }
    }
    std::cout << std::endl;
  }
  for (size_t i = 0; i < after_outputs.size(); i++) {
    auto d = reinterpret_cast<float *>(after_outputs.at(i)->MutableData());
    int num2p = (after_outputs.at(i)->ElementsNum());
    std::cout << "ou" << i << "(" << num2p << "): ";  // print the element count of this output
    if (num2p > 10) num2p = 10;
    for (int j = 0; j < num2p; j++) {
      std::cout << d[j] << ", ";
    }
    std::cout << std::endl;
  }
  return true;  // returning true tells the session to keep running
}
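For context, a callback with this signature is meant to be handed to the session's RunGraph call so it runs after every node. A minimal sketch, assuming the TrainSession::RunGraph(before, after) interface of MindSpore Lite and the declarations already pulled in by src/net_runner.h (the TrainOneStep helper below is hypothetical):
// Hypothetical helper, for illustration only: run one training step and dump
// tensor contents through after_callback when verbose output is requested.
void TrainOneStep(mindspore::session::TrainSession *session, bool verbose) {
  session->Train();                                                // switch the session to training mode
  session->RunGraph(nullptr, verbose ? after_callback : nullptr);  // after_callback runs after each node
}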
NetRunner::~NetRunner() {  // destructor: release the training session if one was created
  if (session_ != nullptr) delete session_;
}
void NetRunner::InitAndFigureInputs() {  // create the transfer session and figure out its input tensors
  mindspore::lite::Context context;
  context.device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = mindspore::lite::NO_BIND;
  context.thread_num_ = 1;
  // build a transfer-learning session from the backbone model and the trainable head model
  session_ = mindspore::session::TrainSession::CreateTransferSession(ms_backbone_file_, ms_head_file_, &context);
  MS_ASSERT(nullptr != session_);
  auto inputs = session_->GetInputs();  // get the input tensors
  MS_ASSERT(inputs.size() > 1);         // the model must expose at least a data input and a label input
  data_index_ = 0;
  label_index_ = 1;
  batch_size_ = inputs[data_index_]->shape()[0];           // first dimension of the data tensor is the batch size
  data_size_ = inputs[data_index_]->Size() / batch_size_;  // size of one sample, in bytes
  if (verbose_) {
    std::cout << "data size: " << data_size_ << std::endl << "batch size: " << batch_size_ << std::endl;  // print data size and batch size
  }
}
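To make the size bookkeeping concrete, here is a hedged sketch; the shape {16, 224, 224, 3} is an assumption for illustration only, since the real shape comes from the exported backbone .ms file:
// Assuming a float32 NHWC data input of shape {16, 224, 224, 3}:
auto data_tensor = session_->GetInputs()[data_index_];
int batch = data_tensor->shape()[0];                     // 16
size_t bytes_per_sample = data_tensor->Size() / batch;   // 224 * 224 * 3 * 4 = 602112 bytes per sample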
mindspore::tensor::MSTensor *NetRunner::SearchOutputsForSize(size_t size) const {
  auto outputs = session_->GetOutputs();  // get the output tensors
  for (auto it = outputs.begin(); it != outputs.end(); ++it) {
    if (it->second->ElementsNum() == size) return it->second;  // return the tensor whose element count matches
  }
  std::cout << "Model does not have an output tensor with size " << size << std::endl;
  return nullptr;
}
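A plausible use of this lookup, sketched under the assumption that the classification output holds batch_size_ * num_of_classes_ scores (both are NetRunner members in this example):
// Hedged usage sketch: find the prediction tensor by its element count.
auto scores = SearchOutputsForSize(batch_size_ * num_of_classes_);
if (scores == nullptr) {
  std::cerr << "prediction tensor not found" << std::endl;
}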
// Fill the model's input tensors with one batch of samples and one-hot labels
std::vector<int> NetRunner::FillInputData(const std::vector<DataLabelTuple> &dataset, int serially) const {
  std::vector<int> labels_vec;
  static unsigned int idx = 1;
  int total_size = dataset.size();  // number of samples in the dataset
  if (serially == 0) idx = 0;
  auto inputs = session_->GetInputs();
  char *input_data = reinterpret_cast<char *>(inputs.at(data_index_)->MutableData());  // raw buffer of the data input
  auto labels = reinterpret_cast<float *>(inputs.at(label_index_)->MutableData());     // raw buffer of the label input
  MS_ASSERT(total_size > 0);
  MS_ASSERT(input_data != nullptr);  // both conditions must hold before filling the batch
  std::fill(labels, labels + inputs.at(label_index_)->ElementsNum(), 0.f);  // clear the label tensor
  for (int i = 0; i < batch_size_; i++) {
    if (serially >= 0) {
      idx = ++idx % total_size;  // sequential sampling
    } else {
      idx = rand_r(&seed) % total_size;  // random sampling
    }
    int label = 0;
    char *data = nullptr;
    std::tie(data, label) = dataset[idx];
    std::copy(data, data + data_size_, input_data + i * data_size_);  // copy one sample into slot i of the batch
    labels[i * num_of_classes_ + label] = 1.0;  // the model expects labels in one-hot representation
    labels_vec.push_back(label);                // also keep the integer label for later accuracy checks
  }
  return labels_vec;
}
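The integer labels returned here can then be compared with the model's predictions. The following is a hedged sketch of such an accuracy check; test_set is a hypothetical std::vector<DataLabelTuple>, and the control flow is illustrative rather than taken from the example:
// Hedged sketch: measure batch accuracy from the class scores.
auto labels = FillInputData(test_set, -1);  // serially = -1 picks random samples
session_->RunGraph();
auto scores = SearchOutputsForSize(batch_size_ * num_of_classes_);
auto data = reinterpret_cast<float *>(scores->MutableData());
int correct = 0;
for (int b = 0; b < batch_size_; b++) {
  auto row = data + b * num_of_classes_;
  int best = std::max_element(row, row + num_of_classes_) - row;  // arg-max = predicted class
  if (best == labels[b]) correct++;
}
std::cout << "accuracy: " << static_cast<float>(correct) / batch_size_ << std::endl;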