#include "tools/anf_exporter/anf_exporter.h"
#include <list>
#include <memory>
// <memory>: smart pointers and related memory utilities
#include <string>
#include <functional>
#include <utility>
#include <vector>
// <vector>: dynamically resizable contiguous array
#include "tools/converter/converter_flags.h"
#include "abstract/abstract_value.h"
#include "mindspore/core/ir/primitive.h"
#include "mindspore/core/ops/op_utils.h"
#include "ops/fusion/partial_fusion.h"
#include "ops/depend.h"
#include "tools/converter/ops/ops_def.h"
#include "ops/quant_dtype_cast.h"
#include "tools/converter/quant_param_holder.h"
#include "tools/optimizer/common/gllo_utils.h"
#include "tools/converter/quantizer/bitpacking.h"
#include "src/common/utils.h"
#include "tools/common/graph_util.h"
#include "src/ops/ops_utils.h"
#include "tools/common/node_util.h"
#include "tools/converter/converter_context.h"
using mindspore::ops::PrimitiveC;
namespace mindspore::lite {//命名位置
namespace {
// Collects the CNodes of |fg| in topological order. Free variables of input
// sub-graphs that belong to |fg| are treated as predecessors so they are
// visited before the nodes that use them.
std::list<CNodePtr> GetOrderedCNodes(const FuncGraphPtr fg) {
  // Filter used by TopoSort: only follow nodes that belong to the same graph.
  auto belong_same_graph = std::bind(IncludeBelongGraph, fg, std::placeholders::_1);
  // Successor functor: for a CNode, yield any qualifying free variables of its
  // input sub-graphs first, followed by the CNode's own inputs.
  auto succ_include_fv = [&fg](const AnfNodePtr &node) -> std::vector<AnfNodePtr> {
    std::vector<AnfNodePtr> successors;
    if (node == nullptr || !node->isa<CNode>()) {
      return successors;  // non-CNodes have no successors to visit
    }
    auto cnode = node->cast<CNodePtr>();
    auto &inputs = cnode->inputs();
    // Check whether any input is a sub-graph that captures free variables
    // living in |fg|; if so, schedule those free variables first.
    for (const auto &input : inputs) {
      auto input_fg = GetValueNode<FuncGraphPtr>(input);
      if (input_fg == nullptr) {
        continue;
      }
      for (auto &fv : input_fg->free_variables_nodes()) {
        if (fv->func_graph() == fg && fg->nodes().contains(fv)) {
          successors.push_back(fv);
        }
      }
    }
    (void)successors.insert(successors.end(), inputs.begin(), inputs.end());
    return successors;
  };
  // Topologically sort from the graph's return node, then keep only CNodes.
  std::list<CNodePtr> cnodes;
  for (const auto &node : TopoSort(fg->get_return(), succ_include_fv, belong_same_graph)) {
    auto cnode = dyn_cast<CNode>(node);
    if (cnode != nullptr) {
      cnodes.push_back(cnode);
    }
  }
  return cnodes;
}
} // namespace
//定义一个输出张量的函数
// Adjusts the data type of |dst_node|'s first output tensor for post-training
// quantized models: ordinary ops produce int8 outputs; a QuantDTypeCast op
// keeps a float output only when it casts back to FP32.
//
// @param meta_graph  exported graph holding the tensor table (allTensors).
// @param primitive   primitive of the node being exported.
// @param dst_node    exported node whose first output tensor is adjusted.
// @return RET_OK on success, RET_ERROR if the primitive cast fails.
int AnfExporter::SetPostTrainOutputTensorType(const std::unique_ptr<schema::MetaGraphT> &meta_graph,
                                              const std::shared_ptr<mindspore::Primitive> &primitive,
                                              const std::unique_ptr<schema::CNodeT> &dst_node) {
  auto first_output_index = dst_node->outputIndex[0];
  auto first_tensor_output = meta_graph->allTensors[first_output_index].get();
  if (dst_node->quantType == schema::QuantType_PostTraining) {
    if (primitive->name() != mindspore::ops::kNameQuantDTypeCast) {
      // Any other op's output stays quantized as int8.
      first_tensor_output->dataType = kNumberTypeInt8;
    } else {
      // BUGFIX: primc was declared in this branch but used after the scope
      // closed (and the braces were unbalanced); the checks belong here.
      auto primc = primitive->cast<std::shared_ptr<mindspore::ops::QuantDTypeCast>>();
      if (primc == nullptr) {
        MS_LOG(ERROR) << "primitive is nullptr.";
        return RET_ERROR;
      }
      // Only a cast whose destination is FP32 keeps a float output.
      if (primc->get_dst_t() != kNumberTypeFloat32) {
        first_tensor_output->dataType = kNumberTypeInt8;
      }
    }
  }
  return RET_OK;
}
// Compresses the data of |tensor_input| when it carries initialized
// quantization parameters: weight-quantized tensors are repetition-packed
// (int8 storage for <=8 bits, int16 otherwise); widths that are not
// byte-aligned (neither 8 nor 16 bits) and were not repetition-packed are
// bit-packed instead.
//
// @param tensor_input  tensor to compress in place.
// @param dst_node      exported node owning the tensor (quant type / logging).
// @return RET_OK on success, RET_ERROR if bit packing fails.
static STATUS CompressTensor(schema::TensorT *tensor_input, const std::unique_ptr<schema::CNodeT> &dst_node) {
  if (!tensor_input->quantParams.empty() && tensor_input->quantParams.front()->inited) {
    int bit_num = tensor_input->quantParams.at(0)->numBits;
    // Pack repetition
    auto repetition_packed = false;
    MS_LOG(DEBUG) << dst_node->name;
    if (dst_node->quantType == schema::QuantType_QUANT_WEIGHT) {
      if (bit_num <= 8) {
        repetition_packed = PackRepetition<int8_t>(bit_num, tensor_input);
      } else {
        // BUGFIX: stray untranslated text after this statement was not a
        // comment and broke compilation. Widths above 8 bits pack into int16.
        repetition_packed = PackRepetition<int16_t>(bit_num, tensor_input);
      }
    }
    // Non-byte-aligned width that was not repetition-packed: bit-pack it.
    if (bit_num != 8 && bit_num != 16 && !repetition_packed) {
      auto status = DoBitPack(bit_num, tensor_input);
      if (status != RET_OK) {
        MS_LOG(ERROR) << "do bit pack failed. " << status;
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}