//
// Created by sjt on 3/1/23.
//
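//
// Standalone smoke tests for the Cast, Transpose, Gather and Expand operator
// entry points exposed through Operators_imp.h. Each test builds C_Tensors by
// hand, runs the operator, and compares the result against a hand-written
// expected buffer, printing a message on any mismatch. Requires C++17
// (if constexpr).
//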

#include <algorithm>   // std::copy
#include <cassert>
#include <cstdint>     // int8_t / int16_t / int32_t
#include <iostream>
#include <numeric>
#include <type_traits> // std::is_same
#include <vector>

#include "Operators_imp.h"

using namespace std;
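
// The four tests below repeat the same data-type dispatch on T. A small helper
// along these lines could consolidate it; this is a sketch only (the tests keep
// their inline if-constexpr chains) and it assumes INT8_T, INT16_T and
// FLOAT32_T are constants of a common type, as their use in this file suggests.
template<typename T>
auto data_type_of() {
    if constexpr (std::is_same<T, int8_t>::value) {
        return INT8_T;
    } else if constexpr (std::is_same<T, int16_t>::value) {
        return INT16_T;
    } else {
        return FLOAT32_T;
    }
}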

// Cast smoke test: runs ElementUnaryOperator_Run with copy_fn over a
// [1,2,3,4] tensor and checks the output equals the input element for element.
template<typename T>
void cast(T*) {

    T* input0 = new T[100];
    for (size_t i = 0; i < 100; i++) {
        input0[i] = (T)i;
    }
    T* output = new T[100];

    // set up tensors
    C_Tensors inputs, weights, outputs;
    weights.counts = 0;  // no weight tensors are used by this test; zero the
                         // count so the struct is never read uninitialized
    {
        inputs.counts = 1;
        vector<int32_t> shape{1, 2, 3, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        inputs.tensor[0].data = (float*)input0;
        inputs.tensor[0].shape.axes = shape.size();
        if constexpr (std::is_same<T, int8_t>::value) {
            inputs.tensor[0].data_type = INT8_T;
        } else if constexpr (std::is_same<T, int16_t>::value) {
            inputs.tensor[0].data_type = INT16_T;
        } else {
            inputs.tensor[0].data_type = FLOAT32_T;
        }
        copy(shape.begin(), shape.end(), inputs.tensor[0].shape.dim);
    }
    {
        outputs.counts = 1;
        vector<int32_t> shape{1, 2, 3, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        outputs.tensor[0].data = (float*)output;
        outputs.tensor[0].shape.axes = shape.size();
        copy(shape.begin(), shape.end(), outputs.tensor[0].shape.dim);
    }

    ElementUnaryOpAttribute attr;
    attr.fn_ = copy_fn;
    attr.name_ = "Cast";
    ElementUnaryOperator_Run(&inputs, &weights, &outputs, &attr);

    // copy_fn should leave every element unchanged
    size_t input_size = getSize(&inputs, 0);
    for (size_t i = 0; i < input_size; i++) {
        if (input0[i] != output[i]) {
            std::cout << "cast not correct, please check" << std::endl;
            break;
        }
    }
    delete[] input0;
    delete[] output;
}

// Transpose smoke test: permutes the last two axes of a [1,2,3,4] tensor
// (perm {0,1,3,2}) and checks the [1,2,4,3] result against a hand-computed
// reference.
template<typename T>
void transpose(T*) {

    T* input0 = new T[100];
    for (size_t i = 0; i < 100; i++) {
        input0[i] = (T)(i + 1);
    }
    T* output = new T[100];

    // set up tensors
    C_Tensors inputs, weights, outputs;
    weights.counts = 0;  // Transpose takes no weight tensors here
    {
        inputs.counts = 1;
        vector<int32_t> shape{1, 2, 3, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        inputs.tensor[0].data = (float*)input0;
        inputs.tensor[0].shape.axes = shape.size();
        if constexpr (std::is_same<T, int8_t>::value) {
            inputs.tensor[0].data_type = INT8_T;
        } else if constexpr (std::is_same<T, int16_t>::value) {
            inputs.tensor[0].data_type = INT16_T;
        } else {
            inputs.tensor[0].data_type = FLOAT32_T;
        }
        copy(shape.begin(), shape.end(), inputs.tensor[0].shape.dim);
    }
    {
        outputs.counts = 1;
        vector<int32_t> shape{1, 2, 4, 3};
        assert(shape.size() <= MAX_NUM_AXIS);

        outputs.tensor[0].data = (float*)output;
        outputs.tensor[0].shape.axes = shape.size();
        copy(shape.begin(), shape.end(), outputs.tensor[0].shape.dim);
    }

    ::TransposeAttribute attr;
    attr.name_ = "Transpose";
    attr.perm_.axes = 4;
    attr.perm_.dim[0] = 0;
    attr.perm_.dim[1] = 1;
    attr.perm_.dim[2] = 3;
    attr.perm_.dim[3] = 2;
    TransposeOperator_Run(&inputs, &weights, &outputs, &attr);

    // transpose the last two axes: [1,2,3,4] -> [1,2,4,3]
    /* From:
     * [[ 1,  2,  3,  4]     [[13, 14, 15, 16]
     *  [ 5,  6,  7,  8]      [17, 18, 19, 20]
     *  [ 9, 10, 11, 12]]     [21, 22, 23, 24]]
     * To:
     * [[ 1,  5,  9]          [[13, 17, 21]
     *  [ 2,  6, 10]           [14, 18, 22]
     *  [ 3,  7, 11]           [15, 19, 23]
     *  [ 4,  8, 12]]          [16, 20, 24]]
     */
    vector<T> expected_output{ 1,  5,  9,
                               2,  6, 10,
                               3,  7, 11,
                               4,  8, 12,
                              13, 17, 21,
                              14, 18, 22,
                              15, 19, 23,
                              16, 20, 24};

    size_t output_size = getSize(&outputs, 0);
    for (size_t i = 0; i < output_size; i++) {
        if (expected_output[i] != output[i]) {
            std::cout << "transpose not correct, please check" << std::endl;
            break;
        }
    }
    delete[] input0;
    delete[] output;
}

// Gather smoke test: gathers indices {0, 2} along axis 2 of a [2,3,4] tensor
// and checks the [2,3,1,2] result against a hand-computed reference.
template<typename T>
void gather(T*) {

    T* input0 = new T[100];
    for (size_t i = 0; i < 100; i++) {
        input0[i] = (T)(i + 1);
    }
    T* output = new T[100];

    // indices are [0, 2], passed as the second input tensor
    T* weight = new T[2];
    weight[0] = (T)0;
    weight[1] = (T)2;

    // set up tensors
    C_Tensors inputs, weights, outputs;
    weights.counts = 0;  // indices travel as inputs.tensor[1]; no weight tensors
    {
        inputs.counts = 2;
        vector<int32_t> shape{2, 3, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        inputs.tensor[0].data = (float*)input0;
        inputs.tensor[0].shape.axes = shape.size();
        if constexpr (std::is_same<T, int8_t>::value) {
            inputs.tensor[0].data_type = INT8_T;
        } else if constexpr (std::is_same<T, int16_t>::value) {
            inputs.tensor[0].data_type = INT16_T;
        } else {
            inputs.tensor[0].data_type = FLOAT32_T;
        }
        copy(shape.begin(), shape.end(), inputs.tensor[0].shape.dim);

        vector<int32_t> shape1{1, 2};
        inputs.tensor[1].data_type = FLOAT32_T;
        inputs.tensor[1].data = (float*)weight;
        inputs.tensor[1].shape.axes = shape1.size();
        copy(shape1.begin(), shape1.end(), inputs.tensor[1].shape.dim);
    }
    {
        outputs.counts = 1;
        vector<int32_t> shape{2, 3, 1, 2};
        assert(shape.size() <= MAX_NUM_AXIS);

        outputs.tensor[0].data = (float*)output;
        outputs.tensor[0].shape.axes = shape.size();
        copy(shape.begin(), shape.end(), outputs.tensor[0].shape.dim);
    }

    ::GatherAttribute attr;
    attr.name_ = "Gather";
    attr.axis_ = 2;
    GatherOperator_Run(&inputs, &weights, &outputs, &attr);

    // gather from [2,3,4] with indices [0,2] at axis = 2
    /* From:
     * [[ 1,  2,  3,  4]     [[13, 14, 15, 16]
     *  [ 5,  6,  7,  8]      [17, 18, 19, 20]
     *  [ 9, 10, 11, 12]]     [21, 22, 23, 24]]
     * To:
     * [[ 1,  3]              [[13, 15]
     *  [ 5,  7]               [17, 19]
     *  [ 9, 11]]              [21, 23]]
     */
    vector<T> expected_output{ 1,  3,
                               5,  7,
                               9, 11,
                              13, 15,
                              17, 19,
                              21, 23};

    size_t output_size = getSize(&outputs, 0);
    for (size_t i = 0; i < output_size; i++) {
        if (expected_output[i] != output[i]) {
            std::cout << "gather not correct, please check" << std::endl;
            break;
        }
    }
    delete[] input0;
    delete[] output;
    delete[] weight;
}

// Expand smoke test: broadcasts a [1,4] tensor to [2,4] and checks both output
// rows equal the input row.
template<typename T>
void expand(T*) {

    T* input0 = new T[100];
    for (size_t i = 0; i < 100; i++) {
        input0[i] = (T)(i + 1);
    }
    T* output = new T[100];

    // set up tensors
    C_Tensors inputs, weights, outputs;
    weights.counts = 0;  // Expand takes no weight tensors here
    {
        inputs.counts = 1;
        vector<int32_t> shape{1, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        inputs.tensor[0].data = (float*)input0;
        inputs.tensor[0].shape.axes = shape.size();
        if constexpr (std::is_same<T, int8_t>::value) {
            inputs.tensor[0].data_type = INT8_T;
        } else if constexpr (std::is_same<T, int16_t>::value) {
            inputs.tensor[0].data_type = INT16_T;
        } else {
            inputs.tensor[0].data_type = FLOAT32_T;
        }
        copy(shape.begin(), shape.end(), inputs.tensor[0].shape.dim);
    }
    {
        outputs.counts = 1;
        vector<int32_t> shape{2, 4};
        assert(shape.size() <= MAX_NUM_AXIS);

        outputs.tensor[0].data = (float*)output;
        outputs.tensor[0].shape.axes = shape.size();
        copy(shape.begin(), shape.end(), outputs.tensor[0].shape.dim);
    }

    ::ExpandAttribute attr;
    attr.name_ = "Expand";
    ExpandOperator_Run(&inputs, &weights, &outputs, &attr);

    // [1, 2, 3, 4] broadcast along the first axis becomes two identical rows
    vector<T> expected_output{1, 2, 3, 4,
                              1, 2, 3, 4};

    size_t output_size = getSize(&outputs, 0);
    for (size_t i = 0; i < output_size; i++) {
        if (expected_output[i] != output[i]) {
            std::cout << "expand not correct, please check" << std::endl;
            break;
        }
    }
    delete[] input0;
    delete[] output;
}

int main(int argc, char** argv) {
    // These pointers are only tags for template-argument deduction; the test
    // functions never dereference them, so initialize them to nullptr rather
    // than passing indeterminate values.
    float*   float_in = nullptr;
    int8_t*  int8_in  = nullptr;
    int16_t* int16_in = nullptr;

    cast(float_in);
    cast(int8_in);
    cast(int16_in);

    transpose(float_in);
    transpose(int8_in);
    transpose(int16_in);

    gather(float_in);
    gather(int8_in);
    gather(int16_in);

    expand(float_in);
    expand(int8_in);
    expand(int16_in);
    return 0;
}
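
// An equivalent dispatch with explicit template arguments would drop the tag
// pointers entirely (sketch of an alternative, not how the tests are invoked
// above), e.g.:
//
//   cast<float>(nullptr);      cast<int8_t>(nullptr);      cast<int16_t>(nullptr);
//   transpose<float>(nullptr); transpose<int8_t>(nullptr); transpose<int16_t>(nullptr);
//   gather<float>(nullptr);    gather<int8_t>(nullptr);    gather<int16_t>(nullptr);
//   expand<float>(nullptr);    expand<int8_t>(nullptr);    expand<int16_t>(nullptr);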