//
// TestUtils.h
// MNN
//
// Created by MNN on 2019/01/15.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef TestUtils_h
#define TestUtils_h
#include <assert.h>
#include <stdio.h>
#include <functional>
#include <string>
#include <MNN/MNNForwardType.h>
#include <MNN/Tensor.hpp>
#include <math.h>
#include <iostream>
#include <vector>
#include "core/Backend.hpp"
#include <MNN/expr/Executor.hpp>
#include <MNN/expr/ExecutorScope.hpp>
#include "MNN_generated.h"
/**
* @brief dispatch payload on all available backends
* @param payload test to perform
*/
void dispatch(std::function<void(MNNForwardType)> payload);
/**
* @brief dispatch payload on given backend
* @param payload test to perform
* @param backend given backend
*/
void dispatch(std::function<void(MNNForwardType)> payload, MNNForwardType backend);
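
// A minimal usage sketch (not part of this header's API): run the same check on every
// available backend via dispatch(). The lambda body is an illustrative placeholder,
// not code from this repository.
//
//   dispatch([](MNNForwardType type) {
//       // build inputs, execute the op under test on `type`,
//       // then verify outputs with the helpers below
//   });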
/**
 * @brief check the computed result against the ground truth, element by element
 * @param result computed data
 * @param rightData expected (ground-truth) data
 * @param size number of elements to compare
 * @param threshold maximum allowed absolute difference per element
 */
template <typename T>
bool checkVector(const T* result, const T* rightData, int size, T threshold){
    MNN_ASSERT(result != nullptr);
    MNN_ASSERT(rightData != nullptr);
    MNN_ASSERT(size >= 0);
    for(int i = 0; i < size; ++i){
        if(fabs(result[i] - rightData[i]) > threshold){
            std::cout << "No." << i << " error, right: " << rightData[i] << ", compute: " << result[i] << std::endl;
            return false;
        }
    }
    return true;
}
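
// Illustrative only: comparing a computed buffer against expected values with an
// absolute threshold. The arrays and tolerance are hypothetical.
//
//   float computed[4] = {0.999f, 2.001f, 3.0f, 4.0f};
//   float expected[4] = {1.0f,   2.0f,   3.0f, 4.0f};
//   bool ok = checkVector<float>(computed, expected, 4, 0.01f);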

/**
 * @brief check the computed result against the ground truth using a relative tolerance,
 *        scaled by the largest magnitude found in rightData
 */
template <typename T>
bool checkVectorByRelativeError(const T* result, const T* rightData, int size, float rtol) {
    MNN_ASSERT(result != nullptr);
    MNN_ASSERT(rightData != nullptr);
    MNN_ASSERT(size >= 0);
    float maxValue = 0.0f;
    for(int i = 0; i < size; ++i){
        maxValue = fmax(fabs(rightData[i]), maxValue);
    }
    float relativeError = maxValue * rtol;
    for(int i = 0; i < size; ++i){
        if (fabs(result[i] - rightData[i]) > relativeError) {
            std::cout << i << ": right: " << rightData[i] << ", compute: " << result[i] << std::endl;
            return false;
        }
    }
    return true;
}
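
// Illustrative only: the tolerance scales with the largest |rightData[i]|, so rtol acts
// as a relative error bound. Reusing the hypothetical buffers from the sketch above.
//
//   bool ok = checkVectorByRelativeError<float>(computed, expected, 4, 1e-3f);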

/**
 * @brief same as above, but the result is accepted if it matches either rightData
 *        or alterRightData within the relative tolerance
 */
template <typename T>
bool checkVectorByRelativeError(const T* result, const T* rightData, const T* alterRightData, int size, float rtol) {
    MNN_ASSERT(result != nullptr);
    MNN_ASSERT(rightData != nullptr);
    MNN_ASSERT(alterRightData != nullptr);
    MNN_ASSERT(size >= 0);
    float maxValue = 0.0f;
    for(int i = 0; i < size; ++i) {
        maxValue = fmax(fmax(fabs(rightData[i]), fabs(alterRightData[i])), maxValue);
    }
    float relativeError = maxValue * rtol;
    for(int i = 0; i < size; ++i) {
        if (fabs(result[i] - rightData[i]) > relativeError && fabs(result[i] - alterRightData[i]) > relativeError) {
            std::cout << i << ": right: " << rightData[i] << " or " << alterRightData[i] << ", compute: " << result[i] << std::endl;
            return false;
        }
    }
    return true;
}
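
// Illustrative only: the three-array overload is useful when two reference results are
// both acceptable (e.g. two legitimate rounding outcomes); the check passes if the
// computed value is close to either one. expectedAlt is a hypothetical second reference.
//
//   bool ok = checkVectorByRelativeError<float>(computed, expected, expectedAlt, 4, 1e-3f);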

// Resolve the effective test precision for the given backend, requested precision mode,
// and FP16 support flag.
int getTestPrecision(MNNForwardType forwardType, MNN::BackendConfig::PrecisionMode precision, bool isSupportFp16);

// Quantize an FP32 value to BF16 precision (returned as float) to emulate a reduced-precision backend.
float convertFP32ToBF16(float fp32Value);
// Quantize an FP32 value to FP16 precision (returned as float) to emulate a reduced-precision backend.
float convertFP32ToFP16(float fp32Value);
// Identity conversion: keep full FP32 precision.
inline float keepFP32Precision(float fp32Value) {
    return fp32Value;
}

// Forward type the current test is running on.
MNNForwardType getCurrentType();

using ConvertFP32 = float(*)(float fp32Value);

// Table of FP32 reference-value converters; entries reduce FP32 values to FP16/BF16
// precision so that expected outputs match what a reduced-precision backend computes.
const static std::vector<ConvertFP32> FP32Converter = {
    keepFP32Precision,
    keepFP32Precision,
#ifdef MNN_SUPPORT_BF16
    convertFP32ToBF16,
#else
    keepFP32Precision,
#endif
    convertFP32ToFP16
};
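
// A hedged usage sketch, assuming the value returned by getTestPrecision can be used to
// index FP32Converter directly; the variable names below are illustrative.
//
//   int precisionIdx = getTestPrecision(getCurrentType(), MNN::BackendConfig::Precision_Low, true);
//   ConvertFP32 cast = FP32Converter[precisionIdx];
//   float expectedLowPrecision = cast(1.0f / 3.0f);
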
#endif /* TestUtils_h */