-
Notifications
You must be signed in to change notification settings - Fork 93
/
test_frcnn.cpp
198 lines (170 loc) · 6.53 KB
/
test_frcnn.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
#include <cstdio>

#include <string>

#include <gflags/gflags.h>
#include <glog/logging.h>
#include "boost/algorithm/string.hpp"

#include <caffe/common.hpp>
#include "caffe/util/benchmark.hpp"
#include "caffe/util/signal_handler.h"
#include "caffe/api/api.hpp"
#include "caffe/layers/input_layer.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/layers/flatten_layer.hpp"
#include "caffe/layers/concat_layer.hpp"
#include "caffe/layers/reshape_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/layers/roi_pooling_layer.hpp"
//faster-rcnn
#include "caffe/FRCNN/util/frcnn_vis.hpp"
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/api/FRCNN/frcnn_api.hpp"
// Register layers (original comment was GBK-garbled Chinese: "register layers")
// Explicitly instantiate and register every layer type used by the Faster
// R-CNN test net. This references the layer classes from this translation
// unit, which is typically needed when caffe is linked as a static library:
// without these references the registrations could be dropped by the linker
// and the layer factory would fail to create the layers at runtime.
namespace caffe
{
// Frcnn-specific proposal layer, registered under the name "FrcnnProposal".
namespace Frcnn{
extern INSTANTIATE_CLASS(FrcnnProposalLayer);
REGISTER_LAYER_CLASS(FrcnnProposal);
}
// Stock caffe layers used by the test network prototxt.
extern INSTANTIATE_CLASS(InputLayer);
REGISTER_LAYER_CLASS(Input);
extern INSTANTIATE_CLASS(SplitLayer);
REGISTER_LAYER_CLASS(Split);
extern INSTANTIATE_CLASS(ConvolutionLayer);
REGISTER_LAYER_CLASS(Convolution);
extern INSTANTIATE_CLASS(InnerProductLayer);
REGISTER_LAYER_CLASS(InnerProduct);
extern INSTANTIATE_CLASS(DropoutLayer);
REGISTER_LAYER_CLASS(Dropout);
extern INSTANTIATE_CLASS(ReLULayer);
REGISTER_LAYER_CLASS(ReLU);
extern INSTANTIATE_CLASS(PReLULayer);
REGISTER_LAYER_CLASS(PReLU);
extern INSTANTIATE_CLASS(PoolingLayer);
REGISTER_LAYER_CLASS(Pooling);
extern INSTANTIATE_CLASS(LRNLayer);
REGISTER_LAYER_CLASS(LRN);
extern INSTANTIATE_CLASS(SoftmaxLayer);
REGISTER_LAYER_CLASS(Softmax);
extern INSTANTIATE_CLASS(ROIPoolingLayer);
REGISTER_LAYER_CLASS(ROIPooling);
extern INSTANTIATE_CLASS(FlattenLayer);
REGISTER_LAYER_CLASS(Flatten);
extern INSTANTIATE_CLASS(ConcatLayer);
REGISTER_LAYER_CLASS(Concat);
extern INSTANTIATE_CLASS(ReshapeLayer);
REGISTER_LAYER_CLASS(Reshape);
}
// Command-line flags (gflags). They are populated by caffe::GlobalInit()
// (which calls gflags parsing) at the top of main(); all string flags
// default to "" and are treated as "not provided" when empty.
DEFINE_string(gpu, "",
"Optional; run in GPU mode on the given device ID, Empty is CPU");
DEFINE_string(model, "",
"The model definition protocol buffer text file.");
DEFINE_string(weights, "",
"Trained Model By Faster RCNN End-to-End Pipeline.");
DEFINE_string(default_c, "",
"Default config file path.");
DEFINE_string(image_list, "",
"Optional;Test images list.");
DEFINE_string(image_root, "",
"Optional;Test images root directory.");
DEFINE_string(out_file, "",
"Optional;Output images file.");
DEFINE_int32(max_per_image, 100,
"Limit to max_per_image detections *over all classes*");
inline std::string INT(float x) { char A[100]; sprintf_s(A, "%.1f", x); return std::string(A); };
inline std::string FloatToString(float x) { char A[100]; sprintf_s(A, "%.4f", x); return std::string(A); };
int main(int argc, char** argv){
// Print output to stderr (while still logging).
FLAGS_alsologtostderr = 1;
// Set version
gflags::SetVersionString(AS_STRING(CAFFE_VERSION));
// Usage message.
gflags::SetUsageMessage("command line brew\n"
"usage: demo_frcnn_api <args>\n\n"
"args:\n"
" --gpu 7 use 7-th gpu device, default is cpu model\n"
" --model file protocol buffer text file\n"
" --weights file Trained Model\n"
" --default_c file Default Config File\n"
" --image_list file input image list\n"
" --image_root file input image dir\n"
" --max_per_image file limit to max_per_image detections\n"
" --out_file file output amswer file");
// Run tool or show usage.
caffe::GlobalInit(&argc, &argv);
CHECK(FLAGS_gpu.size() == 0 || FLAGS_gpu.size() == 1 || (FLAGS_gpu.size() == 2 && FLAGS_gpu == "-1")) << "Can only support one gpu or none or -1(for cpu)";
int gpu_id = -1;
if (FLAGS_gpu.size() > 0)
gpu_id = boost::lexical_cast<int>(FLAGS_gpu);
if (gpu_id >= 0) {
#ifndef CPU_ONLY
caffe::Caffe::SetDevice(gpu_id);
caffe::Caffe::set_mode(caffe::Caffe::GPU);
#else
LOG(FATAL) << "CPU ONLY MODEL, BUT PROVIDE GPU ID";
#endif
}
else {
caffe::Caffe::set_mode(caffe::Caffe::CPU);
}
std::string proto_file = FLAGS_model.c_str();
std::string model_file = FLAGS_weights.c_str();
std::string default_config_file = FLAGS_default_c.c_str();
const std::string image_list = FLAGS_image_list.c_str();
const std::string image_root = FLAGS_image_root.c_str();
const std::string out_file = FLAGS_out_file.c_str();
const int max_per_image = FLAGS_max_per_image;
//API::Set_Config(default_config_file);
API::Detector detector(proto_file, model_file, default_config_file, gpu_id >= 0, false);
LOG(INFO) << "image list : " << image_list;
LOG(INFO) << "output file : " << out_file;
LOG(INFO) << "image root : " << image_root;
LOG(INFO) << "max_per_image : " << max_per_image;
std::ifstream infile(image_list.c_str());
std::ofstream otfile(out_file.c_str());
API::DataPrepare data_load;
int count = 0;
while (data_load.load_WithDiff(infile)) {
std::string image = data_load.GetImagePath("");
cv::Mat cv_image = cv::imread(image_root + image);
std::vector<caffe::Frcnn::BBox<float> > results = detector.predict(cv_image);
otfile << "# " << data_load.GetImageIndex() << std::endl;
otfile << image << std::endl;
float image_thresh = 0;
if (max_per_image > 0) {
std::vector<float> image_score;
for (size_t obj = 0; obj < results.size(); obj++) {
image_score.push_back(results[obj].confidence);
}
std::sort(image_score.begin(), image_score.end(), std::greater<float>());
if (max_per_image > image_score.size()) {
if (image_score.size() > 0)
image_thresh = image_score.back();
}
else {
image_thresh = image_score[max_per_image - 1];
}
}
std::vector<caffe::Frcnn::BBox<float> > filtered_res;
for (size_t obj = 0; obj < results.size(); obj++) {
if (results[obj].confidence >= image_thresh) {
filtered_res.push_back(results[obj]);
}
}
const int ori_res_size = results.size();
results = filtered_res;
otfile << results.size() << std::endl;
for (size_t obj = 0; obj < results.size(); obj++) {
otfile << results[obj].id << " " << INT(results[obj][0]) << " " << INT(results[obj][1]) << " " << INT(results[obj][2]) << " " << INT(results[obj][3]) << " " << FloatToString(results[obj].confidence) << std::endl;
}
LOG(INFO) << "Handle " << ++count << " th image : " << image << ", with image_thresh : " << image_thresh << ", "
<< ori_res_size << " -> " << results.size() << " boxes";
}
infile.close();
otfile.close();
return 0;
}