Skip to content

Commit d0f6ff4

Browse files
committed
added functional tests
1 parent 94c1d66 commit d0f6ff4

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

47 files changed

+4627
-109
lines changed

Dockerfile

+1-1
Original file line numberDiff line numberDiff line change
@@ -106,4 +106,4 @@ RUN bazel test --test_summary=detailed --test_output=all //tensorflow_serving/ov
106106

107107
RUN cp /openvino/bin/intel64/Release/lib/plugins.xml /root/.cache/bazel/_bazel_root/*/execroot/tf_serving/bazel-out/k8-opt/bin/_solib_k8/*/
108108

109-
ENTRYPOINT ./bazel-bin/tensorflow_serving/ovms/server_cc --config_path /models/config.json --port 9178
109+
ENTRYPOINT ["./bazel-bin/tensorflow_serving/ovms/server_cc"]

Jenkinsfile

+5
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,11 @@ pipeline {
1313
sh 'make test_perf'
1414
}
1515
}
16+
stage('functional tests') {
17+
steps {
18+
sh 'make test_functional'
19+
}
20+
}
1621

1722
stage('throughput test') {
1823
steps {

Makefile

+7-4
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ $(ACTIVATE):
3636
@test -d $(VIRTUALENV_DIR) || $(VIRTUALENV_EXE) $(VIRTUALENV_DIR)
3737
@. $(ACTIVATE); pip$(PY_VERSION) install --upgrade pip
3838
@. $(ACTIVATE); pip$(PY_VERSION) install -vUqq setuptools
39-
@. $(ACTIVATE); pip$(PY_VERSION) install -qq -r tests/performance/requirements.txt
39+
@. $(ACTIVATE); pip$(PY_VERSION) install -qq -r tests/requirements.txt
4040
@touch $(ACTIVATE)
4141

4242
style:
@@ -53,7 +53,7 @@ test_perf: venv
5353
@docker rm --force server-test || true
5454
@echo "Starting docker image"
5555
@./tests/performance/download_model.sh
56-
@docker run -d --name server-test -v $(HOME)/resnet50:/models/resnet50 -p 9178:9178 cpp-experiments:latest ; sleep 5
56+
@docker run -d --name server-test -v $(HOME)/resnet50:/models/resnet50 -p 9178:9178 cpp-experiments:latest --model_name resnet --model_path /models/resnet50/1 --port 9178; sleep 5
5757
@echo "Running latency test"
5858
@. $(ACTIVATE); python3 tests/performance/grpc_latency.py --images_numpy_path tests/performance/imgs.npy --labels_numpy_path tests/performance/labels.npy --iteration 1000 --batchsize 1 --report_every 100 --input_name data
5959
@echo "Removing test container"
@@ -64,8 +64,11 @@ test_throughput: venv
6464
@docker rm --force server-test || true
6565
@echo "Starting docker image"
6666
@./tests/performance/download_model.sh
67-
@docker run -d --name server-test -v $(HOME)/resnet50:/models/resnet50 -p 9178:9178 cpp-experiments:latest ; sleep 5
67+
@docker run -d --name server-test -v $(HOME)/resnet50:/models/resnet50 -p 9178:9178 cpp-experiments:latest --model_name resnet --model_path /models/resnet50/1 --port 9178; sleep 5
6868
@echo "Running throughput test"
6969
@. $(ACTIVATE); cd tests/performance; ./grpc_throughput.sh --images_numpy_path imgs.npy --labels_numpy_path labels.npy --iteration 500 --batchsize 1 --input_name data
7070
@echo "Removing test container"
71-
@docker rm --force server-test
71+
@docker rm --force server-test
72+
73+
test_functional: venv
74+
@. $(ACTIVATE); pytest -s --image=cpp-experiments:latest tests/functional/

config.json

+8-4
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,15 @@
11
{
2-
"models": [{
2+
"model_config_list": [
3+
{
4+
"config": {
35
"name": "resnet",
4-
"path": "/models/resnet50/1/resnet_50_i8.xml",
6+
"base_path": "/models/resnet50/1",
57
"version": 0,
6-
"backend": "CPU",
8+
"target_device": "CPU",
79
"layout": {
810
"data": "NCHW"
911
}
10-
}]
12+
}
13+
}
14+
]
1115
}

src/BUILD

+2
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ cc_binary(
2525
"config.h",
2626
"model.cpp",
2727
"model.h",
28+
"modelconfig.h",
2829
"modelmanager.cpp",
2930
"modelmanager.h",
3031
"modelinstance.cpp",
@@ -57,6 +58,7 @@ cc_test(
5758
"config.h",
5859
"model.cpp",
5960
"model.h",
61+
"modelconfig.h",
6062
"modelmanager.cpp",
6163
"modelmanager.h",
6264
"modelinstance.cpp",

src/config.cpp

+6-6
Original file line numberDiff line numberDiff line change
@@ -81,17 +81,17 @@ Config& Config::parse(int argc, char** argv) {
8181

8282
result = std::make_unique<cxxopts::ParseResult>(options->parse(argc, argv));
8383

84-
if (result->count("help"))
84+
if (result->count("help") || result->arguments().size() == 0)
8585
{
86-
std::cout << options->help({"", "config", "model"}) << std::endl;
87-
exit(1);
86+
std::cout << options->help({"", "config", "model"}) << std::endl;
87+
exit(0);
8888
}
8989

9090
validate();
9191
}
9292
catch (const cxxopts::OptionException& e)
9393
{
94-
std::cerr << "error parsing options: " << e.what() << std::endl;
94+
std::cout << "error parsing options: " << e.what() << std::endl;
9595
exit(1);
9696
}
9797

@@ -101,13 +101,13 @@ Config& Config::parse(int argc, char** argv) {
101101
bool Config::validate() {
102102
// cannot set both config path & model_name/model_path
103103
if (result->count("config_path") && (result->count("model_name") || result->count("model_path"))) {
104-
std::cerr << "Use either config_path or model_path with model_name" << std::endl;
104+
std::cout << "Use either config_path or model_path with model_name" << std::endl;
105105
exit(2);
106106
}
107107

108108
// port and rest_port cannot be the same
109109
if (port() == restPort()) {
110-
std::cerr << "port and rest_port cannot have the same values" << std::endl;
110+
std::cout << "port and rest_port cannot have the same values" << std::endl;
111111
exit(3);
112112
}
113113

src/config.h

+3-1
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,9 @@ namespace ovms {
8686
* @return std::string
8787
*/
8888
const std::string& configPath() {
89-
return result->operator[]("config_path").as<std::string>();
89+
if (result->count("config_path"))
90+
return result->operator[]("config_path").as<std::string>();
91+
return empty;
9092
}
9193

9294
/**

src/model.cpp

+6-12
Original file line numberDiff line numberDiff line change
@@ -17,22 +17,16 @@
1717

1818
namespace ovms {
1919

20-
Status Model::addVersion( const std::string& name,
21-
const std::string& path,
22-
const std::string& backend,
23-
const model_version_t& version,
24-
const size_t batchSize,
25-
const shapesMap& shapes,
26-
const layoutsMap& layouts) {
20+
Status Model::addVersion(const ModelConfig& config) {
2721
std::shared_ptr<ModelInstance> modelInstance = std::make_shared<ModelInstance>();
28-
auto status = modelInstance->loadModel(path, backend, version, batchSize, shapes, layouts);
22+
auto status = modelInstance->loadModel(config);
2923
if (status != Status::OK) {
3024
return status;
3125
}
32-
this->name = name;
33-
if (this->defaultVersion < version)
34-
this->defaultVersion = version;
35-
modelVersions[version] = std::move(modelInstance);
26+
this->name = config.name;
27+
if (this->defaultVersion < config.version)
28+
this->defaultVersion = config.version;
29+
modelVersions[config.version] = std::move(modelInstance);
3630

3731
return Status::OK;
3832
}

src/model.h

+2-14
Original file line numberDiff line numberDiff line change
@@ -98,23 +98,11 @@ namespace ovms {
9898
/**
9999
* @brief Adds a new version of ModelInstance to the list of versions
100100
*
101-
* @param name model name
102-
* @param path to the model
103-
* @param backend
104-
* @param version
105-
* @param batchSize
106-
* @param shape
107-
* @param layout
101+
* @param config model configuration
108102
*
109103
* @return status
110104
*/
111-
Status addVersion( const std::string& name,
112-
const std::string& path,
113-
const std::string& backend,
114-
const model_version_t& version,
115-
const size_t batchSize,
116-
const shapesMap& shapes = {},
117-
const layoutsMap& layouts = {});
105+
Status addVersion(const ModelConfig& config);
118106

119107
/**
120108
* @brief Removes model version from the list

src/modelconfig.h

+109
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
//*****************************************************************************
2+
// Copyright 2020 Intel Corporation
3+
//
4+
// Licensed under the Apache License, Version 2.0 (the "License");
5+
// you may not use this file except in compliance with the License.
6+
// You may obtain a copy of the License at
7+
//
8+
// http://www.apache.org/licenses/LICENSE-2.0
9+
//
10+
// Unless required by applicable law or agreed to in writing, software
11+
// distributed under the License is distributed on an "AS IS" BASIS,
12+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
// See the License for the specific language governing permissions and
14+
// limitations under the License.
15+
//*****************************************************************************
16+
#pragma once
17+
18+
#include <algorithm>
19+
#include <string>
20+
21+
namespace ovms {
22+
23+
using shapesMap = std::map<std::string, std::vector<size_t>>;
24+
using layoutsMap = std::map<std::string, std::string>;
25+
using model_version_t = int64_t;
26+
27+
/**
28+
* @brief This class represents model configuration
29+
*/
30+
class ModelConfig {
31+
public:
32+
/**
33+
* @brief Model name
34+
*/
35+
std::string name;
36+
37+
/**
38+
* @brief Model uri path
39+
*/
40+
std::string basePath;
41+
42+
/**
43+
* @brief Device backend
44+
*/
45+
std::string backend;
46+
47+
/**
48+
* @brief Batch size
49+
*/
50+
size_t batchSize;
51+
52+
/**
53+
* @brief Model version policy
54+
*/
55+
std::string modelVersionPolicy;
56+
57+
/**
58+
* @brief Nireq
59+
*/
60+
uint64_t nireq;
61+
62+
/**
63+
* @brief Plugin config
64+
*/
65+
std::string pluginConfig;
66+
67+
/**
68+
* @brief Shape for single input
69+
*/
70+
std::vector<size_t> shape;
71+
72+
/**
73+
* @brief Map of shapes
74+
*/
75+
shapesMap shapes;
76+
77+
/**
78+
* @brief Map of layouts
79+
*/
80+
layoutsMap layouts;
81+
82+
/**
83+
* @brief Model version
84+
*
85+
*/
86+
model_version_t version;
87+
88+
/**
89+
* @brief Construct a new ModelConfig with default values
90+
*/
91+
ModelConfig() {
92+
backend = "CPU";
93+
nireq = 1;
94+
version = 0;
95+
}
96+
97+
/**
98+
* @brief Parse shapes given as string for backward compatibility with OVMS python version
99+
*/
100+
void addShapes(std::string) {
101+
}
102+
103+
/**
104+
* @brief Parse layout if given by string
105+
*/
106+
void addLayouts(std::string) {
107+
}
108+
};
109+
}

src/modelinstance.cpp

+41-13
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,10 @@
1313
// See the License for the specific language governing permissions and
1414
// limitations under the License.
1515
//*****************************************************************************
16+
#include <dirent.h>
17+
#include <iostream>
1618
#include <string>
19+
#include <sys/types.h>
1720

1821
#include "modelinstance.h"
1922

@@ -51,23 +54,48 @@ void ModelInstance::loadTensors(tensorMap& map,
5154
}
5255
}
5356

54-
Status ModelInstance::loadModel( const std::string& path,
55-
const std::string& backend,
56-
const model_version_t& version,
57-
const size_t batchSize,
58-
const shapesMap& shapes,
59-
const layoutsMap& layouts) {
60-
this->path = path;
61-
this->version = version;
62-
this->backend = backend;
57+
// Temporary methods. To be replaced with a proper storage class.
58+
bool endsWith(std::string token, std::string match)
59+
{
60+
auto it = match.begin();
61+
return token.size() >= match.size() &&
62+
std::all_of(std::next(token.begin(),token.size() - match.size()), token.end(), [&it](const char & c){
63+
return ::tolower(c) == ::tolower(*(it++)) ;
64+
});
65+
}
66+
67+
std::string getModelFile(const std::string path) {
68+
struct dirent *entry;
69+
DIR *dir = opendir(path.c_str());
70+
71+
while ((entry = readdir(dir)) != NULL) {
72+
auto name = std::string(entry->d_name);
73+
if (endsWith(name, ".xml")) {
74+
closedir(dir);
75+
if (endsWith(name, "/")) {
76+
return path + name;
77+
} else {
78+
return path + "/" + name;
79+
}
80+
}
81+
}
82+
closedir(dir);
83+
84+
return path;
85+
}
86+
87+
Status ModelInstance::loadModel(const ModelConfig& config) {
88+
this->path = config.basePath;
89+
this->version = config.version;
90+
this->backend = config.backend;
6391

6492
// load network
6593
try {
66-
network = engine.ReadNetwork(path);
67-
this->batchSize = batchSize > 0 ? batchSize : network.getBatchSize();
94+
network = engine.ReadNetwork(getModelFile(path));
95+
this->batchSize = config.batchSize > 0 ? config.batchSize : network.getBatchSize();
6896

69-
loadTensors(inputsInfo, network.getInputsInfo(), shapes, layouts);
70-
loadTensors(outputsInfo, network.getOutputsInfo(), shapes, layouts);
97+
loadTensors(inputsInfo, network.getInputsInfo(), config.shapes, config.layouts);
98+
loadTensors(outputsInfo, network.getOutputsInfo(), config.shapes, config.layouts);
7199

72100
execNetwork = engine.LoadNetwork(network, backend, {{ "CPU_THROUGHPUT_STREAMS", std::to_string(OV_STREAMS_COUNT)}});
73101
request = execNetwork.CreateInferRequest();

0 commit comments

Comments
 (0)