### Config for TensorFlow Serving.
## SavedModel config.
SavedModel:
  model_dir: SavedModel
  model_type: wide_deep
  checkpoint_path:
  as_text: false
  model_version: 1
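## Example (sketch): once the wide_deep SavedModel has been exported under
## <model_dir>/<model_version> (here SavedModel/1), it can be served locally with
## the stock model server binary referenced in the Deployment below; the model
## name and base path shown here are illustrative:
##   tensorflow_model_server --port=9000 --model_name=wide_deep --model_base_path=/path/to/SavedModel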
## TODO: Kubernetes Deployment and Service for the TensorFlow Serving model server
## (Inception example used as a placeholder).
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: inception-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: inception-server
    spec:
      containers:
      - name: inception-container
        image: gcr.io/tensorflow-serving/inception
        command:
        - /bin/sh
        - -c
        args:
        - serving/bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server
          --port=9000 --model_name=inception --model_base_path=serving/inception-export
        ports:
        - containerPort: 9000
---
apiVersion: v1
kind: Service
metadata:
  labels:
    run: inception-service
  name: inception-service
spec:
  ports:
  - port: 9000
    targetPort: 9000
  selector:
    app: inception-server
  type: LoadBalancer
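## Example (sketch): when the Deployment and Service above are actually used, they
## would typically be split out of this config into their own manifest and applied
## with kubectl (the filename is illustrative):
##   kubectl apply -f serving_k8s.yaml
## Note that current Kubernetes clusters expect Deployments under apps/v1 rather
## than extensions/v1beta1.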