
Commit ebfe5a0

merge develop branch

2 parents c8adc2c + 3c957af

1,170 files changed (+47690, -45681 lines)

.gitignore (+2)

@@ -25,5 +25,7 @@ third_party/
 bazel-*
 third_party/

+build_*
 # clion workspace.
 cmake-build-*
+model_test

.travis.yml (-1)

@@ -12,7 +12,6 @@ services:
 os:
   - linux
 env:
-  - JOB=doc
   - JOB=check_style
   - JOB=build_android
 addons:

CMakeLists.txt (+18, -1)

@@ -70,8 +70,11 @@ option(WITH_ANAKIN "Compile with Anakin library" OFF)
 option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
 option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF)
 option(WITH_INFERENCE "Compile fluid inference library" ON)
+option(ON_INFER "Turn on inference optimization." OFF)
+option(WITH_INFERENCE_API_TEST "Test fluid inference high-level api interface" OFF)
 option(WITH_SYSTEM_BLAS "Use system blas library" OFF)
 option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
+option(WITH_FAST_MATH "Make use of fast math library, might affect the precision to some extent" ON)

 # PY_VERSION
 if(NOT PY_VERSION)
@@ -126,6 +129,9 @@ set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
 set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING
   "A path setting fluid shared and static libraries")

+set(FLUID_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_install_dir" CACHE STRING
+  "A path setting fluid inference shared and static libraries")
+
 if (WITH_C_API AND WITH_PYTHON)
   message(WARNING "It is suggest not embedded a python interpreter in Paddle "
     "when using C-API. It will give an unpredictable behavior when using a "
@@ -175,6 +181,7 @@ include(external/eigen) # download eigen3
 include(external/pybind11) # download pybind11
 include(external/cares)
 include(external/cub)
+include(external/xxhash) # download xxhash

 if (NOT WIN32)
 # there is no official support of snappystream, warpctc, nccl, cupti in windows
@@ -213,9 +220,11 @@ include(configure) # add paddle env configuration
 if(WITH_GPU)
   include(cuda)
   include(tensorrt)
+endif()
+if(WITH_MKL OR WITH_MKLML)
   include(external/anakin)
 elseif()
-  set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in GPU only now." FORCE)
+  set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in MKL only now." FORCE)
 endif()

 include(flags) # set paddle compile flags
@@ -297,3 +306,11 @@ if(WITH_DOC)
   find_python_module(recommonmark REQUIRED)
   add_subdirectory(doc)
 endif()
+
+if (ON_INFER)
+  message(STATUS "On inference mode, will take place some specific optimization.")
+  add_definitions(-DPADDLE_ON_INFERENCE)
+else()
+  #TODO(luotao), combine this warning with `make inference_lib_dist` command.
+  message(WARNING "On inference mode, will take place some specific optimization. Turn on the ON_INFER flag when building inference_lib only.")
+endif()

Dockerfile (+14, -4)

@@ -24,6 +24,7 @@ COPY ./paddle/scripts/docker/root/ /root/

 RUN apt-get update && \
     apt-get install -y --allow-downgrades patchelf \
+    python3 python3-dev python3-pip \
     git python-pip python-dev python-opencv openssh-server bison \
     libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
     wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
@@ -53,7 +54,7 @@ RUN curl -s -q https://glide.sh/get | sh
 # and its size is only one-third of the official one.
 # 2. Manually add ~IPluginFactory() in IPluginFactory class of NvInfer.h, otherwise, it couldn't work in paddle.
 # See https://github.com/PaddlePaddle/Paddle/issues/10129 for details.
-RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
+RUN wget -qO- http://paddlepaddledeps.cdn.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
     tar -xz -C /usr/local && \
     cp -rf /usr/local/TensorRT/include /usr && \
     cp -rf /usr/local/TensorRT/lib /usr
@@ -70,24 +71,33 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
 # specify sphinx version as 1.5.6 and remove -U option for [pip install -U
 # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest
 # version(1.7.1 for now), which causes building documentation failed.
-RUN easy_install -U pip && \
-    pip install -U wheel && \
+RUN pip3 install -U wheel && \
+    pip3 install -U docopt PyYAML sphinx==1.5.6 && \
+    pip3 install sphinx-rtd-theme==0.1.9 recommonmark && \
+    easy_install -U pip && \
+    pip install -U pip setuptools wheel && \
     pip install -U docopt PyYAML sphinx==1.5.6 && \
     pip install sphinx-rtd-theme==0.1.9 recommonmark

-RUN pip install pre-commit 'ipython==5.3.0' && \
+RUN pip3 install 'pre-commit==1.10.4' 'ipython==5.3.0' && \
+    pip3 install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
+    pip3 install opencv-python && \
+    pip install 'pre-commit==1.10.4' 'ipython==5.3.0' && \
     pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
     pip install opencv-python

 #For docstring checker
+RUN pip3 install pylint pytest astroid isort
 RUN pip install pylint pytest astroid isort LinkChecker

 COPY ./python/requirements.txt /root/
+RUN pip3 install -r /root/requirements.txt
 RUN pip install -r /root/requirements.txt

 # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use
 # the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2
 RUN apt-get install -y libssl-dev libffi-dev
+RUN pip3 install certifi urllib3[secure]
 RUN pip install certifi urllib3[secure]

README.md (+12, -19)

@@ -2,8 +2,8 @@


 [![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle)
-[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html)
-[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html)
+[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://paddlepaddle.org/documentation/docs/en/1.0/getstarted/index_en.html)
+[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html)
 [![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases)
 [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)

@@ -19,17 +19,17 @@ Our vision is to enable deep learning for everyone via PaddlePaddle.
 Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle.


-### Latest PaddlePaddle Release: [Fluid 0.14.0](https://github.com/PaddlePaddle/Paddle/tree/v0.14.0)
+### Latest PaddlePaddle Release: [Fluid 1.0.1](https://github.com/PaddlePaddle/Paddle/tree/release/1.0.0)
 ### Install Latest Stable Release:
 ```
 # Linux CPU
 pip install paddlepaddle
 # Linux GPU cuda9cudnn7
 pip install paddlepaddle-gpu
 # Linux GPU cuda8cudnn7
-pip install paddlepaddle-gpu==0.14.0.post87
+pip install paddlepaddle-gpu==1.0.1.post87
 # Linux GPU cuda8cudnn5
-pip install paddlepaddle-gpu==0.14.0.post85
+pip install paddlepaddle-gpu==1.0.1.post85

 # For installation on other platform, refer to http://paddlepaddle.org/
 ```
@@ -76,33 +76,26 @@ pip install paddlepaddle-gpu==0.14.0.post85

 ## Installation

-It is recommended to check out the
-[Docker installation guide](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/build_and_install/docker_install_en.html)
-before looking into the
-[build from source guide](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/build_and_install/build_from_source_en.html).
+It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) on our website.

 ## Documentation

-We provide [English](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html) and
-[Chinese](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html) documentation.
+We provide [English](http://paddlepaddle.org/documentation/docs/en/1.0.0/getstarted/index_en.html) and
+[Chinese](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) documentation.

-- [Deep Learning 101](http://www.paddlepaddle.org/docs/develop/book/01.fit_a_line/index.html)
+- [Deep Learning 101](https://github.com/PaddlePaddle/book)

   You might want to start from this online interactive book that can run in a Jupyter Notebook.

-- [Distributed Training](http://www.paddlepaddle.org/docs/develop/documentation/en/howto/cluster/index_en.html)
+- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/1.0/user_guides/howto/training/cluster_howto.html)

   You can run distributed training jobs on MPI clusters.

-- [Distributed Training on Kubernetes](http://www.paddlepaddle.org/docs/develop/documentation/en/howto/cluster/multi_cluster/k8s_en.html)
-
-  You can also run distributed training jobs on Kubernetes clusters.
-
-- [Python API](http://www.paddlepaddle.org/docs/develop/api/en/overview.html)
+- [Python API](http://paddlepaddle.org/documentation/api/zh/1.0/fluid.html)

   Our new API enables much shorter programs.

-- [How to Contribute](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/dev/contribute_to_paddle_en.html)
+- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/1.0/advanced_usage/development/contribute_to_paddle.html)

   We appreciate your contributions!

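As a quick sanity check after running the pip commands quoted in the README diff above, the snippet below reports which PaddlePaddle wheel (if any) is installed. This is a minimal sketch using only the standard pkg_resources machinery; the package names are the ones shown in the README.

```python
# Minimal sketch: report which PaddlePaddle wheel is installed and its version.
# Uses only pkg_resources from setuptools; package names come from the README above.
import pkg_resources

for name in ("paddlepaddle", "paddlepaddle-gpu"):
    try:
        dist = pkg_resources.get_distribution(name)
        print("{} {} is installed".format(name, dist.version))
    except pkg_resources.DistributionNotFound:
        print("{} is not installed".format(name))
```
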
benchmark/fluid/Dockerfile (+3, -1)

@@ -11,6 +11,7 @@ RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.7 /usr/lib/libcudnn.so && ln -s
 # Add "ENV http_proxy=http://ip:port" if your download is slow, and don't forget to unset it at runtime.
 # exmaple: unset http_proxy && unset https_proxy && python fluid_benchmark.py ...

+
 RUN pip install -U pip
 RUN pip install -U kubernetes paddlepaddle

@@ -27,5 +28,6 @@ ADD *.whl /
 RUN pip install /*.whl && rm -f /*.whl

 ENV LD_LIBRARY_PATH=/usr/local/lib
-ADD fluid_benchmark.py recordio_converter.py args.py recordio_converter.py run.sh run_fluid_benchmark.sh /workspace/
+ADD fluid_benchmark.py recordio_converter.py args.py recordio_converter.py run.sh run_fluid_benchmark.sh imagenet_reader.py /workspace/
 ADD models/ /workspace/models/
+

benchmark/fluid/args.py (+20, -3)

@@ -17,7 +17,8 @@
 __all__ = ['parse_args', ]

 BENCHMARK_MODELS = [
-    "machine_translation", "resnet", "vgg", "mnist", "stacked_dynamic_lstm"
+    "machine_translation", "resnet", "se_resnext", "vgg", "mnist",
+    "stacked_dynamic_lstm", "resnet_with_preprocess"
 ]


@@ -67,12 +68,12 @@ def parse_args():
         '--cpus',
         type=int,
         default=1,
-        help='If cpus > 1, will use ParallelDo to run, else use Executor.')
+        help='If cpus > 1, will set ParallelExecutor to use multiple threads.')
     parser.add_argument(
         '--data_set',
         type=str,
         default='flowers',
-        choices=['cifar10', 'flowers'],
+        choices=['cifar10', 'flowers', 'imagenet'],
         help='Optional dataset for benchmark.')
     parser.add_argument(
         '--infer_only', action='store_true', help='If set, run forward only.')
@@ -122,6 +123,11 @@ def parse_args():
         type=str,
         default="",
         help='Directory that contains all the training recordio files.')
+    parser.add_argument(
+        '--test_data_path',
+        type=str,
+        default="",
+        help='Directory that contains all the test data (NOT recordio).')
     parser.add_argument(
         '--use_inference_transpiler',
         action='store_true',
@@ -130,5 +136,16 @@ def parse_args():
         '--no_random',
         action='store_true',
         help='If set, keep the random seed and do not shuffle the data.')
+    parser.add_argument(
+        '--reduce_strategy',
+        type=str,
+        choices=['reduce', 'all_reduce'],
+        default='all_reduce',
+        help='Specify the reduce strategy, can be reduce, all_reduce')
+    parser.add_argument(
+        '--fuse_broadcast_op',
+        action='store_true',
+        help='If set, would fuse multiple broadcast operators into one fused_broadcast operator.'
+    )
     args = parser.parse_args()
     return args
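
For orientation, here is a small self-contained sketch (not the actual fluid_benchmark.py wiring; the standalone parser and the echo logic are illustrative) of how the flags added above might be declared and consumed by a benchmark driver:

```python
# Illustrative sketch only: mirrors the options added in this commit with plain
# argparse and shows how a driver might branch on them. It is not the real
# fluid_benchmark.py integration.
import argparse


def parse_new_flags():
    parser = argparse.ArgumentParser(description='New benchmark flags (sketch)')
    parser.add_argument(
        '--test_data_path', type=str, default='',
        help='Directory that contains all the test data (NOT recordio).')
    parser.add_argument(
        '--reduce_strategy', type=str, choices=['reduce', 'all_reduce'],
        default='all_reduce', help='Gradient aggregation strategy.')
    parser.add_argument(
        '--fuse_broadcast_op', action='store_true',
        help='Fuse multiple broadcast operators into one fused_broadcast operator.')
    return parser.parse_args()


if __name__ == '__main__':
    # Example invocation:
    #   python sketch.py --reduce_strategy reduce --fuse_broadcast_op \
    #       --test_data_path /path/to/test/data
    args = parse_new_flags()
    print('reduce strategy:', args.reduce_strategy)
    print('fuse broadcast op:', args.fuse_broadcast_op)
    if args.test_data_path:
        print('will evaluate on data under', args.test_data_path)
```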
