diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake
index 5568f927572f5..529b4b9d15d09 100644
--- a/cmake/cblas.cmake
+++ b/cmake/cblas.cmake
@@ -44,8 +44,8 @@ set(ATLAS_LIB_SEARCH_PATHS
/usr/lib
/usr/lib/blas/atlas
/usr/lib/atlas
- /usr/lib/atlas-base) # special for ubuntu 14.04.
-
+ /usr/lib/atlas-base # special for ubuntu 14.04.
+ )
find_path(ATLAS_INC_DIR NAMES cblas.h
PATHS ${ATLAS_INCLUDE_SEARCH_PATHS})
find_library(ATLAS_CBLAS_LIB NAMES cblas libcblas.so.3
diff --git a/cmake/util.cmake b/cmake/util.cmake
index 4e9efd3c187b0..d776c3ae49952 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -24,7 +24,9 @@ function(target_circle_link_libraries TARGET_NAME)
list(APPEND libsInArgn ${arg})
endif()
endforeach()
-
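+  # With Clang (e.g. on OS X), ask the linker to resolve undefined symbols at
+  # runtime ("-undefined dynamic_lookup") instead of failing the link.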
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ list(APPEND LIBS "-undefined dynamic_lookup")
+ endif()
list(REVERSE libsInArgn)
target_link_libraries(${TARGET_NAME}
${LIBS}
diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md
index 6f9e03f2c28f1..f9899086bf060 100644
--- a/doc/build/build_from_source.md
+++ b/doc/build/build_from_source.md
@@ -1,148 +1,189 @@
-Build and Install
+Installing from Sources
=================
-* [1. Requirement](#Requirement)
-* [2. Build on Ubuntu](#ubuntu)
-* [3. Build on Mac OS X](#mac)
+* [1. Download and Setup](#download)
+* [2. Requirements](#requirements)
+* [3. Build on Ubuntu](#ubuntu)
+* [4. Build on Mac OS X](#mac)
-## Requirement
+## Download and Setup
+You can download PaddlePaddle from the [GitHub source](https://github.com/baidu/Paddle).
-### Dependents
+```bash
+git clone https://github.com/baidu/Paddle paddle
+```
-- **CMake**: required for 2.8+ version
-- **g++**: a recent c++ compiler supporting c++11, >= 4.6, < 5
-- **BLAS library**: such as openBLAS, MKL, ATLAS
-- **protobuf**: required for 2.4+ version, 3.x is not supported
-- **python**: currently only 2.7 version is supported
+## Requirements
-### Optional
+To compile the source code, you need GCC >= 4.6 or a recent Clang compiler with C++11 support.
+### Dependencies
-PaddlePaddle also support some build options, you have to install related libraries.
+- **CMake**: version >= 2.8
+- **BLAS**: MKL, OpenBLAS or ATLAS
+- **protobuf**: version >= 2.4, **Note: 3.x is not supported**
+- **python**: only python 2.7 is supported currently
-- **WITH_GPU**: Compile with gpu mode
- - The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5
- - Other versions Cuda Toolkit 6.5, 7.0 and cuDNN v2, v3, v4 are also supported
- - Note: to utilize cuDNN v5, Cuda Toolkit 7.5 is prerequisite and vice versa
-- **WITH_DOUBLE**: Compile with double precision, otherwise use single precision
-- **WITH_GLOG**: Compile with glog, otherwise use a log implement internally
-- **WITH_GFLAGS**: Compile with gflags, otherwise use a flag implement internally
-- **WITH_TESTING**: Compile with gtest and run unittest for PaddlePaddle
-- **WITH_DOC**: Compile with documentation
-- **WITH_SWIG_PY**: Compile with python predict api
-- **WITH_STYLE_CHECK**: Style check for source code
+### Options
+PaddlePaddle supports some build options. To enable them, you first need to install the related libraries.
-## Building on Ubuntu14.04
+ Option | Description
+ ------------ | :-----------
+ **WITH_GPU** | Compile with GPU mode.
+ **WITH_DOUBLE** | Compile with double-precision floating point, default: single precision.
+ **WITH_GLOG** | Compile with glog. If not found, default: an internal log implementation.
+ **WITH_GFLAGS** | Compile with gflags. If not found, default: an internal flag implementation.
+ **WITH_TESTING** | Compile with gtest for PaddlePaddle's unit testing.
+ **WITH_DOC** | Compile to generate PaddlePaddle's docs, default: disabled (OFF).
+ **WITH_SWIG_PY** | Compile with python predict API, default: disabled (OFF).
+ **WITH_STYLE_CHECK** | Compile with code style check, default: enabled (ON).
-### Install Dependencies
+**Note:**
+ - The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5.
+ - Other versions like Cuda Toolkit 6.5, 7.0, 8.0 and cuDNN v2, v3, v4 are also supported.
+ - **cuDNN v5 requires Cuda Toolkit 7.5, and vice versa.**
-- **CPU Dependencies**
+A few of these options require additional dependencies; set them up as follows:
-```bash
-# necessary
-sudo apt-get update
-sudo apt-get install -y g++ make cmake build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git
-# optional
-sudo apt-get install libgoogle-glog-dev
-sudo apt-get install libgflags-dev
-sudo apt-get install libgtest-dev
-sudo pip install wheel
-pushd /usr/src/gtest
-cmake .
-make
-sudo cp *.a /usr/lib
-popd
-```
-
+1. **Python Dependencies (optional)**
-- **GPU Dependencies(optional)**
+    To compile PaddlePaddle with the python predict API, make sure SWIG is installed and set `-DWITH_SWIG_PY=ON` as follows:
-If you need to build GPU version, the first thing you need is a machine that has GPU and CUDA installed.
-And you also need to install cuDNN.
+ ```bash
+ # install swig on ubuntu
+ sudo apt-get install swig
+ # install swig on Mac OS X
+ brew install swig
-You can download CUDA toolkit and cuDNN from nvidia website:
-
-```bash
-https://developer.nvidia.com/cuda-downloads
-https://developer.nvidia.com/cudnn
-```
-You can copy cuDNN files into the CUDA toolkit directory, such as:
+    # enable swig in cmake
+ cmake .. -DWITH_SWIG_PY=ON
+ ```
-```bash
-sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local
-sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
-```
-Then you need to set LD\_LIBRARY\_PATH, CUDA\_HOME and PATH environment variables in ~/.bashrc.
+2. **Doc Dependencies (optional)**
-```bash
-export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
-export CUDA_HOME=/usr/local/cuda
-export PATH=/usr/local/cuda/bin:$PATH
-```
-- **Python Dependencies(optional)**
+ To generate PaddlePaddle's documentation, install dependencies and set `-DWITH_DOC=ON` as follows:
-If you want to compile PaddlePaddle with python predict api, you need to add -DWITH_SWIG_PY=ON in cmake command and install these first:
+ ```bash
+ pip install 'sphinx>=1.4.0'
+ pip install sphinx_rtd_theme breathe recommonmark
-```bash
-sudo apt-get install swig
-```
+ # install doxygen on Ubuntu
+ sudo apt-get install doxygen
+ # install doxygen on Mac OS X
+ brew install doxygen
-- **Doc Dependencies(optional)**
+    # enable doc generation in cmake
+    cmake .. -DWITH_DOC=ON
+ ```
-If you want to compile PaddlePaddle with doc, you need to add -DWITH_DOC=ON in cmake command and install these first:
+## Build on Ubuntu 14.04
-```bash
-pip install 'sphinx>=1.4.0'
-pip install sphinx_rtd_theme breathe recommonmark
-sudo apt-get install doxygen
-```
+### Install Dependencies
-### Build and Install
+- **CPU Dependencies**
-CMake will find dependent libraries in system default paths first. After installing some optional libraries, corresponding build option will automatically be on(such as glog, gtest and gflags). And if libraries are not found, you have to set following variables manually in cmake command(CUDNN_ROOT, ATLAS_ROOT, MKL_ROOT, OPENBLAS_ROOT).
+ ```bash
+ # necessary
+ sudo apt-get update
+ sudo apt-get install -y g++ make cmake build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git
+ # optional
+ sudo apt-get install libgoogle-glog-dev
+ sudo apt-get install libgflags-dev
+ sudo apt-get install libgtest-dev
+ sudo pip install wheel
+ pushd /usr/src/gtest
+ cmake .
+ make
+ sudo cp *.a /usr/lib
+ popd
+ ```
+
+- **GPU Dependencies (optional)**
-Here are some examples of cmake command with different options:
+    To build the GPU version, you will need the following installed:
-**only cpu**
+    1. a CUDA-capable GPU
+    2. a supported version of Linux with a gcc compiler and toolchain
+    3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
+    4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
-```bash
-cmake -DWITH_GPU=OFF -DWITH_DOC=OFF
-```
+ The CUDA development environment relies on tight integration with the host development environment,
+ including the host compiler and C runtime libraries, and is therefore only supported on
+ distribution versions that have been qualified for this CUDA Toolkit release.
+
+    After downloading the cuDNN library, issue the following commands:
-**gpu**
+ ```bash
+ sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local
+ sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
+ ```
+ Then you need to set LD\_LIBRARY\_PATH, CUDA\_HOME and PATH environment variables in ~/.bashrc.
+
+ ```bash
+ export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
+ export CUDA_HOME=/usr/local/cuda
+ export PATH=/usr/local/cuda/bin:$PATH
+ ```
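+
+    As a quick sanity check (optional; this assumes `nvcc` is already on your PATH and cuDNN was unpacked into `/usr/local/cuda` as above), you can print the installed versions:
+
+    ```bash
+    # print the CUDA compiler version
+    nvcc --version
+    # print the cuDNN version recorded in the installed header
+    grep -A 2 CUDNN_MAJOR /usr/local/cuda/include/cudnn.h
+    ```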
+
+### Build and Install
+
+As usual, the best option is to create a build folder under the paddle project directory.
```bash
-cmake -DWITH_GPU=ON -DWITH_DOC=OFF
+mkdir build && cd build
+cmake ..
```
-**gpu with doc and swig**
+CMake first checks PaddlePaddle's dependencies in the system default paths. After installing some optional
+libraries, the corresponding build options will be set automatically (for instance, glog, gtest and gflags).
+If a dependency is still not found, you can set the corresponding variable manually (for example
+CUDNN_ROOT, ATLAS_ROOT, MKL_ROOT or OPENBLAS_ROOT) based on the error messages CMake prints.
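+
+If, say, cuDNN or OpenBLAS lives in a non-standard location, you can point CMake at it explicitly. The paths below are only examples; use wherever the libraries are actually installed on your machine:
+
+```bash
+cmake .. -DCUDNN_ROOT=/usr/local/cuda -DOPENBLAS_ROOT=/opt/OpenBLAS
+```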
-```bash
-cmake -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON
-```
+As a simple example, consider the following:
+
+- **Only CPU**
+
+ ```bash
+ cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF
+ ```
+- **GPU**
+
+ ```bash
+ cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF
+ ```
+
+- **GPU with doc and swig**
+
+ ```bash
+ cmake .. -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON
+ ```
-Finally, you can download source code and build:
+Finally, you can build PaddlePaddle:
```bash
-git clone https://github.com/baidu/Paddle paddle
-cd paddle
-mkdir build
-cd build
# you can add build option here, such as:
-cmake -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX= ..
+cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX=<path to install>
# please use sudo make install, if you want
# to install PaddlePaddle into the system
make -j `nproc` && make install
-# PaddlePaddle installation path
+# set PaddlePaddle installation path in ~/.bashrc
 export PATH=<path to install>/bin:$PATH
```
-**Note**
-And if you set WITH_SWIG_PY=ON, you have to install related python predict api at the same time:
+**Note:**
+
+If you set `WITH_SWIG_PY=ON`, the related python dependencies also need to be installed.
+Otherwise, PaddlePaddle will install the python dependencies automatically
+the first time you run a paddle command, such as `paddle version` or `paddle train`.
+This may require sudo privileges:
```bash
-pip install /opt/paddle/share/wheels/*.whl
+# you can run
+sudo pip install /opt/paddle/share/wheels/*.whl
+# or just run
+sudo paddle version
```
+
## Building on Mac OS X
### Prerequisites
@@ -150,7 +191,7 @@ This guide is based on Mac OS X 10.11 (El Capitan). Note that if you are running
you will already have Python 2.7.10 and Numpy 1.8 installed.
The best option is to use the package manager homebrew to handle installations and upgrades for you.
-To install homebrew, first open a terminal window (you can find Terminal in the Utilities folder in Applications), and issue the command:
+To install [homebrew](http://brew.sh/), first open a terminal window (you can find Terminal in the Utilities folder in Applications), and issue the command:
```bash
# install brew
@@ -163,109 +204,103 @@ easy_install pip
- **CPU Dependencies**
-```bash
-# Install fundamental dependents
-brew install glog gflags cmake protobuf openblas
-
-# Install google test on Mac OS X
-# Download gtest 1.7.0
-wget https://github.com/google/googletest/archive/release-1.7.0.tar.gz
-tar -xvf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0
-# Build gtest
-mkdir build && cmake ..
-make
-# Install gtest library
-sudo cp -r ../include/gtest /usr/local/include/
-sudo cp lib*.a /usr/local/lib
-```
-
-
+ ```bash
+    # Install fundamental dependencies
+ brew install glog gflags cmake protobuf openblas
+
+ # Install google test on Mac OS X
+ # Download gtest 1.7.0
+    wget https://github.com/google/googletest/archive/release-1.7.0.tar.gz -O googletest-release-1.7.0.tar.gz
+ tar -xvf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0
+ # Build gtest
+    mkdir build && cd build && cmake ..
+ make
+ # Install gtest library
+ sudo cp -r ../include/gtest /usr/local/include/
+ sudo cp lib*.a /usr/local/lib
+ ```
+
- **GPU Dependencies(optional)**
-If you need to build GPU version, the first thing you need is a machine that has NVIDIA GPU and CUDA installed.
-And you also need to install cuDNN.
+    To build the GPU version, you will need the following installed:
-You can download CUDA toolkit and cuDNN from nvidia website:
-
-```bash
-https://developer.nvidia.com/cuda-downloads
-https://developer.nvidia.com/cudnn
-```
-You can copy cuDNN files into the CUDA toolkit directory, for instance:
-
-```bash
-sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local
-sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
-```
-Then you need to set DYLD\_LIBRARY\_PATH, CUDA\_HOME and PATH environment variables in ~/.bashrc.
+ 1. a CUDA-capable GPU
+ 2. Mac OS X 10.11 or later
+    3. the Clang compiler and toolchain installed using Xcode
+    4. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
+    5. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
-```bash
-export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:$DYLD_LIBRARY_PATH
-export PATH=/usr/local/cuda/bin:$PATH
-```
-- **Python Dependencies(optional)**
+ The CUDA development environment relies on tight integration with the host development environment,
+ including the host compiler and C runtime libraries, and is therefore only supported on
+ distribution versions that have been qualified for this CUDA Toolkit release.
+
+    1. After downloading the cuDNN library, issue the following commands:
-If you want to compile PaddlePaddle with python predict API, you need to add -DWITH_SWIG_PY=ON in cmake command and install these first:
+ ```bash
+ sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local
+    sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib/libcudnn*
+ ```
+ 2. Then you need to set DYLD\_LIBRARY\_PATH, CUDA\_HOME and PATH environment variables in ~/.bashrc.
-```bash
-brew install swig
-```
+ ```bash
+ export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:$DYLD_LIBRARY_PATH
+ export PATH=/usr/local/cuda/bin:$PATH
+ ```
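+
+    As a quick sanity check (optional; assumes `nvcc` is on your PATH and cuDNN was unpacked into `/usr/local/cuda` as above):
+
+    ```bash
+    # print the CUDA compiler version and the cuDNN version from the installed header
+    nvcc --version
+    grep -A 2 CUDNN_MAJOR /usr/local/cuda/include/cudnn.h
+    ```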
-- **Doc Dependencies(optional)**
+### Build and Install
-If you want to compile PaddlePaddle with doc, you need to add -DWITH_DOC=ON in cmake command and install these first:
+As usual, the best option is to create a build folder under the paddle project directory.
```bash
-pip install 'sphinx>=1.4.0'
-pip install sphinx_rtd_theme breathe recommonmark
-brew install doxygen
+mkdir build && cd build
+cmake ..
```
-### Build and Install
-
-CMake can find dependent libraries in system default paths firstly.
-After installing some optional libraries, corresponding build option will be on automatically (for instance, glog, gtest and gflags).
-If not found, you have to set following variables manually via CMake command (CUDNN_ROOT, ATLAS_ROOT, MKL_ROOT, OPENBLAS_ROOT).
-
-Here are some examples of CMake command with different options:
+CMake first checks PaddlePaddle's dependencies in the system default paths. After installing some optional
+libraries, the corresponding build options will be set automatically (for instance, glog, gtest and gflags).
+If a dependency is still not found, you can set the corresponding variable manually (for example
+CUDNN_ROOT, ATLAS_ROOT, MKL_ROOT or OPENBLAS_ROOT) based on the error messages CMake prints.
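+
+If, for instance, OpenBLAS is not picked up automatically, you can pass its location explicitly. The path below assumes a default homebrew install of openblas; adjust it to your setup:
+
+```bash
+cmake .. -DOPENBLAS_ROOT=/usr/local/opt/openblas
+```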
-**only cpu**
+As a simple example, consider the following:
-```bash
-cmake -DWITH_GPU=OFF -DWITH_DOC=OFF
-```
+- **Only CPU**
-**gpu**
+ ```bash
+ cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF
+ ```
+- **GPU**
-```bash
-cmake -DWITH_GPU=ON -DWITH_DOC=OFF
-```
+ ```bash
+ cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF
+ ```
-**gpu with doc and swig**
+- **GPU with doc and swig**
-```bash
-cmake -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON
-```
+ ```bash
+ cmake .. -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON
+ ```
-Finally, you can download source code and build:
+Finally, you can build PaddlePaddle:
```bash
-git clone https://github.com/baidu/Paddle paddle
-cd paddle
-mkdir build
-cd build
# you can add build option here, such as:
-cmake -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX= ..
-# please use sudo make install, if you want
-# to install PaddlePaddle into the system
+cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX=<path to install>
+# please use sudo make install, if you want to install PaddlePaddle into the system
make -j `nproc` && make install
-# PaddlePaddle installation path
-export PATH=/bin:$PATH
+# set PaddlePaddle installation path in ~/.bashrc
+export PATH=<path to install>/bin:$PATH
```
-**Note**
-And if you set WITH_SWIG_PY=ON, you have to install related python predict api at the same time:
+
+**Note:**
+
+If you set `WITH_SWIG_PY=ON`, the related python dependencies also need to be installed.
+Otherwise, PaddlePaddle will install the python dependencies automatically
+the first time you run a paddle command, such as `paddle version` or `paddle train`.
+This may require sudo privileges:
```bash
+# you can run
sudo pip install /opt/paddle/share/wheels/*.whl
+# or just run
+sudo paddle version
```
\ No newline at end of file
diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp
index 9ae3716fa862c..6a79f83495a56 100644
--- a/paddle/api/Matrix.cpp
+++ b/paddle/api/Matrix.cpp
@@ -95,7 +95,7 @@ float Matrix::get(size_t x, size_t y) const throw(RangeError) {
}
void Matrix::set(size_t x, size_t y, float val) throw(RangeError,
- UnsupportError) {
+ UnsupportError) {
if (x > this->getWidth() || y > this->getHeight()) {
RangeError e;
throw e;
@@ -239,7 +239,7 @@ void Matrix::toNumpyMatInplace(float** view_data, int* dim1,
}
void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
int* dim2) throw(UnsupportError) {
- static_assert(sizeof(float) == sizeof(float),
+ static_assert(sizeof(paddle::real) == sizeof(float),
"Currently PaddleAPI only support for single "
"precision version of paddle.");
if (this->isSparse()) {
@@ -251,12 +251,12 @@ void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
   if (auto cpuMat = dynamic_cast<paddle::CpuMatrix*>(m->mat.get())) {
auto src = cpuMat->getData();
auto dest = *view_m_data;
- std::memcpy(dest, src, sizeof(float) * (*dim1) * (*dim2));
+ std::memcpy(dest, src, sizeof(paddle::real) * (*dim1) * (*dim2));
   } else if (auto gpuMat = dynamic_cast<paddle::GpuMatrix*>(m->mat.get())) {
auto src = gpuMat->getData();
auto dest = *view_m_data;
hl_memcpy_device2host(dest, src,
- sizeof(float) * (*dim1) * (*dim2));
+ sizeof(paddle::real) * (*dim1) * (*dim2));
} else {
LOG(WARNING) << "Unexpected Situation";
throw UnsupportError();
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 0f497e44d4c25..3127b4dd9a2fd 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -385,10 +385,17 @@ void NeuralNetwork::setOutputGrad(const std::vector& args) {
}
}
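+// Optional hook: an external library may define this factory (declared weak
+// below) to plug in its own NeuralNetwork subclass. If no definition is linked
+// in, the symbol evaluates to null and the default NeuralNetwork is used.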
+extern NeuralNetwork* newCustomNeuralNetwork(
+ const std::string& name, NeuralNetwork* network) __attribute__((weak));
+
NeuralNetwork* NeuralNetwork::newNeuralNetwork(
const std::string& name,
NeuralNetwork* rootNetwork) {
- return new NeuralNetwork(name, rootNetwork);
+  if (newCustomNeuralNetwork) {
+    return newCustomNeuralNetwork(name, rootNetwork);
+ } else {
+ return new NeuralNetwork(name, rootNetwork);
+ }
}
} // namespace paddle
diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp
index 2044279c2151f..ad2a715ef89c6 100644
--- a/paddle/trainer/tests/test_Trainer.cpp
+++ b/paddle/trainer/tests/test_Trainer.cpp
@@ -94,7 +94,11 @@ TEST(checkGradient, multi) {
TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); }
TEST(checkGradient, chunk) {
+#if defined(__APPLE__) || defined (__OSX__)
EXPECT_EQ(0, system("python trainer/tests/gen_proto_data.py"));
+#else
+ EXPECT_EQ(0, system("python2 trainer/tests/gen_proto_data.py"));
+#endif
checkGradientTest(configFile3, false, false);
#ifndef PADDLE_ONLY_CPU
checkGradientTest(configFile3, true, true);
diff --git a/paddle/utils/PythonUtil.cpp b/paddle/utils/PythonUtil.cpp
index 9ee7a29aad0b6..78c3a80674f9c 100644
--- a/paddle/utils/PythonUtil.cpp
+++ b/paddle/utils/PythonUtil.cpp
@@ -144,12 +144,12 @@ PyObjectPtr createPythonClass(
     const std::map<std::string, std::string>& kwargs) {
PyGuard guard;
PyObjectPtr pyModule(PyImport_ImportModule(moduleName.c_str()));
- // LOG(INFO) << "createPythonClass moduleName.c_str:" << moduleName.c_str();
+ LOG(INFO) << "createPythonClass moduleName.c_str:" << moduleName.c_str();
CHECK_PY(pyModule) << "Import module " << moduleName << " failed.";
PyObjectPtr pyDict(PyModule_GetDict(pyModule.get()));
CHECK_PY(pyDict) << "Get Dict failed.";
PyObjectPtr pyClass(PyDict_GetItemString(pyDict.get(), className.c_str()));
- // LOG(INFO) << "createPythonClass className.c_str():" << className.c_str();
+ LOG(INFO) << "createPythonClass className.c_str():" << className.c_str();
CHECK_PY(pyClass) << "Import class " << className << " failed.";
PyObjectPtr argsObjectList(PyTuple_New(args.size()));
for (size_t i = 0; i < args.size(); ++i) {
diff --git a/paddle/utils/PythonUtil.h b/paddle/utils/PythonUtil.h
index 2808338fbdf59..db02d1252b405 100644
--- a/paddle/utils/PythonUtil.h
+++ b/paddle/utils/PythonUtil.h
@@ -35,13 +35,6 @@ limitations under the License. */
 #include <Python.h>
 #include <frameobject.h>
-// #ifndef _POSIX_C_SOURCE
-// #warning "no _POSIX_C_SOURCE defined in Python.h"
-// #endif
-// #ifndef _XOPEN_SOURCE
-// #warning "no _XOPEN_SOURCE defined in Python.h"
-// #endif
-
#endif
#include "paddle/utils/Util.h"
diff --git a/paddle/utils/Stat.cpp b/paddle/utils/Stat.cpp
index ff6e8ade2cd48..d7b20ca5eb2f4 100644
--- a/paddle/utils/Stat.cpp
+++ b/paddle/utils/Stat.cpp
@@ -13,28 +13,12 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "Stat.h"
-
-#include // for syscall()
-#include
+#include "Util.h"
#include
#include
namespace paddle {
-// return the thread id used by glog
-pid_t getTID() {
- #if defined(__APPLE__) || defined(__OSX__)
- pid_t tid = syscall(SYS_thread_selfid);
- #else
- #ifndef __NR_gettid
- #define __NR_gettid 224
- #endif
- pid_t tid = syscall(__NR_gettid);
- #endif
- CHECK_NE(tid, -1);
- return tid;
-}
-
StatSet globalStat("GlobalStatInfo");
void Stat::addSample(uint64_t value) {
diff --git a/paddle/utils/Thread.h b/paddle/utils/Thread.h
index f1352e75d73a0..f6c826a1eeb65 100644
--- a/paddle/utils/Thread.h
+++ b/paddle/utils/Thread.h
@@ -13,24 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
+#include "Util.h"
#include "Logging.h"
#include
-#include
-#include
-inline pid_t gettid() {
-#if defined(__APPLE__) || defined(__OSX__)
- pid_t tid = syscall(SYS_thread_selfid);
-#else
- #ifndef __NR_gettid
- #define __NR_gettid 224
- #endif
- pid_t tid = syscall(__NR_gettid);
-#endif
- CHECK_NE(tid, -1);
- return tid;
-}
-
#include "Queue.h"
#include "ThreadLocal.h"
@@ -186,7 +172,7 @@ class SyncThreadPool {
jobFinishBarrier_(numWorkers + 1),
jobFunc_(nullptr),
checkOwner_(checkOwner) {
- ownerThreadId_ = ::gettid();
+ ownerThreadId_ = getTID();
workers_.resize(numWorkers);
start();
}
@@ -210,7 +196,7 @@ class SyncThreadPool {
*/
void exec(JobFunc jobFunc, JobFunc ownerFunc = nullptr) {
if (checkOwner_) {
- CHECK_EQ(ownerThreadId_, ::gettid())
+ CHECK_EQ(ownerThreadId_, getTID())
<< "this sync thread pool should be used in one thread";
}
diff --git a/paddle/utils/ThreadLocal.cpp b/paddle/utils/ThreadLocal.cpp
index a4b399d144ee3..0f948f1029af8 100644
--- a/paddle/utils/ThreadLocal.cpp
+++ b/paddle/utils/ThreadLocal.cpp
@@ -12,10 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
+#include "Util.h"
#include "ThreadLocal.h"
-
-#include "Thread.h"
-
#include "CommandLineParser.h"
P_DEFINE_bool(thread_local_rand_use_global_seed, false,
@@ -31,11 +29,11 @@ unsigned int* ThreadLocalRand::getSeed() {
if (!p) { // init seed
if (FLAGS_thread_local_rand_use_global_seed) {
p = new unsigned int(defaultSeed_);
- } else if (getpid() == gettid()) { // main thread
+ } else if (getpid() == getTID()) { // main thread
// deterministic, but differs from global srand()
p = new unsigned int(defaultSeed_ - 1);
} else {
- p = new unsigned int(defaultSeed_ + gettid());
+ p = new unsigned int(defaultSeed_ + getTID());
LOG(INFO) << "thread use undeterministic rand seed:" << *p;
}
seed_.set(p);
@@ -51,7 +49,7 @@ std::default_random_engine& ThreadLocalRandomEngine::get() {
int defaultSeed = ThreadLocalRand::getDefaultSeed();
engine->seed(FLAGS_thread_local_rand_use_global_seed
? defaultSeed
- : defaultSeed + gettid());
+ : defaultSeed + getTID());
engine_.set(engine);
}
return *engine;
diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp
index d8c3376fb18c4..c3c76f907d40e 100644
--- a/paddle/utils/Util.cpp
+++ b/paddle/utils/Util.cpp
@@ -93,6 +93,19 @@ static void installProfilerSwitch() {}
namespace paddle {
+pid_t getTID() {
+ #if defined(__APPLE__) || defined(__OSX__)
+ pid_t tid = syscall(SYS_thread_selfid);
+ #else
+ #ifndef __NR_gettid
+ #define __NR_gettid 224
+ #endif
+ pid_t tid = syscall(__NR_gettid);
+ #endif
+ CHECK_NE(tid, -1);
+ return tid;
+}
+
static bool g_initialized = false;
 typedef std::pair<int, std::function<void()>> PriorityFuncPair;
 typedef std::vector<PriorityFuncPair> InitFuncList;
diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h
index 11a03e141dec5..a059ca50a538d 100644
--- a/paddle/utils/Util.h
+++ b/paddle/utils/Util.h
@@ -24,6 +24,8 @@ limitations under the License. */
#include
#include
#include
+#include <sys/syscall.h>  // for syscall()
+#include <sys/types.h>
#include "CommandLineParser.h"
#include "Logging.h"
@@ -63,6 +65,9 @@ limitations under the License. */
namespace paddle {
+// return the thread id used by glog
+pid_t getTID();
+
/**
* return the 1-based index of the highest bit set
*