revert clang-format
gangliao committed May 3, 2017
1 parent b13871c commit 6237f6f
Showing 24 changed files with 129 additions and 136 deletions.
26 changes: 13 additions & 13 deletions paddle/cuda/src/hl_cuda_device.cc
@@ -97,11 +97,11 @@ int g_cuda_lib_version = 0;
* Check build-in cuda function using glog and it **does not**
* support << operator for more details error info.
*/
-#define CHECK_CUDA(cudaFunc) \
-  do { \
-    cudaError_t cudaStat = cudaFunc; \
-    CHECK_EQ(cudaSuccess, cudaStat) \
-        << "Cuda Error: " << cudaGetErrorString(cudaStat); \
+#define CHECK_CUDA(cudaFunc) \
+  do { \
+    cudaError_t cudaStat = cudaFunc; \
+    CHECK_EQ(cudaSuccess, cudaStat) << "Cuda Error: " \
+                                    << cudaGetErrorString(cudaStat); \
} while (0)

/**
@@ -468,8 +468,8 @@ void hl_specify_devices_start(int *device, int number) {
CHECK(tmp) << "[Start failed] System memory is not enough.";

g_device = (hl_device_prop *)tmp;
-  device_prop = (hl_device_prop)((char *)tmp + g_system_device_num *
-                                                   sizeof(hl_device_prop *));
+  device_prop = (hl_device_prop)(
+      (char *)tmp + g_system_device_num * sizeof(hl_device_prop *));
memset(g_device, 0, g_system_device_num * sizeof(hl_device_prop *));
int num = 0;
for (int i = 0; i < number; i++) {
@@ -558,8 +558,8 @@ bool hl_get_sync_flag() { return g_sync_flag; }
void hl_stream_synchronize(hl_stream_t stream) {
cudaStream_t cu_stream;

-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";

cu_stream = t_resource.stream[stream];
CHECK_CUDA(cudaStreamSynchronize(cu_stream));
@@ -589,8 +589,8 @@ void hl_stream_record_event(hl_stream_t stream, hl_event_t event) {
cudaStream_t cu_stream;

CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";

cu_stream = t_resource.stream[stream];
CHECK_CUDA(cudaEventRecord(event->cu_event, cu_stream));
@@ -600,8 +600,8 @@ void hl_stream_wait_event(hl_stream_t stream, hl_event_t event) {
cudaStream_t cu_stream;

CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";

cu_stream = t_resource.stream[stream];
CHECK_CUDA(cudaStreamWaitEvent(cu_stream, event->cu_event, 0));
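For context, the CHECK_CUDA macro reformatted above wraps any call that returns a cudaError_t and aborts with the cudaGetErrorString message on failure; as its comment notes, it does not accept extra << detail. A minimal usage sketch — allocDeviceBuffer is hypothetical and not part of this commit:

    #include <cuda_runtime.h>

    // Allocate and zero a device buffer, aborting with a readable
    // message if either runtime call fails.
    void* allocDeviceBuffer(size_t bytes) {
      void* ptr = nullptr;
      CHECK_CUDA(cudaMalloc(&ptr, bytes));
      CHECK_CUDA(cudaMemset(ptr, 0, bytes));
      return ptr;
    }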
2 changes: 1 addition & 1 deletion paddle/function/BufferArgTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include <gtest/gtest.h>
#include "BufferArg.h"
+#include <gtest/gtest.h>
#include "paddle/math/MemoryHandle.h"

namespace paddle {
12 changes: 6 additions & 6 deletions paddle/function/CosSimOp.cpp
@@ -165,12 +165,12 @@ void CosSimBackward<DEVICE_TYPE_CPU>(const CpuMatrix& out_grad,
real reciprocal_square_sum_x = 1.0f / square_sum_x;
real reciprocal_square_sum_y = 1.0f / square_sum_y;
for (size_t j = 0; j < dim; ++j) {
-      prev_grad_x[j] += out[i] * grad[i] *
-                        (prev_out_y[j] * reciprocal_xy -
-                         prev_out_x[j] * reciprocal_square_sum_x);
-      prev_grad_y[j] += out[i] * grad[i] *
-                        (prev_out_x[j] * reciprocal_xy -
-                         prev_out_y[j] * reciprocal_square_sum_y);
+      prev_grad_x[j] +=
+          out[i] * grad[i] * (prev_out_y[j] * reciprocal_xy -
+                              prev_out_x[j] * reciprocal_square_sum_x);
+      prev_grad_y[j] +=
+          out[i] * grad[i] * (prev_out_x[j] * reciprocal_xy -
+                              prev_out_y[j] * reciprocal_square_sum_y);
}
}
}
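The accumulation above is consistent with the cosine-similarity gradient, assuming (the defining lines sit above this hunk, so this is an inference from the names) that reciprocal_xy is 1/(x·y) and out[i] holds the scaled similarity o = k(x·y)/(‖x‖‖y‖):

    \frac{\partial o}{\partial x_j}
        = o\left(\frac{y_j}{x \cdot y} - \frac{x_j}{\lVert x \rVert^{2}}\right),
    \qquad
    \frac{\partial o}{\partial y_j}
        = o\left(\frac{x_j}{x \cdot y} - \frac{y_j}{\lVert y \rVert^{2}}\right),

each multiplied by the upstream gradient grad[i].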
2 changes: 1 addition & 1 deletion paddle/function/FunctionTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include <gtest/gtest.h>
#include "Function.h"
+#include <gtest/gtest.h>
#include "paddle/math/SparseMatrix.h"

namespace paddle {
2 changes: 1 addition & 1 deletion paddle/function/TensorShapeTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include <gtest/gtest.h>
#include "TensorShape.h"
+#include <gtest/gtest.h>

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/function/TensorTypeTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include <gtest/gtest.h>
#include "TensorType.h"
+#include <gtest/gtest.h>

namespace paddle {

4 changes: 2 additions & 2 deletions paddle/gserver/dataproviders/PyDataProvider.cpp
@@ -194,8 +194,8 @@ void PyDataProvider::fillSlotsByStr(const std::string& samples) {
auto& slot = slots_[j];
CHECK(SlotDef::INDEX >= slot.type || SlotDef::STRING == slot.type)
<< " Slot type:" << slot.type << " is out of range.";
-    CHECK_GE(slot.type, SlotDef::VECTOR_DENSE)
-        << " Slot type:" << slot.type << " is out of range.";
+    CHECK_GE(slot.type, SlotDef::VECTOR_DENSE) << " Slot type:" << slot.type
+                                               << " is out of range.";
switch (slot.type) {
case SlotDef::VECTOR_DENSE:
fillDenseSlot(slot, data, dataEnd);
6 changes: 3 additions & 3 deletions paddle/gserver/evaluators/Evaluator.cpp
@@ -446,9 +446,9 @@ real AucEvaluator::evalImp(std::vector<Argument>& arguments) {
for (size_t i = 0; i < insNum; ++i) {
real value = outputD[pos];
uint32_t binIdx = static_cast<uint32_t>(value * kBinNum_);
-    CHECK(binIdx <= kBinNum_)
-        << "bin index [" << binIdx << "] out of range, predict value[" << value
-        << "]";
+    CHECK(binIdx <= kBinNum_) << "bin index [" << binIdx
+                              << "] out of range, predict value[" << value
+                              << "]";
real w = supportWeight ? weightD[i] : 1.0;
if (labelD[i] == kNegativeLabel_) {
statNeg_[binIdx] += w;
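For reference, the loop above only histograms weighted negatives (statNeg_; the positive branch falls outside this hunk) into kBinNum_ score bins. The usual binned AUC estimate such counts support, with P_b and N_b the positive and negative mass in bin b (ascending score), is

    \mathrm{AUC} \approx \frac{1}{P N} \sum_{b} P_b
        \Big( \sum_{b' < b} N_{b'} + \tfrac{1}{2} N_b \Big),
    \qquad P = \sum_b P_b, \quad N = \sum_b N_b,

i.e. the probability that a random positive outscores a random negative, counting ties as one half. Whether AucEvaluator uses exactly this estimator is not visible in this diff.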
5 changes: 2 additions & 3 deletions paddle/gserver/layers/Layer.cpp
@@ -263,9 +263,8 @@ void Layer::zeroGrad() {
}

void Layer::initNeedFlags() {
-  auto initFlag = [this](bool& flag,
-                         bool (Layer::*flagQueryFunc)() const,
-                         ParameterType type) {
+  auto initFlag = [this](
+      bool& flag, bool (Layer::*flagQueryFunc)() const, ParameterType type) {
flag = false;
if (biasParameter_ && biasParameter_->hasType(type)) {
flag = true;
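The initFlag lambda above takes a pointer to a const member function, bool (Layer::*)() const, which is then invoked on this. A standalone sketch of that idiom, independent of the Layer class itself:

    #include <iostream>

    struct Widget {
      bool enabled() const { return true; }
      bool visible() const { return false; }
    };

    int main() {
      Widget w;
      // Declare a pointer to a const member function and call through it.
      bool (Widget::*query)() const = &Widget::enabled;
      std::cout << (w.*query)() << "\n";  // prints 1
      query = &Widget::visible;
      std::cout << (w.*query)() << "\n";  // prints 0
    }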
39 changes: 19 additions & 20 deletions paddle/gserver/tests/test_RecurrentLayer.cpp
@@ -292,27 +292,26 @@ void checkRecurrentLayer(LayerConfig layerConfig,
TestRecurrentLayer<T> testGpu(layerConfig, true, gpuBatch);
testCpu.init(batchSize);
testGpu.init(batchSize);
-  auto checkError =
-      [](MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) {
-        CpuMatrix check(gpu->getHeight(), gpu->getWidth());
-        check.copyFrom(*gpu);
-        int height = cpu->getHeight();
-        int width = cpu->getWidth();
-        const real* data1 = cpu->getData();
-        const real* data2 = check.getData();
-        int count = 0;
-        for (int i = 0; i < height; i++) {
-          for (int j = 0; j < width; j++) {
-            if (fabs(data1[i * width + j] - data2[i * width + j]) /
-                    numSequences >
-                1e-4) {
-              count++;
-            }
-          }
+  auto checkError = [](
+      MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) {
+    CpuMatrix check(gpu->getHeight(), gpu->getWidth());
+    check.copyFrom(*gpu);
+    int height = cpu->getHeight();
+    int width = cpu->getWidth();
+    const real* data1 = cpu->getData();
+    const real* data2 = check.getData();
+    int count = 0;
+    for (int i = 0; i < height; i++) {
+      for (int j = 0; j < width; j++) {
+        if (fabs(data1[i * width + j] - data2[i * width + j]) / numSequences >
+            1e-4) {
+          count++;
}
-        EXPECT_EQ(count, 0) << "[" << str << "]"
-                            << "There are " << count << " different element.";
-      };
+      }
+    }
+    EXPECT_EQ(count, 0) << "[" << str << "]"
+                        << "There are " << count << " different element.";
+  };
T* cpuLayer = dynamic_cast<T*>(testCpu.testLayer_.get());
T* gpuLayer = dynamic_cast<T*>(testGpu.testLayer_.get());

30 changes: 14 additions & 16 deletions paddle/math/MatrixBitCode.cpp
@@ -174,10 +174,8 @@ void CpuMatrix::mulByBitCode(size_t numClasses,
const IVector& codes,
const Matrix& weight,
const Matrix& input) {
-  auto op = [](real& t,
-               const real* weightRow,
-               const real* inputRow,
-               size_t inputDim) {
+  auto op = [](
+      real& t, const real* weightRow, const real* inputRow, size_t inputDim) {
real sum = 0;
for (size_t k = 0; k < inputDim; ++k) {
sum += weightRow[k] * inputRow[k];
@@ -195,12 +193,12 @@ void CpuMatrix::mulByBitCodeBackwardWeight(size_t numClasses,
const IVector& codes,
Matrix& weight,
const Matrix& input) {
-  auto op =
-      [](const real t, real* weightRow, const real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          weightRow[k] += t * inputRow[k];
-        }
-      };
+  auto op = [](
+      const real t, real* weightRow, const real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      weightRow[k] += t * inputRow[k];
+    }
+  };

mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
}
@@ -212,12 +210,12 @@ void CpuMatrix::mulByBitCodeBackwardError(size_t numClasses,
const IVector& codes,
const Matrix& weight,
Matrix& input) {
-  auto op =
-      [](const real t, const real* weightRow, real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          inputRow[k] += t * weightRow[k];
-        }
-      };
+  auto op = [](
+      const real t, const real* weightRow, real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      inputRow[k] += t * weightRow[k];
+    }
+  };

mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
}
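All three op lambdas in this file share one calling shape — a per-bit output slot, a weight row, an input row, and the input width — so a single traversal can drive the forward pass (t += w·x), the weight gradient (w += t·x), and the input gradient (x += t·w). A hypothetical driver illustrating why the shapes line up; the real mulByBitCodeT and SimpleCodeTable are not shown in this diff, and the container layouts below are assumptions:

    #include <cstddef>
    #include <vector>

    using real = float;

    // Apply `op` once per (sample, code-bit) pair. pathNodes[i] lists the
    // weight rows touched by sample i; perBit[i][j] is the scalar slot for
    // bit j of sample i.
    template <typename Op>
    void forEachCodeBit(Op op,
                        const std::vector<std::vector<int>>& pathNodes,
                        std::vector<std::vector<real>>& perBit,
                        std::vector<std::vector<real>>& weight,
                        std::vector<std::vector<real>>& input,
                        size_t inputDim) {
      for (size_t i = 0; i < pathNodes.size(); ++i) {
        for (size_t j = 0; j < pathNodes[i].size(); ++j) {
          op(perBit[i][j], weight[pathNodes[i][j]].data(),
             input[i].data(), inputDim);
        }
      }
    }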
4 changes: 2 additions & 2 deletions paddle/math/tests/TensorCheck.h
@@ -183,8 +183,8 @@ void TensorCheck(AssertEq compare,

template <typename AssertEq>
void TensorCheck(AssertEq compare, real args1, real args2) {
-  EXPECT_EQ(compare(args1, args2), true)
-      << "[Test error] args1 = " << args1 << ", args2 = " << args2;
+  EXPECT_EQ(compare(args1, args2), true) << "[Test error] args1 = " << args1
+                                         << ", args2 = " << args2;
}

template <typename AssertEq>
18 changes: 9 additions & 9 deletions paddle/math/tests/test_SIMDFunctions.cpp
@@ -126,15 +126,15 @@ TEST(SIMDFunction, decayL1_WithLR) {
typedef std::function<void(float*, float*, float*, float, size_t)>
DecayL1MethodType;

-  DecayL1MethodType naive =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::naive::decayL1<float>(d, s, lr, l, len);
-      };
-
-  DecayL1MethodType simd =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::decayL1<float>(d, s, lr, l, len);
-      };
+  DecayL1MethodType naive = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::naive::decayL1<float>(d, s, lr, l, len);
+  };
+
+  DecayL1MethodType simd = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::decayL1<float>(d, s, lr, l, len);
+  };

naive(dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
simd(simd_dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
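The hunk above only changes how the two std::function wrappers are formatted. For intuition about what decayL1 computes, a plausible reference implementation is element-wise soft-thresholding with a per-element learning rate — the exact formula is an assumption here, since only the (d, s, lr, lambda, len) signature appears in this diff:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    // d[i] = sign(s[i]) * max(|s[i]| - lambda * lr[i], 0): shrink each
    // weight toward zero by an element-wise L1 penalty.
    void decayL1Naive(float* d, const float* s, const float* lr,
                      float lambda, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        float thresh = lambda * lr[i];
        d[i] = std::copysign(std::max(std::fabs(s[i]) - thresh, 0.0f), s[i]);
      }
    }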
50 changes: 24 additions & 26 deletions paddle/parameter/Argument.cpp
@@ -379,7 +379,7 @@ void Argument::concat(const std::vector<Argument>& args,
}

auto copyArg = [batchSize, stream](
-                     MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
+      MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
if (!src) {
dst.reset();
return;
@@ -395,31 +395,29 @@ void Argument::concat(const std::vector<Argument>& args,
tmpMatrix->copyFrom(*src, stream);
};

-  auto copyIds =
-      [batchSize, stream](
-          IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        IVector::resizeOrCreate(dst, batchSize, useGpu);
-        dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
-      };
-
-  auto copyStrs =
-      [batchSize, stream](
-          SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        if (!dst) {
-          dst = std::make_shared<std::vector<std::string>>(batchSize);
-        } else {
-          dst->resize(batchSize);
-        }
-        std::copy(src->begin(), src->end(), dst->begin() + startRow);
-      };
+  auto copyIds = [batchSize, stream](
+      IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    IVector::resizeOrCreate(dst, batchSize, useGpu);
+    dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
+  };
+
+  auto copyStrs = [batchSize, stream](
+      SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    if (!dst) {
+      dst = std::make_shared<std::vector<std::string>>(batchSize);
+    } else {
+      dst->resize(batchSize);
+    }
+    std::copy(src->begin(), src->end(), dst->begin() + startRow);
+  };

auto copySequencePos = [](ICpuGpuVectorPtr& dstSeq,
const ICpuGpuVectorPtr& srcSeq,
5 changes: 2 additions & 3 deletions paddle/parameter/AverageOptimizer.cpp
@@ -155,9 +155,8 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::restore() {
return nullptr;
}

-  return [](const VectorPtr vecs[],
-            const ParameterConfig& config,
-            size_t sparseId) {
+  return [](
+      const VectorPtr vecs[], const ParameterConfig& config, size_t sparseId) {
vecs[PARAMETER_VALUE]->copyFrom(*vecs[PARAMETER_GRADIENT]);
vecs[PARAMETER_GRADIENT]->zeroMem();
};
4 changes: 2 additions & 2 deletions paddle/parameter/Parameter.cpp
@@ -352,8 +352,8 @@ bool Parameter::load(std::istream& s) {
Header header;
CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header)))
<< "Fail to read parameter " << getName();
-  CHECK_EQ(header.version, kFormatVersion)
-      << "Incorrect format version: " << header.version;
+  CHECK_EQ(header.version, kFormatVersion) << "Incorrect format version: "
+                                           << header.version;
CHECK_EQ(header.size, getSize())
<< "The size (" << header.size << ") in the file does not match the size "
<< "(" << getSize() << ") of the parameter: " << getName();
4 changes: 2 additions & 2 deletions paddle/pserver/LightNetwork.cpp
@@ -359,8 +359,8 @@ void SocketClient::TcpClient(const std::string &serverAddr, int serverPort) {

#if defined(__OSX__) || defined(__APPLE__)
server = getipnodebyname(serverAddr.c_str(), AF_INET, AI_DEFAULT, &errRet);
-  CHECK_NE(HOST_NOT_FOUND, errRet)
-      << "ERROR, no such host: " << serverAddr << " ret = " << errRet;
+  CHECK_NE(HOST_NOT_FOUND, errRet) << "ERROR, no such host: " << serverAddr
+                                   << " ret = " << errRet;
CHECK(server) << "getipnodebyname error!";
#else
struct hostent hostinfo;
