
Merge pull request PaddlePaddle#1534 from helinwang/arg_sum
Rename Argument::sumCosts to Argument::sum since Argument should not …
helinwang authored Mar 4, 2017
2 parents 044ad94 + 3219c83 commit 349e799
Showing 8 changed files with 13 additions and 15 deletions.
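For callers, the change is a plain rename of the cost-summing helper, visible in both the core `Argument` struct and the SWIG-exposed `Arguments` wrapper. A minimal before/after sketch of a hypothetical call site (not part of this PR; the wrapper function and context are mine, the include path and calls come from the hunks below):

```cpp
#include <vector>
#include "paddle/parameter/Argument.h"  // path as shown in this diff

// Hypothetical call site; `outArgs` would normally be filled by a forward pass.
void reportCost(const std::vector<paddle::Argument>& outArgs) {
  // Before this commit:
  //   auto cost = paddle::Argument::sumCosts(outArgs);
  // After this commit:
  auto cost = paddle::Argument::sum(outArgs);
  (void)cost;
}
```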
4 changes: 1 addition & 3 deletions paddle/api/Arguments.cpp
@@ -144,9 +144,7 @@ void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) {
a.cpuSequenceDims = m->cast<paddle::IVector>(vec->getSharedPtr());
}

-float Arguments::sumCosts() const {
-return paddle::Argument::sumCosts(m->outputs);
-}
+float Arguments::sum() const { return paddle::Argument::sum(m->outputs); }

int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
2 changes: 1 addition & 1 deletion paddle/api/PaddleAPI.h
@@ -453,7 +453,7 @@ class Arguments {
IVector* vec) throw(RangeError);
void setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError);

-float sumCosts() const;
+float sum() const;

private:
static Arguments* createByPaddleArgumentVector(void* ptr);
2 changes: 1 addition & 1 deletion paddle/api/test/testArguments.py
@@ -22,7 +22,7 @@ def test_load_arguments(self):
args = swig_paddle.Arguments.createArguments(1)
args.setSlotValue(0, m)

-self.assertAlmostEqual(27.0, args.sumCosts())
+self.assertAlmostEqual(27.0, args.sum())

mat = args.getSlotValue(0)
assert isinstance(mat, swig_paddle.Matrix)
6 changes: 3 additions & 3 deletions paddle/gserver/tests/LayerGradUtil.cpp
@@ -24,7 +24,7 @@ real getCostSum(LayerPtr& testLayer, MatrixPtr weights) {
if (weights) {
outArgs[0].value->dotMul(*outArgs[0].value, *weights);
}
-return Argument::sumCosts(outArgs);
+return Argument::sum(outArgs);
}

real getDiffAndPrint(real newCost1,
@@ -241,7 +241,7 @@ void testBatchState(LayerPtr testLayer,

std::vector<Argument> args;
args.push_back(out);
-EXPECT_EQ(0, Argument::sumCosts(args)) << "testBatchState failed";
+EXPECT_EQ(0, Argument::sum(args)) << "testBatchState failed";
for (size_t seqId = 0; seqId < numSequences; ++seqId) {
start[seqId] += seqLens[seqId];
}
@@ -672,7 +672,7 @@ void testLayerGradKernel(TestConfig testConf,
outArgs[0].value->dotMul(*testLayer->getOutput().value, *weights);
}

-real cost = Argument::sumCosts(outArgs);
+real cost = Argument::sum(outArgs);
LOG(INFO) << " cost " << cost;
EXPECT_FALSE(std::isnan(cost));

2 changes: 1 addition & 1 deletion paddle/parameter/Argument.h
@@ -163,7 +163,7 @@ struct Argument {
: sequenceStartPositions->getData(false);
}

-static inline real sumCosts(const std::vector<Argument>& arguments) {
+static inline real sum(const std::vector<Argument>& arguments) {
real cost = 0;
for (auto& arg : arguments) {
if (arg.value) {
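The diff view truncates the helper's body just after the `if (arg.value)` check. A plausible completion, for context only (only the signature and the start of the loop appear in the hunk above; the `getSum()` call is an assumption, not shown on this page):

```cpp
// Sketch of how the renamed helper plausibly continues (assumption):
static inline real sum(const std::vector<Argument>& arguments) {
  real cost = 0;
  for (auto& arg : arguments) {
    if (arg.value) {
      // Assumed: each output contributes the element sum of its value matrix.
      cost += arg.value->getSum();
    }
  }
  return cost;
}
```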
2 changes: 1 addition & 1 deletion paddle/trainer/Tester.cpp
@@ -208,7 +208,7 @@ real Tester::forwardOneBatch(const DataBatch& dataBatch,
return 0.0; // In this case, there is no meaning to calculate cost
}

-return Argument::sumCosts(outArgs);
+return Argument::sum(outArgs);
}

void Tester::testOnePassBatch(int passId) {
8 changes: 4 additions & 4 deletions paddle/trainer/Trainer.cpp
@@ -310,7 +310,7 @@ real Trainer::checkGradient() {
std::vector<Argument> outArgs;

trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-real cost = Argument::sumCosts(outArgs);
+real cost = Argument::sum(outArgs);
LOG(INFO) << "original cost=" << cost;
trainerInternal_.getGradientMachine()->backward();

@@ -340,7 +340,7 @@ real Trainer::checkGradient() {
parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
parameter->setValueUpdated();
trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-real newCost1 = Argument::sumCosts(outArgs);
+real newCost1 = Argument::sum(outArgs);

for (size_t i = 0; i < dim; ++i) {
newp[i] = oldp[i] - step * d[i];
@@ -349,7 +349,7 @@
parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
parameter->setValueUpdated();
trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-real newCost2 = Argument::sumCosts(outArgs);
+real newCost2 = Argument::sum(outArgs);

real trueDelta = 0.5 * (newCost1 - newCost2);
real diff = (1e-20 + trueDelta) / (1e-20 + delta) - 1;
@@ -575,7 +575,7 @@ real Trainer::calcGradient(const DataBatch& dataBatch,

trainerInternal_.getGradientMachine()->forwardBackward(
inArgs, &outArgs, PASS_TRAIN);
-real cost = Argument::sumCosts(outArgs);
+real cost = Argument::sum(outArgs);

offset = 0;
for (auto& para : parameters) {
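The `Trainer::checkGradient` hunks above compare a central finite difference of the cost against an analytically predicted change. A self-contained sketch of just the comparison formula those lines compute (`newCost1`/`newCost2` are the costs at the +/- perturbed parameters, `delta` the analytic prediction; the names mirror the diff, the standalone function wrapper is mine):

```cpp
// Relative error between the observed central-difference cost change and the
// analytic delta, as in the checkGradient hunks above. The 1e-20 terms guard
// against division by zero when both quantities are essentially zero.
inline double gradientCheckRelativeError(double newCost1, double newCost2,
                                         double delta) {
  const double trueDelta = 0.5 * (newCost1 - newCost2);  // central difference
  return (1e-20 + trueDelta) / (1e-20 + delta) - 1;      // ~0 when they agree
}
```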
2 changes: 1 addition & 1 deletion paddle/trainer/TrainerInternal.cpp
@@ -134,7 +134,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId,
real cost = 0;
{
REGISTER_TIMER("sumCost");
-cost = Argument::sumCosts(*outArgs);
+cost = Argument::sum(*outArgs);
}

if (batchId % intconfig_->log_period == 0) {
