a bunch of updates. to be checked on durian. does not build.
Yangqing committed Sep 13, 2013
1 parent 7149de5 commit 746599a
Showing 15 changed files with 28,877 additions and 50 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -11,3 +11,7 @@
*.lai
*.la
*.a

# Compiled protocol buffers
*.pb.h
*.pb.cc
41 changes: 0 additions & 41 deletions Makefile

This file was deleted.

59 changes: 59 additions & 0 deletions src/Makefile
@@ -0,0 +1,59 @@
#
# The variables below describe the build targets. By convention, related settings share
# a common prefix or suffix (PROJECT, NAME, TEST_NAME, and the *_SRCS / *_OBJS lists) so
# they read like attributes of a single target. There are no structs in Make, so this
# naming convention is how we keep track of attributes that belong to the same target
# or program.
#
CXX := nvcc

PROJECT := caffeine
NAME := lib$(PROJECT).so
TEST_NAME := test_$(PROJECT)
CXX_SRCS := $(shell find . ! -name "test_*.cpp" -name "*.cpp")
TEST_SRCS := $(shell find . -name "test_*.cpp")
PROTO_SRCS := $(wildcard caffeine/proto/*.proto)
PROTO_GEN_HEADER := ${PROTO_SRCS:.proto=.pb.h}
PROTO_GEN_CC := ${PROTO_SRCS:.proto=.pb.cc}
CXX_OBJS := ${CXX_SRCS:.cpp=.o}
PROTO_OBJS := ${PROTO_SRCS:.proto=.pb.o}
OBJS := $(CXX_OBJS) $(PROTO_OBJS)
TEST_OBJS := ${TEST_SRCS:.cpp=.o}

CUDA_DIR = /usr/local/cuda

CUDA_INCLUDE_DIR = $(CUDA_DIR)/include
CUDA_LIB_DIR = $(CUDA_DIR)/lib

INCLUDE_DIRS := . $(CUDA_INCLUDE_DIR)
LIBRARY_DIRS := . $(CUDA_LIB_DIR)
LIBRARIES := cuda cudart cublas protobuf
WARNINGS := -Wall

CPPFLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir))
LDFLAGS += $(foreach library,$(LIBRARIES),-l$(library))

LINK = $(CXX) $(CXXFLAGS) $(CPPFLAGS) $(LDFLAGS) $(WARNINGS)

.PHONY: all test clean distclean

all: $(NAME)

test: $(TEST_NAME)

$(TEST_NAME): $(TEST_OBJS) $(OBJS)
	$(LINK) -o $(TEST_NAME) -l$(PROJECT) $(CXX_SRCS) $(TEST_SRCS) gtest/gtest-all.cc

$(NAME): $(PROTO_GEN_CC) $(OBJS)
	$(LINK) -shared $(OBJS) -o $(NAME)

$(PROTO_GEN_CC): $(PROTO_SRCS)
	protoc $(PROTO_SRCS) --cpp_out=.

clean:
	$(RM) $(NAME)
	$(RM) $(OBJS)
	$(RM) $(PROTO_GEN_HEADER) $(PROTO_GEN_CC)

distclean: clean
36 changes: 36 additions & 0 deletions src/caffeine/base.h
@@ -0,0 +1,36 @@
#ifndef CAFFEINE_BASE_H_
#define CAFFEINE_BASE_H_

#include <vector>
#include "caffeine/blob.hpp"
#include "caffeine/proto/layer_param.pb.h"

using std::vector;

namespace caffeine {

template <typename Dtype>
class Layer {
 public:
  explicit Layer(const LayerParameter& param)
      : initialized_(false), layer_param_(param) {};
  ~Layer();
  virtual void SetUp(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) = 0;
  virtual void Forward(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) = 0;
  virtual void Predict(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) = 0;
  virtual void Backward(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top, bool propagate_down) = 0;
 protected:
  bool initialized_;
  // The protobuf that stores the layer parameters
  LayerParameter layer_param_;
  // The vector that stores the parameters as a set of blobs.
  vector<Blob<Dtype> > blobs;
}; // class Layer

} // namespace caffeine

#endif // CAFFEINE_BASE_H_
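As a rough illustration of how this abstract interface is meant to be filled in, here is a minimal sketch of a concrete layer. IdentityLayer is hypothetical and not part of this commit; the override signatures simply mirror the pure virtual methods declared above.

#include "caffeine/base.h"

namespace caffeine {

// Hypothetical example layer: copies its bottom blobs to its top blobs.
template <typename Dtype>
class IdentityLayer : public Layer<Dtype> {
 public:
  explicit IdentityLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
    // Reshape each top blob to match its bottom counterpart.
  }
  virtual void Forward(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
    // Copy bottom data into top data.
  }
  virtual void Predict(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
    Forward(bottom, top);
  }
  virtual void Backward(vector<const Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top, bool propagate_down) {
    // No parameters to update for an identity mapping.
  }
};

}  // namespace caffeine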
9 changes: 7 additions & 2 deletions src/caffeine/blob.cpp
@@ -12,8 +12,8 @@ void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
height_ = height;
width_ = width;
count_ = num_ * channels_ * height_ * width_;
data_.reset(SyncedMemory(count_ * sizeof(Dtype)));
diff_.reset(SyncedMemory(count_ * sizeof(Dtype)));
data_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
diff_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
}

template <typename Dtype>
@@ -64,4 +64,9 @@ Dtype* Blob<Dtype>::mutable_gpu_diff() {
return diff_->mutable_gpu_data();
}

template <typename Dtype>
void Blob<Dtype>::update() {
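  // Not implemented yet; this commit only adds the empty stub to match the update() declaration in blob.hpp.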

}

} // namespace caffeine
3 changes: 2 additions & 1 deletion src/caffeine/blob.hpp
@@ -33,7 +33,8 @@ class Blob {
Dtype* mutable_cpu_data();
Dtype* mutable_gpu_data();
Dtype* mutable_cpu_diff();
Dtype* mutable_gpu_diff();
Dtype* mutable_gpu_diff();
void update();
private:
void check_data();
void check_diff();
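For orientation, a usage sketch of the Blob interface shown in this hunk and in blob.cpp above. A default constructor and a public Reshape are assumed here, since the constructors fall outside the excerpt.

#include "caffeine/blob.hpp"

void BlobExample() {
  // Assumes a default-constructible Blob; only Reshape and the mutable_* accessors
  // appear in the diff above.
  caffeine::Blob<float> blob;
  // Reshape records num/channels/height/width and points data_ and diff_ at fresh
  // SyncedMemory buffers of count_ * sizeof(float) bytes.
  blob.Reshape(1, 3, 4, 5);
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < 1 * 3 * 4 * 5; ++i) {
    data[i] = 1.0f;
  }
  // mutable_gpu_data() returns the matching device pointer; the SyncedMemory layer
  // is expected to move the bytes to the GPU first.
  float* gpu_data = blob.mutable_gpu_data();
  (void)gpu_data;
}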
36 changes: 34 additions & 2 deletions src/caffeine/common.hpp
@@ -15,9 +15,41 @@ static std::ostream nullout(0);

// TODO(Yangqing): make a better logging scheme
// Streams to std::cout only when the condition does not hold, so CHECK below prints on failure.
#define LOG_IF(condition) \
    (condition) ? nullout : std::cout

#define CHECK(condition) \
LOG_IF(condition) << "Check failed: " #condition " "

#ifndef NDEBUG

#define DCHECK(condition) CHECK(condition)

#else

#define DCHECK(condition)

#endif // NDEBUG


// TODO(Yangqing): make a better logging scheme
#define CUDA_LOG_IF(condition) \
    ((condition) == cudaSuccess) ? nullout : std::cout

#define CUDA_CHECK(condition) \
    CUDA_LOG_IF(condition) << "Check failed: " #condition " "

#ifndef NDEBUG

#define CUDA_DCHECK(condition) CUDA_CHECK(condition)

#else

#define CUDA_DCHECK(condition)

#endif // NDEBUG

#endif // CAFFEINE_COMMON_HPP_
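A short usage sketch for these macros; the cudaMalloc and cudaFree calls are generic CUDA runtime calls chosen only for illustration, not code from this commit.

#include <cuda_runtime.h>
#include "caffeine/common.hpp"

void CheckExample(int count) {
  // Streams "Check failed: count > 0 ..." to std::cout when the condition is false.
  CHECK(count > 0) << "count must be positive, got " << count;
  void* device_ptr = NULL;
  // Streams the failure message when the CUDA call does not return cudaSuccess.
  CUDA_CHECK(cudaMalloc(&device_ptr, count * sizeof(float)));
  // DCHECK and CUDA_DCHECK compile to nothing when NDEBUG is defined.
  DCHECK(device_ptr != NULL);
  CUDA_CHECK(cudaFree(device_ptr));
}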
6 changes: 6 additions & 0 deletions src/caffeine/neuron_layer.cpp
@@ -0,0 +1,6 @@
#include "caffeine/base.h"

namespace caffeine {


} // namespace caffeine
5 changes: 5 additions & 0 deletions src/caffeine/proto/layer_param.proto
@@ -0,0 +1,5 @@
package caffeine;

message LayerParameter {
required string name = 1;
}
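A sketch of how the class generated from this proto would be used from C++ once the Makefile's protoc rule above has produced layer_param.pb.h and layer_param.pb.cc; set_name, name, and the serialization calls are the standard accessors protoc emits for a required string field.

#include <string>
#include "caffeine/proto/layer_param.pb.h"

void LayerParameterExample() {
  caffeine::LayerParameter param;
  param.set_name("conv1");                 // fill in the required name field
  std::string wire;
  param.SerializeToString(&wire);          // standard protobuf serialization
  caffeine::LayerParameter restored;
  restored.ParseFromString(wire);          // restored.name() is now "conv1"
}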
3 changes: 2 additions & 1 deletion src/caffeine/syncedmem.cpp
@@ -79,4 +79,5 @@ inline void* SyncedMemory::mutable_gpu_data() {
}


} // namespace caffeine
} // namespace caffeine

7 changes: 4 additions & 3 deletions src/caffeine/syncedmem.hpp
@@ -6,21 +6,22 @@ namespace caffeine {
class SyncedMemory {
public:
SyncedMemory()
: cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(SYNCED) {};
: cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED) {};
explicit SyncedMemory(size_t size)
: cpu_ptr_(NULL), gpu_ptr_(NULL), head_(SYNCED), size_(size) {};
: cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED) {};
~SyncedMemory();
const void* cpu_data();
const void* gpu_data();
void* mutable_cpu_data();
void* mutable_gpu_data();
enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };
SyncedHead head() { return head_; }
private:
void to_cpu();
void to_gpu();
void* cpu_ptr_;
void* gpu_ptr_;
size_t size_;
enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };
SyncedHead head_;
}; // class SyncedMemory

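Read together with the test file below, a sketch of the state machine that SyncedHead implies. The transitions are the expected ones; to_cpu() and to_gpu() are not shown in this diff, so treat the comments as assumptions rather than verified behavior.

#include "caffeine/syncedmem.hpp"

void SyncedMemoryExample() {
  caffeine::SyncedMemory mem(16 * sizeof(float));
  // Just constructed: nothing allocated yet, head() == SyncedMemory::UNINITIALIZED.
  void* cpu = mem.mutable_cpu_data();
  // After a mutable CPU access, the CPU copy is presumably authoritative (HEAD_AT_CPU).
  const void* gpu = mem.gpu_data();
  // A read-only GPU view should copy CPU -> GPU, leaving the buffer SYNCED.
  (void)cpu;
  (void)gpu;
}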
24 changes: 24 additions & 0 deletions src/caffeine/test_syncedmem.cpp
@@ -0,0 +1,24 @@
#include "gtest/gtest.h"
#include "caffeine/syncedmem.hpp"

namespace caffeine {

class SyncedMemoryTest : public ::testing::Test {};

TEST_F(SyncedMemoryTest, TestInitialization) {
SyncedMemory mem(10);
EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED);
}

TEST_F(SyncedMemoryTest, TestAllocation) {
SyncedMemory mem(10);
EXPECT_NE(mem.cpu_data(), (void*)NULL);
EXPECT_NE(mem.gpu_data(), (void*)NULL);
}

}

int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}