[Build] fix various build problems (dmlc#3117)
BarclayII authored Jul 8, 2021
1 parent bb89dee commit 2f41fcd
Showing 6 changed files with 13 additions and 13 deletions.
include/dgl/runtime/c_runtime_api.h (3 changes: 2 additions & 1 deletion)

@@ -542,8 +542,9 @@ DGL_DLL int DGLStreamStreamSynchronize(int device_type,
 
 /*!
  * \brief Load tensor adapter.
+ * \return 0 when success, -1 when failure happens.
  */
-DGL_DLL void DGLLoadTensorAdapter(const char *path);
+DGL_DLL int DGLLoadTensorAdapter(const char *path);
 
 /*!
  * \brief Bug report macro.
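For reference, a minimal caller-side sketch (not part of this commit) of how the changed C API can be consumed, assuming only the signature and return convention declared above; the helper name TryLoadAdapter and the printed message are illustrative.

#include <cstdio>
#include <dgl/runtime/c_runtime_api.h>

// Hypothetical helper: loads a tensor adapter and reports whether it worked.
void TryLoadAdapter(const char *path) {
  // DGLLoadTensorAdapter now returns 0 on success and -1 on failure
  // instead of returning void.
  if (DGLLoadTensorAdapter(path) != 0) {
    std::printf("tensor adapter not loaded; operators fall back to DGL's implementation\n");
  }
}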
include/dgl/runtime/tensordispatch.h (2 changes: 1 addition & 1 deletion)

@@ -65,7 +65,7 @@ class TensorDispatcher {
   }
 
   /*! \brief Load symbols from the given tensor adapter library path */
-  void Load(const char *path_cstr);
+  bool Load(const char *path_cstr);
 
   /*!
    * \brief Allocate an empty tensor.
python/dgl/_ffi/base.py (4 changes: 3 additions & 1 deletion)

@@ -112,6 +112,7 @@ def decorate(func, fwrapped):
     import decorator
     return decorator.decorate(func, fwrapped)
 
+tensor_adapter_loaded = False
 
 def load_tensor_adapter(backend, version):
     """Tell DGL to load a tensoradapter library for given backend and version.
@@ -123,6 +124,7 @@ def load_tensor_adapter(backend, version):
     version : str
         The version number of the backend.
     """
+    global tensor_adapter_loaded
    version = version.split('+')[0]
     if sys.platform.startswith('linux'):
         basename = 'libtensoradapter_%s_%s.so' % (backend, version)
@@ -133,4 +135,4 @@ def load_tensor_adapter(backend, version):
     else:
         raise NotImplementedError('Unsupported system: %s' % sys.platform)
     path = os.path.join(_DIR_NAME, 'tensoradapter', backend, basename)
-    _LIB.DGLLoadTensorAdapter(path.encode('utf-8'))
+    tensor_adapter_loaded = (_LIB.DGLLoadTensorAdapter(path.encode('utf-8')) == 0)
src/array/cuda/spmm.cu (4 changes: 0 additions & 4 deletions)

@@ -526,7 +526,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
         (CUDART_VERSION < 11000) &&
         ((op == "copy_lhs" && cusparse_available<bits, IdType>()) ||
          (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>()));
-#if CUDART_VERSION < 11000
     // Create temporary output buffer to store non-transposed output
     if (use_legacy_cusparsemm) {
       for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) {
@@ -539,7 +538,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
         trans_out[ntype] = out;
       }
     }
-#endif
 
     // Check shape of ufeat for all relation type and compute feature size
     int64_t x_length = 1;
@@ -634,7 +632,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
       }
     }
 
-#if CUDART_VERSION < 11000
     if (use_legacy_cusparsemm) {
       // transpose output
       for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) {
@@ -646,7 +643,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
        device->FreeWorkspace(vec_csr[0].indptr->ctx, trans_out[ntype]);
       }
     }
-#endif
   });
 }
 
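The four spmm.cu hunks remove preprocessor guards around code that is already gated by the runtime flag use_legacy_cusparsemm, whose definition (kept as context above) starts with (CUDART_VERSION < 11000). A simplified sketch of that pattern, with everything except CUDART_VERSION and the flag name made up for illustration:

#include <cuda_runtime.h>  // defines CUDART_VERSION (via cuda_runtime_api.h)

// Illustrative only: the guarded branch can execute only when the compile-time
// check folded into the flag is true, so wrapping its uses again in
// #if CUDART_VERSION < 11000 does not change runtime behavior.
void SketchLegacyPath(bool cusparse_path_available) {
  const bool use_legacy_cusparsemm =
      (CUDART_VERSION < 11000) && cusparse_path_available;
  if (use_legacy_cusparsemm) {
    // legacy cuSPARSE workspace handling would go here
  }
}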
src/runtime/c_runtime_api.cc (4 changes: 2 additions & 2 deletions)

@@ -379,8 +379,8 @@ int DGLCbArgToReturn(DGLValue* value, int code) {
   API_END();
 }
 
-void DGLLoadTensorAdapter(const char *path) {
-  TensorDispatcher::Global()->Load(path);
+int DGLLoadTensorAdapter(const char *path) {
+  return TensorDispatcher::Global()->Load(path) ? 0 : -1;
 }
 
 // set device api
src/runtime/tensordispatch.cc (9 changes: 5 additions & 4 deletions)

@@ -19,18 +19,18 @@ namespace runtime {
 
 constexpr const char *TensorDispatcher::names_[];
 
-void TensorDispatcher::Load(const char *path) {
+bool TensorDispatcher::Load(const char *path) {
   CHECK(!available_) << "The tensor adapter can only load once.";
 
   if (path == nullptr || strlen(path) == 0)
     // does not have dispatcher library; all operators fall back to DGL's implementation
-    return;
+    return false;
 
 #if defined(WIN32) || defined(_WIN32)
   handle_ = LoadLibrary(path);
 
   if (!handle_)
-    return;
+    return false;
 
   for (int i = 0; i < num_entries_; ++i) {
     entrypoints_[i] = reinterpret_cast<void*>(GetProcAddress(handle_, names_[i]));
@@ -40,7 +40,7 @@ void TensorDispatcher::Load(const char *path) {
   handle_ = dlopen(path, RTLD_LAZY);
 
   if (!handle_)
-    return;
+    return false;
 
   for (int i = 0; i < num_entries_; ++i) {
     entrypoints_[i] = dlsym(handle_, names_[i]);
@@ -49,6 +49,7 @@ void TensorDispatcher::Load(const char *path) {
 #endif // WIN32
 
   available_ = true;
+  return true;
 }
 
 TensorDispatcher::~TensorDispatcher() {
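A standalone sketch (not DGL code) of the load-and-report pattern that Load() now follows on POSIX builds: try to dlopen the library, resolve a symbol, and return a boolean the caller can turn into a status code, as DGLLoadTensorAdapter does above. The library path and symbol name below are placeholders.

#include <dlfcn.h>
#include <cstdio>
#include <cstring>

// Returns true only if the library was opened; a missing or empty path means
// the caller should silently fall back, mirroring the Load() logic above.
bool LoadAdapterSketch(const char *path, void **entry_out) {
  if (path == nullptr || std::strlen(path) == 0)
    return false;
  void *handle = dlopen(path, RTLD_LAZY);  // defer symbol resolution
  if (!handle)
    return false;
  *entry_out = dlsym(handle, "placeholder_symbol");
  return true;
}

int main() {
  void *entry = nullptr;
  int status = LoadAdapterSketch("./libplaceholder.so", &entry) ? 0 : -1;
  std::printf("load status: %d\n", status);
  return 0;
}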
