
[Refactor] Fix compiler warnings (taichi-dev#3322)
* Fix compiler warnings

* Auto Format

Co-authored-by: Taichi Gardener <[email protected]>
sjwsl and taichi-gardener authored Oct 30, 2021
1 parent a0947af commit 628167a
Showing 13 changed files with 22 additions and 33 deletions.
2 changes: 1 addition & 1 deletion taichi/analysis/gather_statements.cpp
@@ -17,7 +17,7 @@ class StmtSearcher : public BasicStmtVisitor {
invoke_default_visitor = true;
}

- void visit(Stmt *stmt) {
+ void visit(Stmt *stmt) override {
if (test(stmt))
results.push_back(stmt);
}
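
Note: Adding `override` documents intent and asks the compiler to verify that `visit` really overrides a virtual from `BasicStmtVisitor`; without it, Clang/GCC can warn (`-Winconsistent-missing-override`, `-Wsuggest-override`). A minimal sketch of the pattern, with hypothetical stand-in types rather than Taichi's real classes:

    struct Stmt {};

    struct Visitor {
      virtual void visit(Stmt *stmt) = 0;
      virtual ~Visitor() = default;
    };

    struct Searcher : Visitor {
      // 'override' turns a silent signature mismatch (e.g. visit(const Stmt *))
      // into a hard compile error instead of an accidental new virtual.
      void visit(Stmt *stmt) override {}
    };
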
2 changes: 1 addition & 1 deletion taichi/backends/cuda/codegen_cuda.cpp
@@ -487,7 +487,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
data_ptr = builder->CreateBitCast(data_ptr, llvm_ptr_type(dtype));
auto data = create_intrinsic_load(dtype, data_ptr);
llvm_val[stmt] = extract_custom_int(data, bit_offset, int_in_mem);
- } else if (auto cft = val_type->cast<CustomFloatType>()) {
+ } else if (val_type->cast<CustomFloatType>()) {
// TODO: support __ldg
llvm_val[stmt] = load_custom_float(stmt->src);
} else {
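
Note: The branch body never read the bound pointer `cft`, so the declaration only triggered `-Wunused-variable`; testing the cast result directly keeps the same control flow. A minimal sketch with a hypothetical `cast`-like helper:

    #include <cstdio>

    // Stand-in for Taichi's Type::cast<T>(): returns a pointer or nullptr.
    struct Type {
      bool is_custom_float = false;
      const Type *cast_custom_float() const {
        return is_custom_float ? this : nullptr;
      }
    };

    void lower(const Type *val_type) {
      // if (auto cft = val_type->cast_custom_float()) { ... }  // warns if
      //                                                        // cft is unused
      if (val_type->cast_custom_float()) {  // test only: no unused binding
        std::printf("custom float path\n");
      }
    }

    int main() {
      Type t;
      t.is_custom_float = true;
      lower(&t);
    }
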
2 changes: 2 additions & 0 deletions taichi/backends/cuda/cuda_profiler.cpp
@@ -54,6 +54,8 @@ bool KernelProfilerCUDA::reinit_with_metrics(
metric_list_.size());
return true;
}

+ TI_NOT_IMPLEMENTED;
}

// deprecated, move to trace()
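
Note: `reinit_with_metrics` returns `bool`, and one path previously fell off the end of the function, which compilers flag with `-Wreturn-type` (and which is undefined behavior if actually reached). Ending the path with `TI_NOT_IMPLEMENTED` — presumably a throwing or aborting macro — makes the path provably non-returning. A sketch with a hypothetical stand-in:

    #include <cstdlib>

    // Hypothetical stand-in for TI_NOT_IMPLEMENTED: a [[noreturn]] call lets
    // the compiler prove the path never returns.
    [[noreturn]] inline void not_implemented() { std::abort(); }

    bool reinit_with_metrics(bool profiler_enabled) {
      if (profiler_enabled) {
        return true;
      }
      not_implemented();  // no missing-return warning on this path
    }

    int main() { return reinit_with_metrics(true) ? 0 : 1; }
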
6 changes: 3 additions & 3 deletions taichi/backends/cuda/cupti_toolkit.cpp
@@ -22,7 +22,7 @@ enum class CuptiMetricsDefault : uint {
CUPTI_METRIC_DEFAULT_TOTAL = 2
};

- constexpr char *MetricListDeafult[] = {
+ constexpr const char *MetricListDefault[] = {
"smsp__cycles_elapsed.avg", // CUPTI_METRIC_KERNEL_ELAPSED_CLK_NUMS
"smsp__cycles_elapsed.avg.per_second", // CUPTI_METRIC_CORE_FREQUENCY_HZS
};
@@ -798,7 +798,7 @@ CuptiToolkit::CuptiToolkit() {
uint metric_list_size =
static_cast<uint>(CuptiMetricsDefault::CUPTI_METRIC_DEFAULT_TOTAL);
for (uint idx = 0; idx < metric_list_size; idx++) {
- cupti_config_.metric_list.push_back(MetricListDeafult[idx]);
+ cupti_config_.metric_list.push_back(MetricListDefault[idx]);
}
}

@@ -812,7 +812,7 @@ void CuptiToolkit::reset_metrics(const std::vector<std::string> &metrics) {
uint metric_list_size =
static_cast<uint>(CuptiMetricsDefault::CUPTI_METRIC_DEFAULT_TOTAL);
for (uint idx = 0; idx < metric_list_size; idx++) {
- cupti_config_.metric_list.push_back(MetricListDeafult[idx]);
+ cupti_config_.metric_list.push_back(MetricListDefault[idx]);
}
// user selected metrics
for (auto metric : metrics)
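
Note: Two fixes in one hunk: the `Deafult` spelling is corrected everywhere, and the array element type gains `const`. String literals have type `const char[N]`, so initializing a `char *` from one is ill-formed in standard C++ (Clang: `-Wwritable-strings`; GCC: `-Wwrite-strings`). A minimal sketch:

    // String literals are const char[N]; only the const-qualified array is
    // well-formed C++ (the commented line would warn or fail to compile).
    // constexpr char *metrics_bad[] = {"smsp__cycles_elapsed.avg"};
    constexpr const char *metrics[] = {
        "smsp__cycles_elapsed.avg",
        "smsp__cycles_elapsed.avg.per_second",
    };

    int main() { return metrics[0][0] == 's' ? 0 : 1; }
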
1 change: 1 addition & 0 deletions taichi/backends/metal/codegen_metal.cpp
@@ -106,6 +106,7 @@ class RootIdsExtractor : public BasicStmtVisitor {
}

private:
+ using BasicStmtVisitor::visit;
std::unordered_set<int> roots_;
};

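
Note: Overriding one `visit` overload in `RootIdsExtractor` hides every other `visit` inherited from `BasicStmtVisitor`, which Clang reports via `-Woverloaded-virtual`; the `using BasicStmtVisitor::visit;` declaration re-exposes the hidden overloads. A minimal sketch with stand-in types:

    struct A {};
    struct B {};

    struct BaseVisitor {
      virtual void visit(A *) {}
      virtual void visit(B *) {}
      virtual ~BaseVisitor() = default;
    };

    struct Extractor : BaseVisitor {
      using BaseVisitor::visit;    // without this, visit(B *) is hidden and
      void visit(A *) override {}  // -Woverloaded-virtual fires
    };

    int main() {
      Extractor e;
      B b;
      e.visit(&b);  // resolves to the base overload thanks to the using-decl
    }
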
2 changes: 2 additions & 0 deletions taichi/backends/opengl/aot_module_builder_impl.cpp
@@ -59,6 +59,8 @@ void AotModuleBuilderImpl::add_per_backend_field(const std::string &identifier,
gl_dtype_enum = GL_DOUBLE;
} else if (dt == PrimitiveType::f32) {
gl_dtype_enum = GL_FLOAT;
+ } else {
+   TI_NOT_IMPLEMENTED
}

aot_data_.fields.push_back({identifier, gl_dtype_enum, dt.to_string(), shape,
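
Note: Without a final `else`, `gl_dtype_enum` stays uninitialized whenever `dt` matches no branch, and the later read can trigger `-Wmaybe-uninitialized` (GCC) or `-Wsometimes-uninitialized` (Clang). A non-returning `else` branch makes the chain exhaustive and fixes the latent bug as well. A sketch with hypothetical stand-ins:

    #include <cstdlib>

    enum GlDtype { kGlFloat = 0x1406, kGlDouble = 0x140A };  // stand-in values
    enum class DType { f32, f64, i32 };

    [[noreturn]] inline void not_implemented() { std::abort(); }

    GlDtype to_gl_dtype(DType dt) {
      GlDtype e;  // no initializer: every path must assign or not return
      if (dt == DType::f64) {
        e = kGlDouble;
      } else if (dt == DType::f32) {
        e = kGlFloat;
      } else {
        not_implemented();  // exhaustive chain: no maybe-uninitialized read
      }
      return e;
    }

    int main() { return to_gl_dtype(DType::f32) == kGlFloat ? 0 : 1; }
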
2 changes: 1 addition & 1 deletion taichi/backends/opengl/codegen_opengl.cpp
@@ -64,8 +64,8 @@ class KernelGen : public IRVisitor {
const StructCompiledResult *struct_compiled,
const std::string &kernel_name)
: kernel_(kernel),
- kernel_name_(kernel_name),
struct_compiled_(struct_compiled),
+ kernel_name_(kernel_name),
root_snode_type_name_(struct_compiled->root_snode_type_name),
glsl_kernel_prefix_(kernel_name) {
compiled_program_.init_args(kernel);
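
Note: Data members are initialized in declaration order, regardless of the order the mem-initializer list spells out, and GCC/Clang warn with `-Wreorder` when the two disagree. Reordering the list to match the declarations silences the warning and avoids a trap: an initializer that reads another member relies on declaration order, not list order. A minimal sketch:

    #include <string>

    class KernelGen {
     public:
      // Members are declared compiled_ first, then name_, so the initializer
      // list must follow that order or -Wreorder fires.
      KernelGen(const void *compiled, std::string name)
          : compiled_(compiled), name_(std::move(name)) {}

     private:
      const void *compiled_;
      std::string name_;
    };

    int main() {
      KernelGen gen(nullptr, "demo");
      (void)gen;
    }
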
17 changes: 1 addition & 16 deletions taichi/backends/opengl/opengl_api.cpp
@@ -31,21 +31,6 @@ int opengl_max_grid_dim = 1024;

#ifdef TI_WITH_OPENGL

- static std::string add_line_markers(std::string x) {
-   std::string marker;
-   size_t pos = 0, npos;
-   int line = 0;
-   while (1) {
-     npos = x.find_first_of('\n', pos);
-     marker = fmt::format("{:3d} ", ++line);
-     if (npos == std::string::npos)
-       break;
-     x.insert(pos, marker);
-     pos = npos + 1 + marker.size();
-   }
-   return x;
- }

struct OpenGlRuntimeImpl {
struct {
DeviceAllocation runtime = kDeviceNullAllocation;
@@ -374,7 +359,7 @@ void DeviceCompiledProgram::launch(Context &ctx, OpenGlRuntime *runtime) const {

DeviceCompiledProgram::DeviceCompiledProgram(CompiledProgram &&program,
Device *device)
- : program_(std::move(program)), device_(device) {
+ : device_(device), program_(std::move(program)) {
if (program_.args_buf_size || program_.total_ext_arr_size ||
program_.ret_buf_size) {
args_buf_ = device->allocate_memory(
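
Note: `add_line_markers` had no remaining callers; a function with internal linkage that is never used triggers `-Wunused-function`, so deleting it is the straightforward fix. The same file also reorders the `DeviceCompiledProgram` initializer list for `-Wreorder` (see the note after `codegen_opengl.cpp`). A minimal reproduction:

    // Compile with -Wunused-function: 'helper' has internal linkage and no
    // callers, so the definition is flagged as dead code.
    static int helper(int x) { return x + 1; }

    int main() { return 0; }
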
2 changes: 2 additions & 0 deletions taichi/backends/opengl/opengl_device.h
@@ -127,6 +127,8 @@ class GLCommandList : public CommandList {
struct Cmd {
virtual void execute() {
}
+ virtual ~Cmd() {
+ }
};

struct CmdBindPipeline : public Cmd {
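
Note: `Cmd` is a polymorphic base (it already has a virtual `execute()`), and the command list presumably destroys concrete commands through `Cmd *`; without a virtual destructor that is undefined behavior, and Clang/GCC warn with `-Wdelete-non-virtual-dtor`. A minimal sketch:

    #include <memory>

    struct Cmd {
      virtual void execute() {}
      virtual ~Cmd() = default;  // needed to delete derived objects via Cmd *
    };

    struct CmdBindPipeline : Cmd {
      void execute() override {}
    };

    int main() {
      std::unique_ptr<Cmd> cmd = std::make_unique<CmdBindPipeline>();
      cmd->execute();
    }  // unique_ptr<Cmd> destroys through the base: safe only with virtual dtor
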
9 changes: 3 additions & 6 deletions taichi/backends/vulkan/runtime.cpp
@@ -48,10 +48,10 @@ class HostDeviceContextBlitter {
DeviceAllocation *host_shadow_buffer)
: ctx_attribs_(ctx_attribs),
host_ctx_(host_ctx),
- device_(device),
host_result_buffer_(host_result_buffer),
device_buffer_(device_buffer),
- host_shadow_buffer_(host_shadow_buffer) {
+ host_shadow_buffer_(host_shadow_buffer),
+ device_(device) {
}

void host_to_device() {
@@ -227,7 +227,6 @@ CompiledTaichiKernel::CompiledTaichiKernel(const Params &ti_params)
}
const auto ctx_sz = ti_kernel_attribs_.ctx_attribs.total_bytes();
if (!ti_kernel_attribs_.ctx_attribs.empty()) {
- Device::AllocParams params;
ctx_buffer_ = ti_params.device->allocate_memory_unique(
{size_t(ctx_sz),
/*host_write=*/true, /*host_read=*/false,
@@ -298,7 +297,7 @@ void CompiledTaichiKernel::command_list(CommandList *cmdlist) const {
}

VkRuntime::VkRuntime(const Params &params)
- : host_result_buffer_(params.host_result_buffer), device_(params.device) {
+ : device_(params.device), host_result_buffer_(params.host_result_buffer) {
TI_ASSERT(host_result_buffer_ != nullptr);
init_buffers();
}
@@ -349,9 +348,7 @@ VkRuntime::KernelHandle VkRuntime::register_taichi_kernel(
params.global_tmps_buffer = global_tmps_buffer_.get();

for (int i = 0; i < reg_params.task_spirv_source_codes.size(); ++i) {
- const auto &attribs = reg_params.kernel_attribs.tasks_attribs[i];
const auto &spirv_src = reg_params.task_spirv_source_codes[i];
- const auto &task_name = attribs.name;

// If we can reach here, we have succeeded. Otherwise
// std::optional::value() would have killed us.
2 changes: 1 addition & 1 deletion taichi/codegen/codegen_llvm.cpp
@@ -1225,7 +1225,7 @@ void CodeGenLLVM::visit(GlobalLoadStmt *stmt) {
auto val_type = ptr_type->get_pointee_type();
if (val_type->is<CustomIntType>()) {
llvm_val[stmt] = load_as_custom_int(llvm_val[stmt->src], val_type);
- } else if (auto cft = val_type->cast<CustomFloatType>()) {
+ } else if (val_type->cast<CustomFloatType>()) {
TI_ASSERT(stmt->src->is<GetChStmt>());
llvm_val[stmt] = load_custom_float(stmt->src);
} else {
4 changes: 2 additions & 2 deletions taichi/program/program.cpp
@@ -390,7 +390,7 @@ Kernel &Program::get_snode_writer(SNode *snode) {

Kernel &Program::get_ndarray_reader(Ndarray *ndarray) {
auto kernel_name = fmt::format("ndarray_reader");
- auto &ker = kernel([ndarray, this] {
+ auto &ker = kernel([ndarray] {
ExprGroup indices;
for (int i = 0; i < ndarray->num_active_indices; i++) {
indices.push_back(Expr::make<ArgLoadExpression>(i, PrimitiveType::i32));
@@ -413,7 +413,7 @@

Kernel &Program::get_ndarray_writer(Ndarray *ndarray) {
auto kernel_name = fmt::format("ndarray_writer");
- auto &ker = kernel([ndarray, this] {
+ auto &ker = kernel([ndarray] {
ExprGroup indices;
for (int i = 0; i < ndarray->num_active_indices; i++) {
indices.push_back(Expr::make<ArgLoadExpression>(i, PrimitiveType::i32));
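
Note: Both lambdas captured `this` without using it, which Clang reports with `-Wunused-lambda-capture`; trimming the capture list to just `ndarray` fixes the warning. A minimal sketch:

    #include <cstdio>

    struct Program {
      void build(const char *name) {
        // auto ker = [name, this] { ... };  // warns: 'this' captured, unused
        auto ker = [name] { std::printf("kernel: %s\n", name); };
        ker();
      }
    };

    int main() {
      Program{}.build("ndarray_reader");
    }
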
4 changes: 2 additions & 2 deletions taichi/transforms/bit_loop_vectorize.cpp
@@ -40,7 +40,7 @@ class BitLoopVectorize : public IRVisitor {
void visit(GlobalLoadStmt *stmt) override {
auto ptr_type = stmt->src->ret_type->as<PointerType>();
if (in_struct_for_loop && bit_vectorize != 1) {
- if (auto cit = ptr_type->get_pointee_type()->cast<CustomIntType>()) {
+ if (ptr_type->get_pointee_type()->cast<CustomIntType>()) {
// rewrite the previous GlobalPtrStmt's return type from *cit to
// *phy_type
auto ptr = stmt->src->cast<GlobalPtrStmt>();
@@ -127,7 +127,7 @@ void visit(GlobalStoreStmt *stmt) override {
void visit(GlobalStoreStmt *stmt) override {
auto ptr_type = stmt->dest->ret_type->as<PointerType>();
if (in_struct_for_loop && bit_vectorize != 1) {
- if (auto cit = ptr_type->get_pointee_type()->cast<CustomIntType>()) {
+ if (ptr_type->get_pointee_type()->cast<CustomIntType>()) {
// rewrite the previous GlobalPtrStmt's return type from *cit to
// *phy_type
auto ptr = stmt->dest->cast<GlobalPtrStmt>();
