Make PyTorch code-base clang-tidy compliant (pytorch#56892)
Summary:
This is an automated change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os

def get_compiled_files_list():
    import json
    with open("build/compile_commands.json") as f:
        data = json.load(f)
    files = [os.path.relpath(node['file']) for node in data]
    for idx, fname in enumerate(files):
        if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
            files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
    return files

def run_clang_tidy(fname):
    check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
    changes = check_output(["git", "ls-files", "-m"])
    if len(changes) == 0:
        return
    check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])

def main():
    git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
    compiled_files = get_compiled_files_list()
    for idx, fname in enumerate(git_files):
        if fname not in compiled_files:
            continue
        if fname.startswith("caffe2/contrib/aten/"):
            continue
        print(f"[{idx}/{len(git_files)}] Processing {fname}")
        run_clang_tidy(fname)

if __name__ == "__main__":
    main()
```
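
In practice, each clang-tidy warning that could not be auto-fixed receives a `// NOLINTNEXTLINE(<check-name>)` stub directly above the offending line, as the diff below shows. As a rough illustration of the pattern (not a file from this commit; the function and variable names are hypothetical), the common `clang-diagnostic-sign-compare` case looks like this:

```
// Hypothetical example of the kind of NOLINT stub this commit inserts.
#include <cstdint>
#include <vector>

void process_all(const std::vector<int>& items) {
  // `idx` is signed while `items.size()` is unsigned, so the
  // clang-diagnostic-sign-compare check fires on the loop condition.
  // The stub silences that check for the next line only; the code
  // itself is left unchanged.
  // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
  for (int64_t idx = 0; idx < items.size(); ++idx) {
    (void)items[idx];
  }
}
```

The same pattern repeats throughout the files listed below, with the check name in parentheses matching whichever diagnostic fired on that line.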

Pull Request resolved: pytorch#56892

Reviewed By: H-Huang

Differential Revision: D27991944

Pulled By: malfet

fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
malfet authored and facebook-github-bot committed Apr 28, 2021
1 parent 5a10ee7 commit 4cb534f
Showing 1,327 changed files with 20,540 additions and 148 deletions.
7 changes: 7 additions & 0 deletions aten/src/ATen/BatchedFallback.cpp
@@ -87,6 +87,7 @@ static void warnFallback(const c10::FunctionSchema& schema, bool is_inplace) {
// the operator, and then pop the results off the stack.
void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
const auto& schema = op.schema();
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,clang-diagnostic-unused-variable)
const auto num_returns = schema.returns().size();
warnFallback(schema, /*in_place*/true);

@@ -106,6 +107,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
// For each BatchedTensor, also record what position of `arguments` they came from.
SmallVector<Tensor,kVmapTransformStaticInputSize> batched_tensor_inputs;
VmapDimVector batched_tensor_inputs_position;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t idx = 0; idx < arguments.size(); ++idx) {
const auto& ivalue = arguments[idx];
if (!ivalue.isTensor()) {
@@ -177,6 +179,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
auto index = computeIndex(linear_idx, batch_sizes);
auto batched_tensor_inputs_pos_iter = batched_tensor_inputs_position.begin();
auto input_physical_views_iter = input_physical_views.begin();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t arg_idx = 0; arg_idx < num_arguments; ++arg_idx) {
// We assume that torch::jit::Stack is backed by vector<IValue> for
// simplicity. When that is not the case, this code should be updated.
@@ -270,6 +273,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
// For each BatchedTensor, also record what position of `arguments` they came from.
SmallVector<Tensor,kVmapTransformStaticInputSize> batched_tensor_inputs;
VmapDimVector batched_tensor_inputs_position;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t idx = 0; idx < arguments.size(); ++idx) {
const auto& ivalue = arguments[idx];
if (!ivalue.isTensor()) {
@@ -320,6 +324,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
auto index = computeIndex(linear_idx, batch_sizes);
auto batched_tensor_inputs_pos_iter = batched_tensor_inputs_position.begin();
auto input_physical_views_iter = input_physical_views.begin();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t arg_idx = 0; arg_idx < num_arguments; ++arg_idx) {
// We assume that torch::jit::Stack is backed by vector<IValue> for
// simplicity. When that is not the case, this code should be updated.
@@ -343,6 +348,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
// Store the result into `output_shards`. See NOTE: [Output shards layout]
// to learn about the details of how we store the shards.
const auto returns = torch::jit::last(stack, num_returns);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t return_idx = 0; return_idx < returns.size(); ++return_idx) {
output_shards[num_batches * return_idx + linear_idx] = returns[return_idx].toTensor();
}
@@ -352,6 +358,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
// For each output Tensor, stack the shards of the tensor together to form a return
torch::jit::drop(stack, num_arguments);
auto output_shards_chunks = MatrixRef<Tensor>(output_shards, num_batches);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t return_idx = 0; return_idx < num_returns; ++return_idx) {
auto shards = output_shards_chunks[return_idx];
auto flat_output = safeStack(shards);
1 change: 1 addition & 0 deletions aten/src/ATen/BatchedTensorImpl.cpp
@@ -23,6 +23,7 @@ BatchedTensorImpl::BatchedTensorImpl(Tensor value, BatchDims bdims)
const auto value_sizes = value_.sizes();
const auto value_strides = value_.strides();
sizes_and_strides_.resize(public_dims);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int64_t dim = 0; dim < public_dims; dim++) {
auto actual_dim = actualDim(dim, /*wrap_dim=*/false);
sizes_and_strides_.size_at_unchecked(dim) = value_sizes.at(actual_dim);
3 changes: 3 additions & 0 deletions aten/src/ATen/BatchingRegistrations.cpp
@@ -146,16 +146,19 @@ Tensor expand_batching_rule(const Tensor& self, IntArrayRef size, bool implicit)
auto size_physical = self_physical.getPhysicalShape(size);
auto self_physical_dim = self_physical.tensor().dim();

// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
TORCH_CHECK(self_physical_dim <= size_physical.size(),
"expand: the number of sizes provided (", /*logical*/size.size(), ") ",
"must be greater or equal to the number of dimensions in the tensor (",
/*logical dim*/self.dim(), ")");

// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
if (self_physical_dim == size_physical.size()) {
auto result = self_physical.tensor().expand(size_physical, implicit);
return self_physical.getPhysicalToLogicalMap().apply(result);
}

// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
TORCH_INTERNAL_ASSERT(self_physical_dim < size_physical.size());
// Here, we know we are expanding a (logical) tensor to a larger number
// of dimensions. We have to be careful because we can't call expand directly
5 changes: 5 additions & 0 deletions aten/src/ATen/CPUGeneratorImpl.cpp
@@ -22,6 +22,7 @@ struct CPUGeneratorImplStateLegacy {
int left; /* = 1; */
int seeded; /* = 0; */
uint64_t next;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
uint64_t state[at::MERSENNE_STATE_N]; /* the array for the state vector */

/********************************/
@@ -70,6 +71,7 @@ Generator createCPUGenerator(uint64_t seed_val) {
* and return them as a 64 bit unsigned int
*/
inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (static_cast<uint64_t>(hi) << 32) | lo;
}

@@ -140,6 +142,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
auto double_normal_sample = c10::optional<double>();

// Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
CPUGeneratorImplStateLegacy* legacy_pod;
auto new_state_size = new_state.numel();
if (new_state_size == size_legacy) {
@@ -154,6 +157,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// intermediate values.
if (legacy_pod->normal_is_valid) {
auto r = legacy_pod->normal_rho;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));
@@ -183,6 +187,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// Note that CPUGeneratorImplStateLegacy stored a state array of 64 bit uints, whereas in our
// redefined mt19937, we have changed to a state array of 32 bit uints. Hence, we are
// doing a std::copy.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
at::mt19937_data_pod rng_data;
std::copy(std::begin(legacy_pod->state), std::end(legacy_pod->state), rng_data.state_.begin());
rng_data.seed_ = legacy_pod->the_initial_seed;
6 changes: 6 additions & 0 deletions aten/src/ATen/Context.cpp
@@ -92,13 +92,16 @@ void Context::setAllowTF32CuDNN(bool b) {
allow_tf32_cudnn = b;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
static const char cublas_config_var_name[] = "CUBLAS_WORKSPACE_CONFIG";
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
static const char* const cublas_deterministic_configs[] = { ":4096:8", ":16:8" };

bool Context::checkCuBLASConfigDeterministic() {
bool cublas_config_deterministic = true;
// If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
// is set to deterministic setting
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (hasCUDART() && (versionCUDART() >= 10020)) {
char* workspace_config = std::getenv(cublas_config_var_name);
cublas_config_deterministic = (workspace_config != nullptr) && (
@@ -240,6 +243,7 @@ Allocator* getCPUAllocator() {
// means the allow_tf32 flags are overrided and tf32 is force disabled
// override_allow_tf32_flag = false
// means the original allow_tf32 flags are followed
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
thread_local bool override_allow_tf32_flag = false;

NoTF32Guard::NoTF32Guard() {
@@ -273,6 +277,7 @@ void Context::setDefaultMobileCPUAllocator() {
"Cannot set another allocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
prev_allocator_ptr_ = c10::GetCPUAllocator();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
}

@@ -281,6 +286,7 @@ void Context::unsetDefaultMobileCPUAllocator() {
"setDefaultMobileCPUAllocator must have been called "
"before unsetDefaultMobileCPUAllocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(prev_allocator_ptr_ , /*priority*/ 100);
prev_allocator_ptr_ = nullptr;
}
15 changes: 15 additions & 0 deletions aten/src/ATen/DLConvertor.cpp
@@ -10,6 +10,7 @@ namespace at {
DLDataType getDLDataType(const Tensor& t) {
DLDataType dtype;
dtype.lanes = 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dtype.bits = t.element_size() * 8;
switch (t.scalar_type()) {
case ScalarType::Byte:
@@ -18,12 +19,14 @@ DLDataType getDLDataType(const Tensor& t) {
case ScalarType::Char:
dtype.code = DLDataTypeCode::kDLInt;
break;
// NOLINTNEXTLINE(bugprone-branch-clone)
case ScalarType::Double:
dtype.code = DLDataTypeCode::kDLFloat;
break;
case ScalarType::Float:
dtype.code = DLDataTypeCode::kDLFloat;
break;
// NOLINTNEXTLINE(bugprone-branch-clone)
case ScalarType::Int:
dtype.code = DLDataTypeCode::kDLInt;
break;
@@ -124,6 +127,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
switch (dtype.code) {
case DLDataTypeCode::kDLUInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Byte;
break;
@@ -134,15 +138,19 @@
break;
case DLDataTypeCode::kDLInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Char;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Short;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Int;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Long;
break;
@@ -153,12 +161,15 @@
break;
case DLDataTypeCode::kDLFloat:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Half;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Float;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Double;
break;
@@ -173,6 +184,7 @@
return stype;
}

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct ATenDLMTensor {
Tensor handle;
DLManagedTensor tensor;
@@ -198,8 +210,10 @@ DLManagedTensor* toDLPack(const Tensor& src) {
atDLMTensor->tensor.dl_tensor.ndim = src.dim();
atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
atDLMTensor->tensor.dl_tensor.shape =
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<int64_t*>(src.sizes().data());
atDLMTensor->tensor.dl_tensor.strides =
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<int64_t*>(src.strides().data());
atDLMTensor->tensor.dl_tensor.byte_offset = 0;
return &(atDLMTensor->tensor);
@@ -209,6 +223,7 @@ Tensor fromDLPack(const DLManagedTensor* src) {
Device device = getATenDevice(src->dl_tensor.ctx);
ScalarType stype = toScalarType(src->dl_tensor.dtype);
auto deleter = [src](void* self) {
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
src->deleter(const_cast<DLManagedTensor*>(src));
};
if (!src->dl_tensor.strides) {
1 change: 1 addition & 0 deletions aten/src/ATen/LegacyTHFunctionsCPU.cpp
@@ -18,6 +18,7 @@ namespace {
ScalarType infer_scalar_type(const Tensor & t) {
return t.scalar_type();
}
// NOLINTNEXTLINE(clang-diagnostic-unused-function)
ScalarType infer_scalar_type(const TensorList & tl) {
TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
return tl[0].scalar_type();
8 changes: 8 additions & 0 deletions aten/src/ATen/NamedTensorUtils.cpp
@@ -169,6 +169,7 @@ void propagate_names_except(const Tensor& result, const Tensor& src, IntArrayRef
auto src_names = src.names();
auto result_dim = result.dim();
auto src_dim = src_names.size();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
TORCH_INTERNAL_ASSERT(src_dim - excluded_idxs.size() == result_dim);

// fast path
@@ -253,6 +254,7 @@ std::vector<Dimname> compute_diagonal_outnames(
// tensors that we contract together. Usually other_dotted_dim is 0
// and tensor_dotted_dim is the last dim of tensor, but there are some special
// cases like einsum and tensordot where one can contract arbitrary dims.
// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static std::vector<Dimname> compute_dot_product_outnames(
DimnameList tensor_names,
int64_t tensor_dotted_dim,
@@ -265,10 +267,12 @@ static std::vector<Dimname> compute_dot_product_outnames(
std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
int64_t index = 0;
for (size_t j = 0; j < tensor_names.size(); ++j) {
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
if (j == tensor_dotted_dim) continue;
outnames[index++] = tensor_names[j];
}
for (size_t j = 0; j < other_names.size(); ++j) {
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
if (j == other_dotted_dim) continue;
outnames[index++] = other_names[j];
}
@@ -294,20 +298,23 @@ static void check_feature_names_are_distinct(
". Please rename the input tensors with `Tensor.rename` to prevent this.");
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static DimnameList batch_dims(DimnameList names) {
if (names.size() <= 2) {
return {};
}
return DimnameList(names.begin(), names.end() - 2);
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static DimnameList feature_dims(DimnameList names) {
if (names.size() <= 2) {
return names;
}
return DimnameList(names.end() - 2, 2);
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
for (const auto& target : feature_dims) {
if (target.isWildcard()) {
@@ -366,6 +373,7 @@ static std::vector<Dimname> compute_matmul_outnames(
const auto result = working_names.toDimnameVec();

check_feature_names_are_distinct(self_names, other_names, result);
// NOLINTNEXTLINE(performance-no-automatic-move)
return result;
}

7 changes: 7 additions & 0 deletions aten/src/ATen/ParallelNative.cpp
@@ -23,9 +23,11 @@ namespace at {
namespace {
// used with _set_in_parallel_region to mark master thread
// as in parallel region while executing parallel primitives
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
thread_local bool in_parallel_region_ = false;

// thread number (task_id) set by parallel primitive
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
thread_local size_t thread_num_ = 0;

void _set_in_parallel_region(bool in_region) {
@@ -53,6 +55,7 @@ const int CONSUMED = -2;
// - NOT_SET - pool not initialized, user value is not set
// - positive value - pool not initialized, user value set
// - CONSUMED - pool is initialized
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::atomic<int> num_intraop_threads{NOT_SET};

int _num_pool_threads(int nthreads) {
@@ -123,10 +126,12 @@ void _parallel_run(
const std::function<void(int64_t, int64_t, size_t)>& f) {
at::internal::lazy_init_num_threads();

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
size_t num_tasks, chunk_size;
std::tie(num_tasks, chunk_size) =
internal::calc_num_tasks_and_chunk_size(begin, end, grain_size);

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct {
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
@@ -197,6 +202,7 @@ void set_num_threads(int nthreads) {
int stored_nthreads = num_intraop_threads.load();
if (stored_nthreads <= 0) {
// plus one because of master thread
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
stored_nthreads = _get_intraop_pool().size() + 1;
}
if (stored_nthreads != nthreads) {
@@ -224,6 +230,7 @@ int get_num_threads() {
return intraop_default_num_threads();
} else {
TORCH_INTERNAL_ASSERT(nthreads == CONSUMED);
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
return _get_intraop_pool().size() + 1;
}
#else