
[CodeStyle][Typos][C-59] Fix typos (Conver) #70259

Merged
merged 15 commits into from Dec 17, 2024
Changes from 1 commit
c53-58
rich04lin committed Dec 15, 2024
commit cf48a28934442b8fd4c29e03c7dbf468503ed109
14 changes: 7 additions & 7 deletions _typos.toml
@@ -41,13 +41,13 @@ caculate = 'caculate'
 calcualtion = 'calcualtion'
 checkings = 'checkings'
 childs = 'childs'
-Continous = 'Continous'
-contibute = 'contibute'
-controled = 'controled'
-contorl = 'contorl'
-converage = 'converage'
-Converage = 'Converage'
-convertion = 'convertion'
+#Continous = 'Continous'
+#contibute = 'contibute'
+#controled = 'controled'
+#contorl = 'contorl'
+#converage = 'converage'
+#Converage = 'Converage'
+#convertion = 'convertion'
 Conver = 'Conver'
 convience = 'convience'
 coodinate = 'coodinate'
2 changes: 1 addition & 1 deletion paddle/fluid/operators/print_op.cc
@@ -186,6 +186,6 @@ REGISTER_OPERATOR(print,

 REGISTER_OP_VERSION(print).AddCheckpoint(
 R"ROC(Upgrade print add a new attribute [print_tensor_layout] to "
-"contorl whether to print tensor's layout.)ROC",
+"control whether to print tensor's layout.)ROC",
 paddle::framework::compatible::OpVersionDesc().NewAttr(
 "print_tensor_layout", "Whether to print the tensor's layout.", true));
2 changes: 1 addition & 1 deletion paddle/phi/backends/onednn/onednn_reuse.h
@@ -1166,7 +1166,7 @@ class BinaryOneDNNHandler : public OneDNNHandlerNoCachingT<T, dnnl::binary> {
 float scale_y,
 float scale_out,
 dnnl::post_ops post_ops = dnnl::post_ops{}) {
-// Scales set in attributes for inputs contibute to the output equation
+// Scales set in attributes for inputs contribute to the output equation
 // in the following way (assuming no broadcasting takes place):
 // output_i = scale_0 * x_i <+ or *> scale_1 * y_i;
 // Hence we have to create scales that will:
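A minimal Python sketch (illustrative only, not part of this diff) of the output equation quoted in the comment above, shown for the "+" case with no broadcasting; the function name and sample values are made up for illustration.

```python
import numpy as np

def scaled_binary_add(x, y, scale_0, scale_1):
    # Each input is scaled independently before the binary op is applied,
    # i.e. output_i = scale_0 * x_i + scale_1 * y_i, which is why separate
    # per-input scale attributes have to be prepared.
    return scale_0 * np.asarray(x) + scale_1 * np.asarray(y)

print(scaled_binary_add([1.0, 2.0], [3.0, 4.0], scale_0=0.5, scale_1=2.0))
# prints [6.5 9. ]
```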
10 changes: 5 additions & 5 deletions paddle/phi/kernels/funcs/weight_only_gemv.cu
@@ -394,7 +394,7 @@ struct WeightLayoutDetails<WeightOnlyQuantType::Int4b> {
 // 20 21 28 29 6 7 14 15 22 23 30 31
 static constexpr int kShuffleSize = 32;
 static constexpr int kShuffleBasicTile = 2;
-static constexpr int kShuffleContinous = 4;
+static constexpr int kShuffleContinuous = 4;
 static constexpr int kShuffleStrided = 4;

 // The rearrangement here counteracts the effect of
@@ -456,7 +456,7 @@ struct WeightLayoutDetails<WeightOnlyQuantType::Int8b> {
 // 13 14 15 weight 0 1 8 9 2 3 10 11 4 5 12 13 6 7 14 15
 static constexpr int kShuffleSize = 16;
 static constexpr int kShuffleBasicTile = 2;
-static constexpr int kShuffleContinous = 2;
+static constexpr int kShuffleContinuous = 2;
 static constexpr int kShuffleStrided = 4;

 // The rearrangement here counteracts the effect of
@@ -504,7 +504,7 @@ struct WeightOnlyKernelDetails {

 static constexpr int kShuffleSize = Layout::kShuffleSize;
 static constexpr int kShuffleBasicTile = Layout::kShuffleBasicTile;
-static constexpr int kShuffleContinous = Layout::kShuffleContinous;
+static constexpr int kShuffleContinuous = Layout::kShuffleContinuous;
 static constexpr int kShuffleStrided = Layout::kShuffleStrided;

 // using Converter = typename Layout::Converter;
@@ -848,14 +848,14 @@ struct WeightPostProcessor<T, WeightOnlyQuantType::Int4b, Details> {
 int idx) {
 using HALF_2_TYPE = typename CUDA_HALF_2_TYPE_TARIS<T>::type;
 #pragma unroll
-for (int i = 0; i < Details::kShuffleContinous; ++i) {
+for (int i = 0; i < Details::kShuffleContinuous; ++i) {
 #pragma unroll
 for (int j = 0; j < Details::kShuffleStrided; ++j) {
 // Dequantize the weights and arrange the shuffled elements back to
 // the correct order in the register array
 HALF_2_TYPE v = *reinterpret_cast<HALF_2_TYPE*>(
 weights_vec + i * Details::kShuffleBasicTile +
-j * Details::kShuffleContinous * Details::kShuffleBasicTile);
+j * Details::kShuffleContinuous * Details::kShuffleBasicTile);
 v = HalfMulAdd<HALF_2_TYPE>::apply(
 v,
 ConvertDstFunc_2<HALF_2_TYPE>::apply(scale[idx]),
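A small Python sketch (illustrative, not part of the diff) of how the renamed kShuffleContinuous constant drives the register indexing in the Int4 dequantize loop above, using only the constants visible in these hunks (kShuffleContinuous = 4, kShuffleStrided = 4, kShuffleBasicTile = 2).

```python
# Reproduce the offsets visited by the loop above for the Int4 layout:
#   offset = i * kShuffleBasicTile + j * kShuffleContinuous * kShuffleBasicTile
# Each offset is the start of one HALF_2 pair in the 32-element register array.
K_SHUFFLE_CONTINUOUS = 4
K_SHUFFLE_STRIDED = 4
K_SHUFFLE_BASIC_TILE = 2

offsets = [
    i * K_SHUFFLE_BASIC_TILE + j * K_SHUFFLE_CONTINUOUS * K_SHUFFLE_BASIC_TILE
    for i in range(K_SHUFFLE_CONTINUOUS)
    for j in range(K_SHUFFLE_STRIDED)
]
print(offsets)
# [0, 8, 16, 24, 2, 10, 18, 26, 4, 12, 20, 28, 6, 14, 22, 30]
```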
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/program_translator.py
@@ -1034,7 +1034,7 @@ def concrete_program_specify_input_spec(
 # if specific the `input_spec`, the length of program_cache will always 1,
 # else, return the last one.
 cached_program_len = len(self._program_cache)
-# If specific `input_spec`, apply convertion from dygraph layers into static Program.
+# If specific `input_spec`, apply conversion from dygraph layers into static Program.
 # NOTE(jiabin): is_prim_infer indicates this method called by paddle.jit.save and it is worked in prim mode

 desired_input_spec = input_spec
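For context on the comment being touched: a hedged sketch of what "specify the input_spec" means in user code, assuming the public paddle.jit.to_static and paddle.static.InputSpec API; the tiny model and shapes are invented for illustration.

```python
import paddle
from paddle.static import InputSpec

class TinyNet(paddle.nn.Layer):
    def forward(self, x):
        return x * 2

net = TinyNet()
# With an explicit input_spec, dygraph-to-static conversion is pinned to this
# signature, so the program cache holds a single concrete program (the
# "length ... will always 1" case mentioned in the comment above).
static_net = paddle.jit.to_static(
    net, input_spec=[InputSpec(shape=[None, 8], dtype="float32")]
)
out = static_net(paddle.rand([4, 8]))
```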
2 changes: 1 addition & 1 deletion test/cpp/inference/api/analyzer_capi_ner_tester.cc
@@ -111,7 +111,7 @@ TEST(PD_ZeroCopyRun, zero_copy_run) {
 "%s",
 PD_GetOutputName(predictor, 0));

-// not necessary, just for converage tests
+// not necessary, just for coverage tests
 output.lod.data = std::malloc(sizeof(size_t));

 PD_GetZeroCopyOutput(predictor, &output);
2 changes: 1 addition & 1 deletion test/cpp/inference/infer_ut/README.md
@@ -24,7 +24,7 @@ busybox bash ./run.sh $PADDLE_ROOT $TURN_ON_MKL $TEST_GPU_CPU $DATA_DIR
 - `$TEST_GPU_CPU`: test both GPU/CPU mode or only CPU mode
 - `$DATA_DIR`: download data path

-now only support 4 kinds of tests which controled by `--gtest_filter` argument, test suite name should be same as following.
+now only support 4 kinds of tests which controlled by `--gtest_filter` argument, test suite name should be same as following.
 - `TEST(gpu_tester_*, test_name)`
 - `TEST(cpu_tester_*, test_name)`
 - `TEST(mkldnn_tester_*, test_name)`
2 changes: 1 addition & 1 deletion test/dygraph_to_static/test_logging_utils.py
@@ -87,7 +87,7 @@ def test_set_code_level(self):
 paddle.jit.set_code_level(3.3)

 def test_log_api(self):
-# test api for CI Converage
+# test api for CI Coverage
 logging_utils.set_verbosity(1, True)

 logging_utils.warn("warn")
8 changes: 4 additions & 4 deletions test/legacy_test/test_lbfgs_class.py
@@ -89,7 +89,7 @@ def func(w, x):
 np.testing.assert_allclose(net.w, weight, rtol=1e-05)

 def test_inf_minima_incubate(self):
-# not converage
+# not converge
 input = np.random.rand(1).astype(np.float32)

 def outputs1(x):
@@ -169,7 +169,7 @@ def error_func1():
 self.assertRaises(TypeError, error_func1)

 def test_error2_incubate(self):
-# not converage
+# not converge
 input = np.random.rand(1).astype(np.float32)

 def outputs2(x):
@@ -339,7 +339,7 @@ def func(w, x):
 np.testing.assert_allclose(net.w, weight, rtol=1e-05)

 def test_inf_minima(self):
-# not converage
+# not converge
 input = np.random.rand(1).astype(np.float32)

 def outputs1(x):
@@ -419,7 +419,7 @@ def error_func1():
 self.assertRaises(TypeError, error_func1)

 def test_error2(self):
-# not converage
+# not converge
 input = np.random.rand(1).astype(np.float32)

 def outputs2(x):
2 changes: 1 addition & 1 deletion tools/gen_pybind11_stub.py
@@ -71,7 +71,7 @@
 # ref:
 # - https://pybind11.readthedocs.io/en/latest/advanced/misc.html#avoiding-cpp-types-in-docstrings
 # - https://pybind11.readthedocs.io/en/latest/advanced/functions.html#default-arguments-revisited
-# we can add some mappings for convertion, e.g. {'paddle::Tensor': 'paddle.Tensor'}
+# we can add some mappings for conversion, e.g. {'paddle::Tensor': 'paddle.Tensor'}
 PYBIND11_ATTR_MAPPING = {}

 # some bad full expression pybind11-stubgen can not catch as invalid exp
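A hypothetical sketch (not the tool's actual code) of how a mapping such as {'paddle::Tensor': 'paddle.Tensor'} could be applied to a pybind11-generated signature; the helper function and the example signature are invented for illustration.

```python
# Hypothetical helper: replace C++ type names in a generated signature with
# their Python-facing equivalents, as the PYBIND11_ATTR_MAPPING comment suggests.
PYBIND11_ATTR_MAPPING = {"paddle::Tensor": "paddle.Tensor"}

def apply_attr_mapping(signature: str) -> str:
    for cpp_name, py_name in PYBIND11_ATTR_MAPPING.items():
        signature = signature.replace(cpp_name, py_name)
    return signature

print(apply_attr_mapping(
    "def matmul(x: paddle::Tensor, y: paddle::Tensor) -> paddle::Tensor"
))
# def matmul(x: paddle.Tensor, y: paddle.Tensor) -> paddle.Tensor
```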