[trivial] Remove trailing whitespace
bertmaher committed Oct 12, 2018
1 parent cb2b12a commit 39323e0
Showing 14 changed files with 31 additions and 31 deletions.
6 changes: 3 additions & 3 deletions .clang-format
@@ -20,7 +20,7 @@ AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
@@ -54,11 +54,11 @@ DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
- Regex: '^(<|"(gtest|gmock|isl|json)/)'
4 changes: 2 additions & 2 deletions .travis/run_coverage.sh
@@ -13,13 +13,13 @@ fi
# from a fork. Upload coverage only if secure vars are set.
if [ "${TRAVIS_SECURE_ENV_VARS}" != "false" ]; then
echo "INFO: Uploading coverage to S3."

BRANCH_NAME="${TRAVIS_BRANCH}"
COVERAGE_DIR="$(dirname "${COVERAGE_FILE}")"
UPLOAD_LOCATION="fb-glow-assets/coverage/coverage-${BRANCH_NAME}"

aws s3 cp "${COVERAGE_DIR}" "s3://${UPLOAD_LOCATION}" --recursive --acl public-read
echo "INFO: Coverage report for branch '${BRANCH_NAME}': https://fb-glow-assets.s3.amazonaws.com/coverage/coverage-${BRANCH_NAME}/index.html"
else
echo "WARNING: Coverage cannot be uploaded to s3 for PR from a fork."
echo "WARNING: Coverage cannot be uploaded to s3 for PR from a fork."
fi
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -64,8 +64,8 @@ find_package(LLVM 7 CONFIG)
if (NOT LLVM_FOUND)
# Fallback to LLVM 6
find_package(LLVM 6 CONFIG)

# Fallback to whatever is available.
if (NOT LLVM_FOUND)
find_package(LLVM CONFIG)
endif()
14 changes: 7 additions & 7 deletions cmake/modules/CoverageSupport.cmake
@@ -23,28 +23,28 @@ if(GLOW_USE_COVERAGE)

# Add compilation flags for coverage.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")

# Add glow_coverage target.
add_custom_target(glow_coverage
# Cleanup lcov counters.
COMMAND ${LCOV_PATH} --directory . --zerocounters
COMMAND echo "Cleaning is done. Running tests"

# Run all tests.
COMMAND ctest -j 4

# Capture lcov counters based on the test run.
COMMAND ${LCOV_PATH} --no-checksum --directory . --capture --output-file glow_coverage.info

# Ignore not related files.
COMMAND ${LCOV_PATH} --remove glow_coverage.info '*v1*' '/usr/*' '*tests/*' '*llvm_install*' --output-file ${PROJECT_BINARY_DIR}/glow_coverage_result.info

# Generate HTML report based on the profiles.
COMMAND ${GENHTML_PATH} -o glow_coverage ${PROJECT_BINARY_DIR}/glow_coverage_result.info

# Cleanup info files.
COMMAND ${CMAKE_COMMAND} -E remove glow_coverage.info ${PROJECT_BINARY_DIR}/glow_coverage_result.info

WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
)

4 changes: 2 additions & 2 deletions cmake/modules/GlowTestSupport.cmake
@@ -11,7 +11,7 @@ function(add_glow_test)
list(GET ARG_UNPARSED_ARGUMENTS 0 ARG_NAME)
list(REMOVE_AT ARG_UNPARSED_ARGUMENTS 0)
endif()

if (NOT ARG_NAME)
message(FATAL_ERROR "Name mandatory")
endif()
@@ -26,7 +26,7 @@ function(add_glow_test)

list(GET ARG_COMMAND 0 TEST_EXEC)
list(APPEND ARG_DEPENDS ${TEST_EXEC})

set_property(GLOBAL APPEND PROPERTY GLOW_TEST_DEPENDS ${ARG_DEPENDS})

# Produce the specific test rule using the default built-in.
4 changes: 2 additions & 2 deletions cmake/modules/SanitizerSupport.cmake
@@ -20,12 +20,12 @@ if(GLOW_USE_SANITIZER)
elseif(GLOW_USE_SANITIZER MATCHES "Memory(WithOrigins)?")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=memory")
if(GLOW_USE_SANITIZER STREQUAL "MemoryWithOrigins")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize-memory-track-origins")
endif()
elseif(GLOW_USE_SANITIZER STREQUAL "Undefined")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-sanitize-recover=all")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-sanitize=vptr,function")
elseif(GLOW_USE_SANITIZER STREQUAL "Thread")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread")
elseif(GLOW_USE_SANITIZER STREQUAL "Address;Undefined" OR
8 changes: 4 additions & 4 deletions docs/NewBackendSpecificNode.md
@@ -20,7 +20,7 @@ In TensorRT, the convolution, bias and ReLU layers of various sizes can be combi

Actually, a Tensor is a view of a block of memory. Besides a pointer to the memory, we also need some additional descriptions of this block of memory, such as its shape, strides, and layout.
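
To make the "view of memory" idea concrete, here is a minimal C++ sketch (an illustration only, not Glow's actual `Tensor` class) of a view that pairs a data pointer with shape and stride metadata:

```cpp
#include <cstddef>
#include <vector>

// Illustrative only: a tensor "view" pairs a raw data pointer with the
// metadata that maps logical indices onto the underlying memory block.
struct TensorView {
  float *data;                  // not owned; points into a memory block
  std::vector<size_t> shape;    // extent of each dimension
  std::vector<size_t> strides;  // elements to skip when stepping a dimension

  // Element access for a 4-D view (e.g. NCHW or NHWC, depending on strides).
  float &at(size_t d0, size_t d1, size_t d2, size_t d3) {
    return data[d0 * strides[0] + d1 * strides[1] + d2 * strides[2] +
                d3 * strides[3]];
  }
};
```

Two views over the same block of memory can differ only in their strides; that difference in layout is what makes one kernel implementation faster than another on a given backend.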

Different layouts lead to different implementations of the operator kernel on a given backend.
For example, in the subgraph from ResNet50, a `CPUConvDKKC8` node with a memory layout modified for efficient SIMD access is introduced to optimize for the CPU backend. Please refer to "5.3 Use Case: Optimizing Resnet50 for the CPU" in the [Glow paper](https://arxiv.org/abs/1805.00907).

We should take both fast operator kernel implementations and the potential extra layout transformations into consideration to get better performance.
@@ -44,7 +44,7 @@ Here are mainly three steps to add a new backend-specific node in Glow:

ReLU is the max between zero and the input value. Glow lowers `ReLUNode` to two basic low-level linear algebra operator nodes, `SplatNode` and `MaxNode`. The `SplatNode` first fills a Tensor with zeros, and the `MaxNode` then compares `Input` with that filled Tensor. Since these two operations work on tensors of the same shape, we can fuse them into a single kernel.

Please refer to the [Backend](https://github.com/pytorch/glow/blob/master/docs/Backends.md#backend-specific-nodes-and-instructions) documentation for source code details on adding a new backend-specific `CPUMaxSplatNode` on the CPU backend.
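
As an illustration of the fused computation (a conceptual sketch, not Glow's actual CPU kernel), the splatted constant can be folded directly into a single element-wise loop:

```cpp
#include <algorithm>
#include <cstddef>

// Conceptual sketch of a fused "max with splat" kernel: instead of first
// materializing a zero-filled tensor (Splat) and then taking the
// element-wise Max, the constant is applied inside one pass over the data.
void maxSplatKernel(const float *in, float *out, size_t size,
                    float splatValue /* 0.0f for ReLU */) {
  for (size_t i = 0; i < size; ++i) {
    out[i] = std::max(in[i], splatValue);
  }
}
```

Fusing the two nodes avoids materializing the intermediate splatted tensor and makes a single pass over the data.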

#### Data Layout Transformation for Conv Operator in OpenCL

@@ -67,7 +67,7 @@ BB.newNode("OCLConvolution")
"filter, the bias and the input are in the NCHW format");
```

During `transformPostLowering()`, this `convertConvToNCHWConv` node, which contains an `NCHWConvNode` and multiple `Transpose` nodes for `Input`, `Filter`, and `Result`, replaces the aforementioned pattern.
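
For intuition, the inserted `Transpose` nodes perform an index remapping like the one sketched below (a self-contained example, independent of Glow's APIs), converting a tensor stored in NHWC order into NCHW order:

```cpp
#include <cstddef>
#include <vector>

// Illustrative only: copy an NHWC-laid-out tensor into NCHW order.
// This is the kind of data movement the inserted Transpose nodes perform
// around the backend-specific convolution.
std::vector<float> nhwcToNchw(const std::vector<float> &src, size_t N,
                              size_t H, size_t W, size_t C) {
  std::vector<float> dst(src.size());
  for (size_t n = 0; n < N; ++n)
    for (size_t c = 0; c < C; ++c)
      for (size_t h = 0; h < H; ++h)
        for (size_t w = 0; w < W; ++w)
          dst[((n * C + c) * H + h) * W + w] =    // NCHW destination index
              src[((n * H + h) * W + w) * C + c]; // NHWC source index
  return dst;
}
```

Whether the cost of these extra transposes is outweighed by the faster NCHW convolution kernel is exactly the trade-off mentioned above.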

A corresponding backend-specific `OCLConvolution` instruction is also needed, defined in
`tools/ClassGen/Backends/OpenCL/OpenCLSpecificInstrs.h`:
@@ -93,4 +93,4 @@ BB.newBackendSpecificInstr("OCLConvolution")
- [Glow: Graph Lowering Compiler Techniques for Neural Networks](https://arxiv.org/abs/1805.00907)
- [TVM: An Automated End-to-End Optimizing Compiler for Deep Learning](https://arxiv.org/abs/1802.04799)
- [TensorRT 3: Faster TensorFlow Inference and Volta Support](https://devblogs.nvidia.com/tensorrt-3-faster-tensorflow-inference/)
- [Discussions in Glow issue 1549](https://github.com/pytorch/glow/issues/1549#issuecomment-416283664)
4 changes: 2 additions & 2 deletions lib/Backends/OpenCL/CMakeLists.txt
@@ -1,6 +1,6 @@
add_custom_command(
OUTPUT "${CMAKE_BINARY_DIR}/glow/kernels.inc"
COMMAND include-bin
"${CMAKE_CURRENT_SOURCE_DIR}/kernels.cl"
"${CMAKE_BINARY_DIR}/glow/kernels.inc"
DEPENDS include-bin CPURuntime "${CMAKE_CURRENT_SOURCE_DIR}/kernels.cl")
@@ -24,7 +24,7 @@ add_custom_command(
add_library(OpenCL
"${CMAKE_BINARY_DIR}/glow/kernels.inc"
"${CMAKE_BINARY_DIR}/glow/kernels_fwd_conv.inc"
"${CMAKE_BINARY_DIR}/glow/kernels_fwd_quantized_conv.inc"
"${CMAKE_BINARY_DIR}/glow/kernels_fwd_quantized_conv.inc"
OpenCL.cpp
Transforms.cpp)

2 changes: 1 addition & 1 deletion lib/Importer/CMakeLists.txt
@@ -34,7 +34,7 @@ add_library(Importer
ProtobufLoader.cpp
Caffe2ModelLoader.cpp
ONNXModelLoader.cpp
ONNXIFIModelLoader.cpp
${CAFFE_SRCS}
${GLOW_BINARY_DIR}/caffe2/proto/caffe2.pb.h)
target_include_directories(Importer PUBLIC ${ONNX_INCLUDE_DIRS})
2 changes: 1 addition & 1 deletion tests/models/caffe2Models/clip_op_default_net.pbtxt
@@ -6,4 +6,4 @@ op {
type: "Clip"
}
external_input: "inputs_0"
external_output: "clip_result"
2 changes: 1 addition & 1 deletion tests/models/caffe2Models/clip_op_net.pbtxt
@@ -14,4 +14,4 @@ op {
}
}
external_input: "inputs_0"
external_output: "clip_result"
2 changes: 1 addition & 1 deletion tests/models/caffe2Models/fcTransposed_init_net.pbtxt
@@ -13,7 +13,7 @@ op {
floats: 4.0
floats: 7.0
floats: 10.0
floats: 2.0
floats: 5.0
floats: 8.0
floats: 11.0
2 changes: 1 addition & 1 deletion tests/models/caffe2Models/fc_init_net.pbtxt
@@ -13,7 +13,7 @@ op {
floats: 2.0
floats: 3.0
floats: 4.0
floats: 5.0
floats: 6.0
floats: 7.0
floats: 8.0
4 changes: 2 additions & 2 deletions utils/scripts/gen_caffe2_model.py
@@ -1,6 +1,6 @@
## This is a helper script that generates Caffe2 models.
## The generated model will be used for Caffe2 importer unittest:
## ./tests/unittests/caffe2ImporterTest.cpp
## Run $>python gen_caffe2_model.py to get the model files.

from caffe2.proto import caffe2_pb2
