Skip to content

Commit

Permalink
Revert D24010742: [pytorch][PR] Add callgrind collection to Timer
Browse files Browse the repository at this point in the history
Test Plan: revert-hammer

Differential Revision:
D24010742 (pytorch@9b27e09)

Original commit changeset: df6bc765f8ef

fbshipit-source-id: 4c1edd57ea932896f7052716427059c924222501
  • Loading branch information
Mike Ruberry authored and facebook-github-bot committed Sep 30, 2020
1 parent 6c4aa2a commit 51d0ae9
Show file tree
Hide file tree
Showing 12 changed files with 2 additions and 554 deletions.
3 changes: 1 addition & 2 deletions .circleci/docker/common/install_base.sh
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ install_ubuntu() {
software-properties-common \
sudo \
wget \
valgrind \
vim

# TODO: THIS IS A HACK!!!
Expand Down Expand Up @@ -93,7 +92,6 @@ install_centos() {
opencv-devel \
sudo \
wget \
valgrind \
vim

# Cleanup
Expand Down Expand Up @@ -133,3 +131,4 @@ sudo make install
cd ../../
rm -rf valgrind_build
alias valgrind="/usr/local/bin/valgrind"

1 change: 0 additions & 1 deletion .circleci/scripts/binary_ios_build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@ source ~/anaconda/bin/activate

# Install dependencies
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
conda install -c conda-forge valgrind
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}

# sync submodules
Expand Down
4 changes: 0 additions & 4 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,3 @@
ignore = dirty
path = third_party/tensorpipe
url = https://github.com/pytorch/tensorpipe.git
[submodule "third_party/valgrind"]
ignore = dirty
path = third_party/valgrind
url = https://sourceware.org/git/valgrind.git
12 changes: 1 addition & 11 deletions test/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, slowTest
from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS
from urllib.error import URLError
import numpy as np

Expand Down Expand Up @@ -768,16 +768,6 @@ class MockCudaTimer(benchmark_utils.Timer):
self.assertEqual(len(measurement.times), repeats)
self.assertEqual(measurement.number_per_run, number_per_run)

@slowTest
@unittest.skipIf(IS_WINDOWS, "Valgrind is not supported on Windows.")
def test_collect_callgrind(self):
    """Callgrind instruction counts for a trivial stmt should be reproducible."""
    t = benchmark_utils.Timer("y = torch.ones((1,)) + 1")

    # Skipping the baseline collection shaves roughly 30 seconds off this test.
    counts = t.collect_callgrind(number=1000, collect_baseline=False)

    # Exact expected instruction count; rtol absorbs tiny environment jitter.
    expected = 38803198
    self.assertEqual(counts.counts(include_lookdict_unicode=False), expected, atol=0, rtol=0.0001)

def test_compare(self):
# Simulate several approaches.
costs = (
Expand Down
1 change: 0 additions & 1 deletion third_party/valgrind
Submodule valgrind deleted from 2593cc
3 changes: 0 additions & 3 deletions torch/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -66,9 +66,6 @@ set(TORCH_PYTHON_INCLUDE_DIRECTORIES
${CMAKE_BINARY_DIR}/third_party
${CMAKE_BINARY_DIR}/third_party/onnx

${TORCH_ROOT}/third_party/valgrind/callgrind
${TORCH_ROOT}/third_party/valgrind/include

${TORCH_ROOT}/third_party/gloo
${TORCH_ROOT}/third_party/onnx
${pybind11_INCLUDE_DIRS}
Expand Down
4 changes: 0 additions & 4 deletions torch/_C/__init__.pyi.in
Original file line number Diff line number Diff line change
Expand Up @@ -388,10 +388,6 @@ def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_n
def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting
def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython

# Defined in `valgrind.h` and `callgrind.h` respectively.
def valgrind_supported_platform() -> _bool: ... # NVALGRIND
def valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT

has_openmp: _bool
has_mkl: _bool
has_lapack: _bool
Expand Down
26 changes: 0 additions & 26 deletions torch/csrc/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -61,12 +61,6 @@
#endif
#endif

#if (defined(_WIN32) || defined(_WIN64) || defined(FBCODE_CAFFE2) || defined(C10_MOBILE))
#define NVALGRIND
#else
#include <callgrind.h>
#endif

#define WITH_NUMPY_IMPORT_ARRAY
#include <torch/csrc/utils/numpy_stub.h>

Expand Down Expand Up @@ -827,26 +821,6 @@ Call this whenever a new thread is created in order to propagate values from
ASSERT_TRUE(set_module_attr("has_mkl", at::hasMKL() ? Py_True : Py_False));
ASSERT_TRUE(set_module_attr("has_lapack", at::hasLAPACK() ? Py_True : Py_False));

py_module.def(
"valgrind_supported_platform", [](){
#if defined(NVALGRIND)
return false;
#else
return true;
#endif
}
);

py_module.def(
"valgrind_toggle", [](){
#if defined(NVALGRIND)
TORCH_CHECK(false, "Valgrind is not supported.");
#else
CALLGRIND_TOGGLE_COLLECT;
#endif
}
);

#ifdef USE_CUDA
PyObject *has_cuda = Py_True;
#else
Expand Down
2 changes: 0 additions & 2 deletions torch/overrides.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,6 @@ def get_ignored_functions() -> Set[Callable]:
torch.is_deterministic,
torch.set_deterministic,
torch.unify_type_list,
torch.valgrind_supported_platform,
torch.valgrind_toggle,
Tensor.__delitem__,
Tensor.__dir__,
Tensor.__getattribute__,
Expand Down
27 changes: 0 additions & 27 deletions torch/utils/benchmark/utils/timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
import numpy as np
import torch
from torch.utils.benchmark.utils import common
from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface


__all__ = ["Timer", "timer"]
Expand Down Expand Up @@ -43,7 +42,6 @@ def __init__(
# specified as a convenience feature.
globals = dict(globals or {})
globals.setdefault("torch", torch)
self._globals = globals

self._timer = self._timer_cls(stmt=stmt, setup=setup, timer=timer, globals=globals)
self._task_spec = common.TaskSpec(
Expand Down Expand Up @@ -161,28 +159,3 @@ def stop_hook(times) -> bool:
raw_times=times,
task_spec=self._task_spec
)

def collect_callgrind(self, number=100, collect_baseline=True):
    """Collect callgrind instruction counts for this Timer's stmt.

    Args:
        number: how many times to run `stmt` under instrumentation.
        collect_baseline: whether to also measure the interpreter baseline.

    Returns:
        The stats object produced by the valgrind wrapper singleton.

    Raises:
        ValueError: if `stmt` is not a string, if extra globals were passed,
            or if `torch` has been mocked out in the globals.
    """
    stmt = self._task_spec.stmt
    if not isinstance(stmt, str):
        raise ValueError("`collect_callgrind` currently only supports string `stmt`")

    # __init__ adds torch, and Timer adds __builtins__; anything beyond those
    # cannot be forwarded to the valgrind subprocess, so reject it up front.
    permitted = {"torch", "__builtins__"}
    if set(self._globals) - permitted:
        raise ValueError(
            "`collect_callgrind` does not currently support passing globals. "
            "Please define a `setup` str instead.")

    if self._globals.get("torch", torch) is not torch:
        raise ValueError("`collect_callgrind` does not support mocking out `torch`.")

    # Sanity-check the statement in the parent process: it doesn't guarantee
    # success, but raising here for a faulty `stmt` or `setup` is much simpler
    # and quicker than failing inside the valgrind subprocess.
    self._timer.timeit(1)

    return valgrind_timer_interface.wrapper_singleton().collect_callgrind(
        stmt=stmt,
        setup=self._task_spec.setup,
        number=number,
        num_threads=self._task_spec.num_threads,
        collect_baseline=collect_baseline)
Empty file.
Loading

0 comments on commit 51d0ae9

Please sign in to comment.