diff --git a/Makefile b/Makefile
index 2250192255..27a60cb94d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,28 +1,243 @@
-default: build
-
-build: build_server build_client
- @echo "build done"
-
-pre_build:
- @mkdir -p docker/bin
-
-build_server: pre_build
- @{ \
- echo -n "build server " \
- && (go build -o docker/bin/cfs-server cmd/*.go ) \
- && (echo "success") \
- }
-
-build_client: pre_build
- @{ \
- echo -n "build client " \
- && (go build -o docker/bin/cfs-client client/*.go ) \
- && (echo "success") \
- }
-
-ci-test:
- @{ \
- echo "ci test" \
- && ( go test ./... ) \
- }
+GOFLAGS :=
+TAGS :=
+INSTALL := install
+prefix := /usr/local
+BUILDTARGET := ./cmd
+BranchName := $(shell git rev-parse --abbrev-ref HEAD)
+CommitID := $(shell git rev-parse HEAD)
+BuildTime := $(shell date +%Y-%m-%d\ %H:%M)
+
+ifeq "$(findstring -j,$(shell ps -o args= $$PPID))" ""
+ifdef NCPUS
+MAKEFLAGS += -j$(NCPUS)
+$(info Running make with -j$(NCPUS))
+endif
+endif
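+# For example, `make NCPUS=8 build` (value is illustrative) runs this make, and
+# the sub-makes that build the C dependencies below, with up to 8 parallel jobs.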
+
+BUILDTYPE := development
+
+# Build C/C++ with basic debugging information.
+CFLAGS += -g1
+CXXFLAGS += -g1
+LDFLAGS ?=
+
+# cgo flags for building and linking against the vendored RocksDB and Snappy
+# sources; these are passed to the Go toolchain via the CGO_* variables
+# (see golang/go#16651).
+CGO_CFLAGS = -I$(ROCKSDB_SRC_DIR)/include
+CGO_CXXFLAGS = $(CXXFLAGS)
+CGO_LDFLAGS = $(addprefix -L,$(SNAPPY_DIR) $(ROCKSDB_DIR)) -lrocksdb -lsnappy
+
+export CFLAGS CXXFLAGS LDFLAGS CGO_CFLAGS CGO_CXXFLAGS CGO_LDFLAGS
+
+# We intentionally use LINKFLAGS instead of the more traditional LDFLAGS
+# because LDFLAGS has built-in semantics that don't make sense with the Go
+# toolchain.
+override LINKFLAGS = -X github.com/chubaofs/chubaofs/cmd/build.typ=$(BUILDTYPE) -X "main.CommitID=$(CommitID)" -X "main.BranchName=$(BranchName)" -X "main.BuildTime=$(BuildTime)" -extldflags "$(LDFLAGS)"
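+# As a rough sketch (commit/branch values are hypothetical), the link step this
+# produces looks like:
+#   go build -ldflags '-X "main.CommitID=0123abcd" -X "main.BranchName=master" ...'
+# which embeds the version metadata into the cfs-server binary at link time.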
+
+
+GO ?= go
+GOFLAGS ?=
+TAR ?= tar
+
+# Ensure we have an unambiguous GOPATH.
+GOPATH := $(shell $(GO) env GOPATH)
+
+ifneq "$(or $(findstring :,$(GOPATH)),$(findstring ;,$(GOPATH)))" ""
+$(error GOPATHs with multiple entries are not supported)
+endif
+
+GOPATH := $(realpath $(GOPATH))
+ifeq "$(strip $(GOPATH))" ""
+$(error GOPATH is not set and could not be automatically determined)
+endif
+
+ifeq "$(filter $(GOPATH)%,$(CURDIR))" ""
+$(error Current directory "$(CURDIR)" is not within GOPATH "$(GOPATH)")
+endif
+
+ifeq "$(GOPATH)" "/"
+$(error GOPATH=/ is not supported)
+endif
+
+$(info GOPATH set to $(GOPATH))
+
+# We install our vendored tools to a directory within this repository to avoid
+# overwriting any user-installed binaries of the same name in the default GOBIN.
+GO_INSTALL := GOBIN='$(abspath bin)' GOFLAGS= $(GO) install
+
+# Prefer tools we've installed with go install and Yarn to those elsewhere on
+# the PATH.
+export PATH := $(abspath bin):$(PATH)
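+# A vendored tool would be installed into ./bin along these lines (import path
+# is illustrative):
+#   $(GO_INSTALL) github.com/example/sometool
+# so recipes below pick up the local copy before any system-wide install.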
+
+
+export SHELL := env PWD=$(CURDIR) bash
+ifeq ($(SHELL),)
+$(error bash is required)
+endif
+
+
+# make-lazy converts a recursive variable, which is evaluated every time it's
+# referenced, to a lazy variable, which is evaluated only the first time it's
+# used. See: http://blog.jgc.org/2016/07/lazy-gnu-make-variables.html
+override make-lazy = $(eval $1 = $$(eval $1 := $(value $1))$$($1))
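+# Example: a variable defined as `foo = $(shell some-slow-command)` (names are
+# illustrative) re-runs the command on every reference; after
+# `$(call make-lazy,foo)` the command runs once, on first use, and the result is
+# reused for the rest of the make run, as done for the variables below.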
+
+# GNU tar and BSD tar both support transforming filenames according to a regular
+# expression, but have different flags to do so.
+TAR_XFORM_FLAG = $(shell $(TAR) --version | grep -q GNU && echo "--xform='flags=r;s'" || echo "-s")
+$(call make-lazy,TAR_XFORM_FLAG)
+
+# To edit in-place without creating a backup file, GNU sed requires a bare -i,
+# while BSD sed requires an empty string as the following argument.
+SED_INPLACE = sed $(shell sed --version 2>&1 | grep -q GNU && echo -i || echo "-i ''")
+$(call make-lazy,SED_INPLACE)
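+# Usage sketch (file name is illustrative):
+#   $(SED_INPLACE) 's/old/new/g' some-file
+# edits some-file in place with whichever -i spelling the host's sed accepts.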
+
+# MAKE_TERMERR is set automatically in Make v4.1+, but macOS is still shipping
+# v3.81.
+MAKE_TERMERR ?= $(shell [[ -t 2 ]] && echo true)
+
+# This is how you get a literal space into a Makefile.
+space := $(eval) $(eval)
+
+# Color support.
+yellow = $(shell { tput setaf 3 || tput AF 3; } 2>/dev/null)
+cyan = $(shell { tput setaf 6 || tput AF 6; } 2>/dev/null)
+term-reset = $(shell { tput sgr0 || tput me; } 2>/dev/null)
+$(call make-lazy,yellow)
+$(call make-lazy,cyan)
+$(call make-lazy,term-reset)
+
+
+UNAME := $(shell uname)
+host-is-macos := $(findstring Darwin,$(UNAME))
+host-is-mingw := $(findstring MINGW,$(UNAME))
+
+
+ifdef host-is-macos
+# On macOS 10.11, XCode SDK v8.1 (and possibly others) indicate the presence of
+# symbols that don't exist until macOS 10.12. Setting MACOSX_DEPLOYMENT_TARGET
+# to the host machine's actual macOS version works around this. See:
+# https://github.com/jemalloc/jemalloc/issues/494.
+# macos-version is the host's macOS release (e.g. 10.14), as reported by sw_vers.
+macos-version := $(shell sw_vers -productVersion | grep -oE '[0-9]+\.[0-9]+')
+export MACOSX_DEPLOYMENT_TARGET ?= $(macos-version)
+endif
+
+# Cross-compilation occurs when you set TARGET_TRIPLE to something other than
+# HOST_TRIPLE. You'll need to ensure the cross-compiling toolchain is on your
+# path and override the rest of the variables that immediately follow as
+# necessary. For an example, see build/builder/cmd/mkrelease, which sets these
+# variables appropriately for the toolchains baked into the builder image.
+# HOST_TRIPLE is the target triple of the host toolchain (e.g.
+# x86_64-pc-linux-gnu), as reported by the C compiler that cgo uses. This
+# derivation is an assumption; override HOST_TRIPLE explicitly if it does not
+# match your toolchain.
+HOST_TRIPLE := $(shell $$($(GO) env CC) -dumpmachine)
+TARGET_TRIPLE := $(HOST_TRIPLE)
+XCMAKE_SYSTEM_NAME :=
+XGOOS :=
+XGOARCH :=
+XCC := $(TARGET_TRIPLE)-cc
+XCXX := $(TARGET_TRIPLE)-c++
+EXTRA_XCMAKE_FLAGS :=
+EXTRA_XCONFIGURE_FLAGS :=
+
+ifneq ($(HOST_TRIPLE),$(TARGET_TRIPLE))
+is-cross-compile := 1
+endif
+
+# CMAKE_TARGET_MESSAGES=OFF prevents CMake from printing progress messages
+# whenever a target is fully built to prevent spammy output from make when
+# c-deps are all already built. Progress messages are still printed when actual
+# compilation is being performed.
+cmake-flags := -DCMAKE_TARGET_MESSAGES=OFF $(if $(host-is-mingw),-G 'MSYS Makefiles')
+configure-flags :=
+
+# Use xcmake-flags when invoking CMake on libraries/binaries for the target
+# platform (i.e., the cross-compiled platform, if specified); use plain
+# cmake-flags when invoking CMake on libraries/binaries for the host platform.
+# Similarly for xconfigure-flags and configure-flags, and xgo and GO.
+xcmake-flags := $(cmake-flags) $(EXTRA_XCMAKE_FLAGS)
+xconfigure-flags := $(configure-flags) $(EXTRA_XCONFIGURE_FLAGS)
+override xgo := GOFLAGS= $(GO)
+
+# If we're cross-compiling, inform Autotools and CMake.
+ifdef is-cross-compile
+xconfigure-flags += --host=$(TARGET_TRIPLE) CC=$(XCC) CXX=$(XCXX)
+xcmake-flags += -DCMAKE_SYSTEM_NAME=$(XCMAKE_SYSTEM_NAME) -DCMAKE_C_COMPILER=$(XCC) -DCMAKE_CXX_COMPILER=$(XCXX)
+override xgo := GOFLAGS= GOOS=$(XGOOS) GOARCH=$(XGOARCH) CC=$(XCC) CXX=$(XCXX) $(xgo)
+endif
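+# A cross-compile invocation is sketched below; the triple and GOOS/GOARCH
+# values are illustrative, the matching toolchain must already be on PATH, and
+# XCC/XCXX may also need overriding:
+#   make build TARGET_TRIPLE=x86_64-w64-mingw32 XGOOS=windows XGOARCH=amd64 \
+#        XCMAKE_SYSTEM_NAME=Windows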
+
+C_DEPS_DIR := $(abspath c-deps)
+ROCKSDB_SRC_DIR := $(C_DEPS_DIR)/rocksdb
+SNAPPY_SRC_DIR := $(C_DEPS_DIR)/snappy
+
+# Derived build variants.
+use-stdmalloc := $(findstring stdmalloc,$(TAGS))
+use-msan := $(findstring msan,$(GOFLAGS))
+
+# User-requested build variants.
+USE_ROCKSDB_ASSERTIONS :=
+
+BUILD_DIR := $(GOPATH)/cfs/$(TARGET_TRIPLE)
+
+
+# In MinGW, cgo flags don't handle Unix-style paths, so convert our base path to
+# a Windows-style path.
+#
+# TODO(benesch): Figure out why. MinGW transparently converts Unix-style paths
+# everywhere else.
+ifdef host-is-mingw
+BUILD_DIR := $(shell cygpath -m $(BUILD_DIR))
+endif
+
+ROCKSDB_DIR := $(BUILD_DIR)/rocksdb$(if $(use-msan),_msan)$(if $(use-stdmalloc),_stdmalloc)$(if $(USE_ROCKSDB_ASSERTIONS),_assert)
+SNAPPY_DIR := $(BUILD_DIR)/snappy$(if $(use-msan),_msan)
+
+LIBROCKSDB := $(ROCKSDB_DIR)/librocksdb.a
+LIBSNAPPY := $(SNAPPY_DIR)/libsnappy.a
+
+C_LIBS_COMMON = $(LIBSNAPPY) $(LIBROCKSDB)
+
+# Go does not permit dashes in build tags. This is undocumented.
+native-tag := $(subst -,_,$(TARGET_TRIPLE))$(if $(use-stdmalloc),_stdmalloc)$(if $(use-msan),_msan)
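+# For example, TARGET_TRIPLE=x86_64-pc-linux-gnu (illustrative) with no extra
+# variants yields the build tag x86_64_pc_linux_gnu.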
+
+# Targets that name a real file that must be rebuilt on every Make invocation
+# should depend on .ALWAYS_REBUILD. (.PHONY should only be used on targets that
+# don't name a real file because .DELETE_ON_ERROR does not apply to .PHONY
+# targets.)
+.ALWAYS_REBUILD:
+.PHONY: .ALWAYS_REBUILD
+
+$(ROCKSDB_DIR)/Makefile: sse := $(if $(findstring x86_64,$(TARGET_TRIPLE)),-msse3)
+$(ROCKSDB_DIR)/Makefile: $(C_DEPS_DIR)/rocksdb-rebuild $(LIBSNAPPY)
+ rm -rf $(ROCKSDB_DIR)
+ mkdir -p $(ROCKSDB_DIR)
+ @# NOTE: If you change the CMake flags below, bump the version in
+ @# $(C_DEPS_DIR)/rocksdb-rebuild. See above for rationale.
+ cd $(ROCKSDB_DIR) && CFLAGS+=" $(sse)" && CXXFLAGS+=" $(sse)" && cmake $(xcmake-flags) $(ROCKSDB_SRC_DIR) \
+ $(if $(findstring release,$(BUILDTYPE)),-DPORTABLE=ON) -DWITH_GFLAGS=OFF \
+  -DCMAKE_BUILD_TYPE=$(if $(USE_ROCKSDB_ASSERTIONS),Debug,Release) \
+ -DFAIL_ON_WARNINGS=$(if $(findstring windows,$(XGOOS)),0,1) \
+ -DUSE_RTTI=1
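+# To build RocksDB with its internal assertions enabled, pass the variant on the
+# command line, e.g. `make build USE_ROCKSDB_ASSERTIONS=1`; the library then
+# lands in a separate *_assert build directory (see ROCKSDB_DIR above).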
+
+$(SNAPPY_DIR)/Makefile: $(C_DEPS_DIR)/snappy-rebuild
+ rm -rf $(SNAPPY_DIR)
+ mkdir -p $(SNAPPY_DIR)
+ @# NOTE: If you change the CMake flags below, bump the version in
+ @# $(C_DEPS_DIR)/snappy-rebuild. See above for rationale.
+ cd $(SNAPPY_DIR) && cmake $(xcmake-flags) $(SNAPPY_SRC_DIR) \
+ -DCMAKE_BUILD_TYPE=Release
+
+
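+# bin/uptodate (committed under bin/) is used to decide whether a built library
+# is already newer than everything in its source directory; when it is not, the
+# library's own build is re-invoked. This summary describes the intended
+# behavior as used below; see the uptodate tool itself for the exact check.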
+$(LIBSNAPPY): $(SNAPPY_DIR)/Makefile bin/uptodate .ALWAYS_REBUILD
+ @uptodate $@ $(SNAPPY_SRC_DIR) || $(MAKE) --no-print-directory -C $(SNAPPY_DIR) snappy
+
+$(LIBROCKSDB): $(ROCKSDB_DIR)/Makefile bin/uptodate .ALWAYS_REBUILD
+ @uptodate $@ $(ROCKSDB_SRC_DIR) || $(MAKE) --no-print-directory -C $(ROCKSDB_DIR) rocksdb
+
+CFS := ./cfs-server$(SUFFIX)
+
+go-targets := $(CFS)
+
+build-mode = build -o $@
+
+go-install: build-mode = install
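+# build-mode is a target-specific variable: a plain `make build` runs
+# `go build -o <target>`, while the go-install target (declared above only via
+# this override) switches the same recipe to `go install`.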
+
+$(CFS): $(C_LIBS_COMMON) .ALWAYS_REBUILD
+ $(xgo) $(build-mode) -v $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LINKFLAGS)' $(BUILDTARGET)
+
+.PHONY: build
+build: ## Build the CFS binary.
+build: $(CFS)
\ No newline at end of file
diff --git a/bin/.submodules-initialized b/bin/.submodules-initialized
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/bin/uptodate b/bin/uptodate
new file mode 100755
index 0000000000..0da04fff3d
Binary files /dev/null and b/bin/uptodate differ
diff --git a/c-deps/.gitignore b/c-deps/.gitignore
new file mode 100644
index 0000000000..5053fb0890
--- /dev/null
+++ b/c-deps/.gitignore
@@ -0,0 +1 @@
+*.src
diff --git a/c-deps/rocksdb-rebuild b/c-deps/rocksdb-rebuild
new file mode 100644
index 0000000000..29a10be9b3
--- /dev/null
+++ b/c-deps/rocksdb-rebuild
@@ -0,0 +1,4 @@
+Bump the version below when changing rocksdb CMake flags. Search for "BUILD
+ARTIFACT CACHING" in build/common.mk for rationale.
+
+12
diff --git a/c-deps/rocksdb/.clang-format b/c-deps/rocksdb/.clang-format
new file mode 100644
index 0000000000..7c279811ac
--- /dev/null
+++ b/c-deps/rocksdb/.clang-format
@@ -0,0 +1,5 @@
+# Complete list of style options can be found at:
+# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+---
+BasedOnStyle: Google
+...
diff --git a/c-deps/rocksdb/.gitignore b/c-deps/rocksdb/.gitignore
new file mode 100644
index 0000000000..03b805983a
--- /dev/null
+++ b/c-deps/rocksdb/.gitignore
@@ -0,0 +1,74 @@
+make_config.mk
+
+*.a
+*.arc
+*.d
+*.dylib*
+*.gcda
+*.gcno
+*.o
+*.so
+*.so.*
+*_test
+*_bench
+*_stress
+*.out
+*.class
+*.jar
+*.*jnilib*
+*.d-e
+*.o-*
+*.swp
+*~
+*.vcxproj
+*.vcxproj.filters
+*.sln
+*.cmake
+CMakeCache.txt
+CMakeFiles/
+build/
+
+ldb
+manifest_dump
+sst_dump
+blob_dump
+column_aware_encoding_exp
+util/build_version.cc
+build_tools/VALGRIND_LOGS/
+coverage/COVERAGE_REPORT
+.gdbhistory
+.gdb_history
+package/
+unity.a
+tags
+etags
+rocksdb_dump
+rocksdb_undump
+db_test2
+
+java/out
+java/target
+java/test-libs
+java/*.log
+java/include/org_rocksdb_*.h
+
+.idea/
+*.iml
+
+rocksdb.cc
+rocksdb.h
+unity.cc
+java/crossbuild/.vagrant
+.vagrant/
+java/**/*.asc
+java/javadoc
+
+scan_build_report/
+t
+LOG
+
+db_logs/
+tp2/
+fbcode/
+fbcode
+buckifier/*.pyc
diff --git a/c-deps/rocksdb/.travis.yml b/c-deps/rocksdb/.travis.yml
new file mode 100644
index 0000000000..5fcc7d52ba
--- /dev/null
+++ b/c-deps/rocksdb/.travis.yml
@@ -0,0 +1,77 @@
+sudo: false
+dist: trusty
+language: cpp
+os:
+ - linux
+ - osx
+compiler:
+ - clang
+ - gcc
+osx_image: xcode8.3
+jdk:
+ - oraclejdk7
+cache:
+ - ccache
+ - apt
+
+addons:
+ apt:
+ packages: ['zlib1g-dev', 'libbz2-dev', 'libsnappy-dev', 'curl', 'libgflags-dev', 'mingw-w64']
+env:
+ - TEST_GROUP=platform_dependent # 16-18 minutes
+ - TEST_GROUP=1 # 33-35 minutes
+ - TEST_GROUP=2 # 30-32 minutes
+ - TEST_GROUP=3 # ? minutes - under development
+ # Run java tests
+ - JOB_NAME=java_test # 4-11 minutes
+ # Build ROCKSDB_LITE
+ - JOB_NAME=lite_build # 3-4 minutes
+ # Build examples
+ - JOB_NAME=examples # 5-7 minutes
+ - JOB_NAME=cmake # 3-5 minutes
+ - JOB_NAME=cmake-mingw # 3 minutes
+
+matrix:
+ exclude:
+ - os: osx
+ env: TEST_GROUP=1
+ - os: osx
+ env: TEST_GROUP=2
+ - os: osx
+ env: TEST_GROUP=3
+ - os : osx
+ env: JOB_NAME=cmake-mingw
+ - os : linux
+ compiler: clang
+ - os : osx
+ compiler: gcc
+
+# https://docs.travis-ci.com/user/caching/#ccache-cache
+install:
+ - if [ "${TRAVIS_OS_NAME}" == osx ]; then
+ brew install ccache;
+ PATH=$PATH:/usr/local/opt/ccache/libexec;
+ fi
+
+before_script:
+ # Increase the maximum number of open file descriptors, since some tests use
+ # more FDs than the default limit.
+ - ulimit -n 8192
+
+script:
+ - ${CXX} --version
+ - if [ `command -v ccache` ]; then ccache -C; fi
+ - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 all_but_some_tests check_some; fi
+ - if [ "${TEST_GROUP}" == '1' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=comparator_db_test make -j4 check_some; fi
+ - if [ "${TEST_GROUP}" == '2' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=comparator_db_test ROCKSDBTESTS_END=write_prepared_transaction_test make -j4 check_some; fi
+ - if [ "${TEST_GROUP}" == '3' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=write_prepared_transaction_test make -j4 check_some; fi
+ - if [ "${JOB_NAME}" == 'java_test' ]; then OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest; fi
+ - if [ "${JOB_NAME}" == 'lite_build' ]; then OPT="-DTRAVIS -DROCKSDB_LITE" V=1 make -j4 static_lib tools; fi
+ - if [ "${JOB_NAME}" == 'examples' ]; then OPT=-DTRAVIS V=1 make -j4 static_lib; cd examples; make -j4; fi
+ - if [ "${JOB_NAME}" == 'cmake' ]; then mkdir build && cd build && cmake .. && make -j4 rocksdb; fi
+ - if [ "${JOB_NAME}" == 'cmake-mingw' ]; then mkdir build && cd build && cmake .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb; fi
+notifications:
+ email:
+ - leveldb@fb.com
+ webhooks:
+ - https://buildtimetrend.herokuapp.com/travis
diff --git a/c-deps/rocksdb/AUTHORS b/c-deps/rocksdb/AUTHORS
new file mode 100644
index 0000000000..a451875f1a
--- /dev/null
+++ b/c-deps/rocksdb/AUTHORS
@@ -0,0 +1,12 @@
+Facebook Inc.
+Facebook Engineering Team
+
+Google Inc.
+# Initial version authors:
+Jeffrey Dean
+Sanjay Ghemawat
+
+# Partial list of contributors:
+Kevin Regan
+Johan Bilien
+Matthew Von-Maszewski (Basho Technologies)
diff --git a/c-deps/rocksdb/CMakeLists.txt b/c-deps/rocksdb/CMakeLists.txt
new file mode 100644
index 0000000000..77ff7cfb6a
--- /dev/null
+++ b/c-deps/rocksdb/CMakeLists.txt
@@ -0,0 +1,931 @@
+# Prerequisites for Windows:
+# This cmake build is for Windows 64-bit only.
+#
+# Prerequisites:
+# You must have at least Visual Studio 2015 Update 3. Start the Developer Command Prompt window that is a part of Visual Studio installation.
+# Run the build commands from within the Developer Command Prompt window to have paths to the compiler and runtime libraries set.
+# You must have git.exe in your %PATH% environment variable.
+#
+# To build Rocksdb for Windows is as easy as 1-2-3-4-5:
+#
+# 1. Update paths to third-party libraries in thirdparty.inc file
+# 2. Create a new directory for build artifacts
+# mkdir build
+# cd build
+# 3. Run cmake to generate project files for Windows, add more options to enable required third-party libraries.
+# See thirdparty.inc for more information.
+# sample command: cmake -G "Visual Studio 14 Win64" -DGFLAGS=1 -DSNAPPY=1 -DJEMALLOC=1 -DJNI=1 ..
+# 4. Then build the project in debug mode (you may want to add /m[:] flag to run msbuild in parallel threads
+#    or simply /m to use all available cores)
+# msbuild rocksdb.sln
+#
+# rocksdb.sln build features exclusions of test only code in Release. If you build ALL_BUILD then everything
+# will be attempted but test only code does not build in Release mode.
+#
+# 5. And release mode (/m[:] is also supported)
+# msbuild rocksdb.sln /p:Configuration=Release
+#
+# Linux:
+#
+# 1. Install a recent toolchain such as devtoolset-3 if you're on an older distro. C++11 required.
+# 2. mkdir build; cd build
+# 3. cmake ..
+# 4. make -j
+
+cmake_minimum_required(VERSION 2.8.12)
+project(rocksdb)
+
+if(POLICY CMP0042)
+ cmake_policy(SET CMP0042 NEW)
+endif()
+
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/")
+
+option(WITH_JEMALLOC "build with JeMalloc" OFF)
+if(MSVC)
+ include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc)
+else()
+ if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+    # FreeBSD has jemalloc as default malloc
+ # but it does not have all the jemalloc files in include/...
+ set(WITH_JEMALLOC ON)
+ else()
+ if(WITH_JEMALLOC)
+ find_package(JeMalloc REQUIRED)
+ add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
+ include_directories(${JEMALLOC_INCLUDE_DIR})
+ endif()
+ endif()
+
+ option(WITH_SNAPPY "build with SNAPPY" OFF)
+ if(WITH_SNAPPY)
+ find_package(snappy REQUIRED)
+ add_definitions(-DSNAPPY)
+ include_directories(${SNAPPY_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES})
+ endif()
+
+ option(WITH_ZLIB "build with zlib" OFF)
+ if(WITH_ZLIB)
+ find_package(zlib REQUIRED)
+ add_definitions(-DZLIB)
+ include_directories(${ZLIB_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${ZLIB_LIBRARIES})
+ endif()
+
+ option(WITH_BZ2 "build with bzip2" OFF)
+ if(WITH_BZ2)
+ find_package(bzip2 REQUIRED)
+ add_definitions(-DBZIP2)
+ include_directories(${BZIP2_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${BZIP2_LIBRARIES})
+ endif()
+
+ option(WITH_LZ4 "build with lz4" OFF)
+ if(WITH_LZ4)
+ find_package(lz4 REQUIRED)
+ add_definitions(-DLZ4)
+ include_directories(${LZ4_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${LZ4_LIBRARIES})
+ endif()
+
+ option(WITH_ZSTD "build with zstd" OFF)
+ if(WITH_ZSTD)
+ find_package(zstd REQUIRED)
+ add_definitions(-DZSTD)
+ include_directories(${ZSTD_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARIES})
+ endif()
+endif()
+
+string(TIMESTAMP GIT_DATE_TIME "%Y/%m/%d %H:%M:%S" UTC)
+
+find_package(Git)
+
+if(GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git")
+ if(WIN32)
+ execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${CMAKE_CURRENT_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
+ else()
+ execute_process(COMMAND ${GIT_EXECUTABLE} -C ${CMAKE_CURRENT_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
+ endif()
+else()
+ set(GIT_SHA 0)
+endif()
+
+string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}")
+
+if(NOT WIN32)
+ execute_process(COMMAND
+ "./build_tools/version.sh" "full"
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+ OUTPUT_VARIABLE ROCKSDB_VERSION
+ )
+ string(STRIP "${ROCKSDB_VERSION}" ROCKSDB_VERSION)
+ execute_process(COMMAND
+ "./build_tools/version.sh" "major"
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+ OUTPUT_VARIABLE ROCKSDB_VERSION_MAJOR
+ )
+ string(STRIP "${ROCKSDB_VERSION_MAJOR}" ROCKSDB_VERSION_MAJOR)
+endif()
+
+option(WITH_MD_LIBRARY "build with MD" ON)
+if(WIN32 AND MSVC)
+ if(WITH_MD_LIBRARY)
+ set(RUNTIME_LIBRARY "MD")
+ else()
+ set(RUNTIME_LIBRARY "MT")
+ endif()
+endif()
+
+set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc)
+configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY)
+add_library(build_version OBJECT ${BUILD_VERSION_CC})
+target_include_directories(build_version PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}/util)
+if(MSVC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324")
+else()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing")
+ if(MINGW)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format")
+ endif()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fno-omit-frame-pointer")
+ include(CheckCXXCompilerFlag)
+ CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER)
+ if(HAVE_OMIT_LEAF_FRAME_POINTER)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer")
+ endif()
+ endif()
+endif()
+
+option(PORTABLE "build a portable binary" OFF)
+option(FORCE_SSE42 "force building with SSE4.2, even when PORTABLE=ON" OFF)
+if(PORTABLE)
+ # MSVC does not need a separate compiler flag to enable SSE4.2; if nmmintrin.h
+ # is available, it is available by default.
+ if(FORCE_SSE42 AND NOT MSVC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
+ endif()
+else()
+ if(MSVC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
+ else()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+ endif()
+endif()
+
+include(CheckCXXSourceCompiles)
+if(NOT MSVC)
+ set(CMAKE_REQUIRED_FLAGS "-msse4.2")
+endif()
+CHECK_CXX_SOURCE_COMPILES("
+#include <cstdint>
+#include <nmmintrin.h>
+int main() {
+ volatile uint32_t x = _mm_crc32_u32(0, 0);
+}
+" HAVE_SSE42)
+unset(CMAKE_REQUIRED_FLAGS)
+if(HAVE_SSE42)
+ add_definitions(-DHAVE_SSE42)
+elseif(FORCE_SSE42)
+ message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
+endif()
+
+CHECK_CXX_SOURCE_COMPILES("
+#if defined(_MSC_VER) && !defined(__thread)
+#define __thread __declspec(thread)
+#endif
+int main() {
+ static __thread int tls;
+}
+" HAVE_THREAD_LOCAL)
+if(HAVE_THREAD_LOCAL)
+ add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
+endif()
+
+option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON)
+if(FAIL_ON_WARNINGS)
+ if(MSVC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
+ else() # assume GCC
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
+ endif()
+endif()
+
+option(WITH_ASAN "build with ASAN" OFF)
+if(WITH_ASAN)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
+ if(WITH_JEMALLOC)
+ message(FATAL "ASAN does not work well with JeMalloc")
+ endif()
+endif()
+
+option(WITH_TSAN "build with TSAN" OFF)
+if(WITH_TSAN)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -fPIC")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread -fPIC")
+ if(WITH_JEMALLOC)
+ message(FATAL "TSAN does not work well with JeMalloc")
+ endif()
+endif()
+
+option(WITH_UBSAN "build with UBSAN" OFF)
+if(WITH_UBSAN)
+ add_definitions(-DROCKSDB_UBSAN_RUN)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined")
+ if(WITH_JEMALLOC)
+ message(FATAL "UBSAN does not work well with JeMalloc")
+ endif()
+endif()
+
+# Used to run CI build and tests so we can run faster
+set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
+
+if(DEFINED OPTDBG)
+ set(OPTIMIZE_DEBUG ${OPTDBG})
+else()
+ set(OPTIMIZE_DEBUG ${OPTIMIZE_DEBUG_DEFAULT})
+endif()
+
+if(MSVC)
+ if((${OPTIMIZE_DEBUG} EQUAL 1))
+ message(STATUS "Debug optimization is enabled")
+ set(CMAKE_CXX_FLAGS_DEBUG "/Oxt /${RUNTIME_LIBRARY}d")
+ else()
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm /${RUNTIME_LIBRARY}d")
+ endif()
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy /${RUNTIME_LIBRARY}")
+
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG")
+endif()
+
+if(CMAKE_COMPILER_IS_GNUCXX)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp")
+endif()
+
+option(ROCKSDB_LITE "Build RocksDBLite version" OFF)
+if(ROCKSDB_LITE)
+ add_definitions(-DROCKSDB_LITE)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+endif()
+
+if(CMAKE_SYSTEM_NAME MATCHES "Cygwin")
+ add_definitions(-fno-builtin-memcmp -DCYGWIN)
+elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
+ add_definitions(-DOS_MACOSX)
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
+ add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE)
+ # no debug info for IOS, that will make our library big
+ add_definitions(-DNDEBUG)
+ endif()
+elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
+ add_definitions(-DOS_LINUX)
+elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
+ add_definitions(-DOS_SOLARIS)
+elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+ add_definitions(-DOS_FREEBSD)
+elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD")
+ add_definitions(-DOS_NETBSD)
+elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
+ add_definitions(-DOS_OPENBSD)
+elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly")
+ add_definitions(-DOS_DRAGONFLYBSD)
+elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
+ add_definitions(-DOS_ANDROID)
+elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
+ add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX)
+ if(MINGW)
+ add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_VISTA)
+ endif()
+endif()
+
+if(NOT WIN32)
+ add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX)
+endif()
+
+option(WITH_FALLOCATE "build with fallocate" ON)
+
+if(WITH_FALLOCATE)
+ set(CMAKE_REQUIRED_FLAGS ${CMAKE_C_FLAGS})
+ include(CheckCSourceCompiles)
+ CHECK_C_SOURCE_COMPILES("
+#include <fcntl.h>
+#include <linux/falloc.h>
+int main() {
+ int fd = open(\"/dev/null\", 0);
+ fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
+}
+" HAVE_FALLOCATE)
+ if(HAVE_FALLOCATE)
+ add_definitions(-DROCKSDB_FALLOCATE_PRESENT)
+ endif()
+endif()
+
+include(CheckFunctionExists)
+CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE)
+if(HAVE_MALLOC_USABLE_SIZE)
+ add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE)
+endif()
+
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(${PROJECT_SOURCE_DIR}/include)
+include_directories(SYSTEM ${PROJECT_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src)
+find_package(Threads REQUIRED)
+
+add_subdirectory(third-party/gtest-1.7.0/fused-src/gtest)
+
+# Main library source code
+
+set(SOURCES
+ cache/clock_cache.cc
+ cache/lru_cache.cc
+ cache/sharded_cache.cc
+ db/builder.cc
+ db/c.cc
+ db/column_family.cc
+ db/compacted_db_impl.cc
+ db/compaction.cc
+ db/compaction_iterator.cc
+ db/compaction_job.cc
+ db/compaction_picker.cc
+ db/compaction_picker_universal.cc
+ db/convenience.cc
+ db/db_filesnapshot.cc
+ db/db_impl.cc
+ db/db_impl_write.cc
+ db/db_impl_compaction_flush.cc
+ db/db_impl_files.cc
+ db/db_impl_open.cc
+ db/db_impl_debug.cc
+ db/db_impl_experimental.cc
+ db/db_impl_readonly.cc
+ db/db_info_dumper.cc
+ db/db_iter.cc
+ db/dbformat.cc
+ db/event_helpers.cc
+ db/experimental.cc
+ db/external_sst_file_ingestion_job.cc
+ db/file_indexer.cc
+ db/flush_job.cc
+ db/flush_scheduler.cc
+ db/forward_iterator.cc
+ db/internal_stats.cc
+ db/log_reader.cc
+ db/log_writer.cc
+ db/malloc_stats.cc
+ db/managed_iterator.cc
+ db/memtable.cc
+ db/memtable_list.cc
+ db/merge_helper.cc
+ db/merge_operator.cc
+ db/range_del_aggregator.cc
+ db/repair.cc
+ db/snapshot_impl.cc
+ db/table_cache.cc
+ db/table_properties_collector.cc
+ db/transaction_log_impl.cc
+ db/version_builder.cc
+ db/version_edit.cc
+ db/version_set.cc
+ db/wal_manager.cc
+ db/write_batch.cc
+ db/write_batch_base.cc
+ db/write_controller.cc
+ db/write_thread.cc
+ env/env.cc
+ env/env_chroot.cc
+ env/env_encryption.cc
+ env/env_hdfs.cc
+ env/mock_env.cc
+ memtable/alloc_tracker.cc
+ memtable/hash_cuckoo_rep.cc
+ memtable/hash_linklist_rep.cc
+ memtable/hash_skiplist_rep.cc
+ memtable/skiplistrep.cc
+ memtable/vectorrep.cc
+ memtable/write_buffer_manager.cc
+ monitoring/histogram.cc
+ monitoring/histogram_windowing.cc
+ monitoring/instrumented_mutex.cc
+ monitoring/iostats_context.cc
+ monitoring/perf_context.cc
+ monitoring/perf_level.cc
+ monitoring/statistics.cc
+ monitoring/thread_status_impl.cc
+ monitoring/thread_status_updater.cc
+ monitoring/thread_status_util.cc
+ monitoring/thread_status_util_debug.cc
+ options/cf_options.cc
+ options/db_options.cc
+ options/options.cc
+ options/options_helper.cc
+ options/options_parser.cc
+ options/options_sanity_check.cc
+ port/stack_trace.cc
+ table/adaptive_table_factory.cc
+ table/block.cc
+ table/block_based_filter_block.cc
+ table/block_based_table_builder.cc
+ table/block_based_table_factory.cc
+ table/block_based_table_reader.cc
+ table/block_builder.cc
+ table/block_prefix_index.cc
+ table/bloom_block.cc
+ table/cuckoo_table_builder.cc
+ table/cuckoo_table_factory.cc
+ table/cuckoo_table_reader.cc
+ table/flush_block_policy.cc
+ table/format.cc
+ table/full_filter_block.cc
+ table/get_context.cc
+ table/index_builder.cc
+ table/iterator.cc
+ table/merging_iterator.cc
+ table/meta_blocks.cc
+ table/partitioned_filter_block.cc
+ table/persistent_cache_helper.cc
+ table/plain_table_builder.cc
+ table/plain_table_factory.cc
+ table/plain_table_index.cc
+ table/plain_table_key_coding.cc
+ table/plain_table_reader.cc
+ table/sst_file_writer.cc
+ table/table_properties.cc
+ table/two_level_iterator.cc
+ tools/db_bench_tool.cc
+ tools/dump/db_dump_tool.cc
+ tools/ldb_cmd.cc
+ tools/ldb_tool.cc
+ tools/sst_dump_tool.cc
+ util/arena.cc
+ util/auto_roll_logger.cc
+ util/bloom.cc
+ util/coding.cc
+ util/compaction_job_stats_impl.cc
+ util/comparator.cc
+ util/concurrent_arena.cc
+ util/crc32c.cc
+ util/delete_scheduler.cc
+ util/dynamic_bloom.cc
+ util/event_logger.cc
+ util/file_reader_writer.cc
+ util/file_util.cc
+ util/filename.cc
+ util/filter_policy.cc
+ util/hash.cc
+ util/log_buffer.cc
+ util/murmurhash.cc
+ util/random.cc
+ util/rate_limiter.cc
+ util/slice.cc
+ util/sst_file_manager_impl.cc
+ util/status.cc
+ util/status_message.cc
+ util/string_util.cc
+ util/sync_point.cc
+ util/testutil.cc
+ util/thread_local.cc
+ util/threadpool_imp.cc
+ util/transaction_test_util.cc
+ util/xxhash.cc
+ utilities/backupable/backupable_db.cc
+ utilities/blob_db/blob_db.cc
+ utilities/blob_db/blob_db_impl.cc
+ utilities/blob_db/blob_dump_tool.cc
+ utilities/blob_db/blob_file.cc
+ utilities/blob_db/blob_log_reader.cc
+ utilities/blob_db/blob_log_writer.cc
+ utilities/blob_db/blob_log_format.cc
+ utilities/blob_db/ttl_extractor.cc
+ utilities/cassandra/cassandra_compaction_filter.cc
+ utilities/cassandra/format.cc
+ utilities/cassandra/merge_operator.cc
+ utilities/checkpoint/checkpoint_impl.cc
+ utilities/col_buf_decoder.cc
+ utilities/col_buf_encoder.cc
+ utilities/column_aware_encoding_util.cc
+ utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+ utilities/date_tiered/date_tiered_db_impl.cc
+ utilities/debug.cc
+ utilities/document/document_db.cc
+ utilities/document/json_document.cc
+ utilities/document/json_document_builder.cc
+ utilities/env_mirror.cc
+ utilities/env_timed.cc
+ utilities/geodb/geodb_impl.cc
+ utilities/leveldb_options/leveldb_options.cc
+ utilities/lua/rocks_lua_compaction_filter.cc
+ utilities/memory/memory_util.cc
+ utilities/merge_operators/max.cc
+ utilities/merge_operators/put.cc
+ utilities/merge_operators/string_append/stringappend.cc
+ utilities/merge_operators/string_append/stringappend2.cc
+ utilities/merge_operators/uint64add.cc
+ utilities/option_change_migration/option_change_migration.cc
+ utilities/options/options_util.cc
+ utilities/persistent_cache/block_cache_tier.cc
+ utilities/persistent_cache/block_cache_tier_file.cc
+ utilities/persistent_cache/block_cache_tier_metadata.cc
+ utilities/persistent_cache/persistent_cache_tier.cc
+ utilities/persistent_cache/volatile_tier_impl.cc
+ utilities/redis/redis_lists.cc
+ utilities/simulator_cache/sim_cache.cc
+ utilities/spatialdb/spatial_db.cc
+ utilities/table_properties_collectors/compact_on_deletion_collector.cc
+ utilities/transactions/optimistic_transaction_db_impl.cc
+ utilities/transactions/optimistic_transaction.cc
+ utilities/transactions/pessimistic_transaction.cc
+ utilities/transactions/pessimistic_transaction_db.cc
+ utilities/transactions/snapshot_checker.cc
+ utilities/transactions/transaction_base.cc
+ utilities/transactions/transaction_db_mutex_impl.cc
+ utilities/transactions/transaction_lock_mgr.cc
+ utilities/transactions/transaction_util.cc
+ utilities/transactions/write_prepared_txn.cc
+ utilities/transactions/write_prepared_txn_db.cc
+ utilities/ttl/db_ttl_impl.cc
+ utilities/write_batch_with_index/write_batch_with_index.cc
+ utilities/write_batch_with_index/write_batch_with_index_internal.cc
+  $<TARGET_OBJECTS:build_version>)
+
+if(HAVE_SSE42 AND NOT FORCE_SSE42)
+ if(NOT MSVC)
+set_source_files_properties(
+ util/crc32c.cc
+ PROPERTIES COMPILE_FLAGS "-msse4.2")
+ endif()
+endif()
+
+if(WIN32)
+ list(APPEND SOURCES
+ port/win/io_win.cc
+ port/win/env_win.cc
+ port/win/env_default.cc
+ port/win/port_win.cc
+ port/win/win_logger.cc
+ port/win/win_thread.cc
+ port/win/xpress_win.cc)
+
+if(WITH_JEMALLOC)
+ list(APPEND SOURCES
+ port/win/win_jemalloc.cc)
+endif()
+
+else()
+ list(APPEND SOURCES
+ port/port_posix.cc
+ env/env_posix.cc
+ env/io_posix.cc)
+endif()
+
+set(ROCKSDB_STATIC_LIB rocksdb${ARTIFACT_SUFFIX})
+set(ROCKSDB_SHARED_LIB rocksdb-shared${ARTIFACT_SUFFIX})
+set(ROCKSDB_IMPORT_LIB ${ROCKSDB_SHARED_LIB})
+if(WIN32)
+ set(SYSTEM_LIBS ${SYSTEM_LIBS} Shlwapi.lib Rpcrt4.lib)
+ set(LIBS ${ROCKSDB_STATIC_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+else()
+ set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT})
+ set(LIBS ${ROCKSDB_SHARED_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+
+ add_library(${ROCKSDB_SHARED_LIB} SHARED ${SOURCES})
+ target_link_libraries(${ROCKSDB_SHARED_LIB}
+ ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+ set_target_properties(${ROCKSDB_SHARED_LIB} PROPERTIES
+ LINKER_LANGUAGE CXX
+ VERSION ${ROCKSDB_VERSION}
+ SOVERSION ${ROCKSDB_VERSION_MAJOR}
+ CXX_STANDARD 11
+ OUTPUT_NAME "rocksdb")
+endif()
+
+option(WITH_LIBRADOS "Build with librados" OFF)
+if(WITH_LIBRADOS)
+ list(APPEND SOURCES
+ utilities/env_librados.cc)
+ list(APPEND THIRDPARTY_LIBS rados)
+endif()
+
+add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES})
+target_link_libraries(${ROCKSDB_STATIC_LIB}
+ ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+
+if(WIN32)
+ add_library(${ROCKSDB_IMPORT_LIB} SHARED ${SOURCES})
+ target_link_libraries(${ROCKSDB_IMPORT_LIB}
+ ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+ set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
+ COMPILE_DEFINITIONS "ROCKSDB_DLL;ROCKSDB_LIBRARY_EXPORTS")
+ if(MSVC)
+ set_target_properties(${ROCKSDB_STATIC_LIB} PROPERTIES
+ COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_STATIC_LIB}.pdb")
+ set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
+ COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_IMPORT_LIB}.pdb")
+ endif()
+endif()
+
+option(WITH_JNI "build with JNI" OFF)
+if(WITH_JNI OR JNI)
+ message(STATUS "JNI library is enabled")
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/java)
+else()
+ message(STATUS "JNI library is disabled")
+endif()
+
+# Installation and packaging
+if(WIN32)
+ option(ROCKSDB_INSTALL_ON_WINDOWS "Enable install target on Windows" OFF)
+endif()
+if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
+ if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+ if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ # Change default installation prefix on Linux to /usr
+ set(CMAKE_INSTALL_PREFIX /usr CACHE PATH "Install path prefix, prepended onto install directories." FORCE)
+ endif()
+ endif()
+
+ include(GNUInstallDirs)
+ include(CMakePackageConfigHelpers)
+
+ set(package_config_destination ${CMAKE_INSTALL_LIBDIR}/cmake/rocksdb)
+
+ configure_package_config_file(
+ ${CMAKE_SOURCE_DIR}/cmake/RocksDBConfig.cmake.in RocksDBConfig.cmake
+ INSTALL_DESTINATION ${package_config_destination}
+ )
+
+ write_basic_package_version_file(
+ RocksDBConfigVersion.cmake
+ VERSION ${ROCKSDB_VERSION}
+ COMPATIBILITY SameMajorVersion
+ )
+
+ install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
+
+ install(
+ TARGETS ${ROCKSDB_STATIC_LIB}
+ EXPORT RocksDBTargets
+ COMPONENT devel
+ ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+ )
+
+ install(
+ TARGETS ${ROCKSDB_SHARED_LIB}
+ EXPORT RocksDBTargets
+ COMPONENT runtime
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+ )
+
+ install(
+ EXPORT RocksDBTargets
+ COMPONENT devel
+ DESTINATION ${package_config_destination}
+ NAMESPACE RocksDB::
+ )
+
+ install(
+ FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfig.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfigVersion.cmake
+ COMPONENT devel
+ DESTINATION ${package_config_destination}
+ )
+endif()
+
+option(WITH_TESTS "build with tests" ON)
+if(WITH_TESTS)
+ set(TESTS
+ cache/cache_test.cc
+ cache/lru_cache_test.cc
+ db/column_family_test.cc
+ db/compact_files_test.cc
+ db/compaction_iterator_test.cc
+ db/compaction_job_stats_test.cc
+ db/compaction_job_test.cc
+ db/compaction_picker_test.cc
+ db/comparator_db_test.cc
+ db/corruption_test.cc
+ db/cuckoo_table_db_test.cc
+ db/db_basic_test.cc
+ db/db_blob_index_test.cc
+ db/db_block_cache_test.cc
+ db/db_bloom_filter_test.cc
+ db/db_compaction_filter_test.cc
+ db/db_compaction_test.cc
+ db/db_dynamic_level_test.cc
+ db/db_flush_test.cc
+ db/db_inplace_update_test.cc
+ db/db_io_failure_test.cc
+ db/db_iter_test.cc
+ db/db_iterator_test.cc
+ db/db_log_iter_test.cc
+ db/db_memtable_test.cc
+ db/db_merge_operator_test.cc
+ db/db_options_test.cc
+ db/db_properties_test.cc
+ db/db_range_del_test.cc
+ db/db_sst_test.cc
+ db/db_statistics_test.cc
+ db/db_table_properties_test.cc
+ db/db_tailing_iter_test.cc
+ db/db_test.cc
+ db/db_test2.cc
+ db/db_universal_compaction_test.cc
+ db/db_wal_test.cc
+ db/db_write_test.cc
+ db/dbformat_test.cc
+ db/deletefile_test.cc
+ db/external_sst_file_basic_test.cc
+ db/external_sst_file_test.cc
+ db/fault_injection_test.cc
+ db/file_indexer_test.cc
+ db/filename_test.cc
+ db/flush_job_test.cc
+ db/listener_test.cc
+ db/log_test.cc
+ db/manual_compaction_test.cc
+ db/memtable_list_test.cc
+ db/merge_helper_test.cc
+ db/merge_test.cc
+ db/options_file_test.cc
+ db/perf_context_test.cc
+ db/plain_table_db_test.cc
+ db/prefix_test.cc
+ db/repair_test.cc
+ db/table_properties_collector_test.cc
+ db/version_builder_test.cc
+ db/version_edit_test.cc
+ db/version_set_test.cc
+ db/wal_manager_test.cc
+ db/write_batch_test.cc
+ db/write_callback_test.cc
+ db/write_controller_test.cc
+ env/env_basic_test.cc
+ env/env_test.cc
+ env/mock_env_test.cc
+ memtable/inlineskiplist_test.cc
+ memtable/skiplist_test.cc
+ memtable/write_buffer_manager_test.cc
+ monitoring/histogram_test.cc
+ monitoring/iostats_context_test.cc
+ monitoring/statistics_test.cc
+ options/options_settable_test.cc
+ options/options_test.cc
+ table/block_based_filter_block_test.cc
+ table/block_test.cc
+ table/cleanable_test.cc
+ table/cuckoo_table_builder_test.cc
+ table/cuckoo_table_reader_test.cc
+ table/full_filter_block_test.cc
+ table/merger_test.cc
+ table/table_test.cc
+ tools/ldb_cmd_test.cc
+ tools/reduce_levels_test.cc
+ tools/sst_dump_test.cc
+ util/arena_test.cc
+ util/auto_roll_logger_test.cc
+ util/autovector_test.cc
+ util/bloom_test.cc
+ util/coding_test.cc
+ util/crc32c_test.cc
+ util/delete_scheduler_test.cc
+ util/dynamic_bloom_test.cc
+ util/event_logger_test.cc
+ util/file_reader_writer_test.cc
+ util/filelock_test.cc
+ util/hash_test.cc
+ util/heap_test.cc
+ util/rate_limiter_test.cc
+ util/slice_transform_test.cc
+ util/timer_queue_test.cc
+ util/thread_list_test.cc
+ util/thread_local_test.cc
+ utilities/backupable/backupable_db_test.cc
+ utilities/blob_db/blob_db_test.cc
+ utilities/cassandra/cassandra_functional_test.cc
+ utilities/cassandra/cassandra_format_test.cc
+ utilities/cassandra/cassandra_row_merge_test.cc
+ utilities/cassandra/cassandra_serialize_test.cc
+ utilities/checkpoint/checkpoint_test.cc
+ utilities/column_aware_encoding_test.cc
+ utilities/date_tiered/date_tiered_test.cc
+ utilities/document/document_db_test.cc
+ utilities/document/json_document_test.cc
+ utilities/geodb/geodb_test.cc
+ utilities/lua/rocks_lua_test.cc
+ utilities/memory/memory_test.cc
+ utilities/merge_operators/string_append/stringappend_test.cc
+ utilities/object_registry_test.cc
+ utilities/option_change_migration/option_change_migration_test.cc
+ utilities/options/options_util_test.cc
+ utilities/persistent_cache/hash_table_test.cc
+ utilities/persistent_cache/persistent_cache_test.cc
+ utilities/redis/redis_lists_test.cc
+ utilities/spatialdb/spatial_db_test.cc
+ utilities/simulator_cache/sim_cache_test.cc
+ utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
+ utilities/transactions/optimistic_transaction_test.cc
+ utilities/transactions/transaction_test.cc
+ utilities/transactions/write_prepared_transaction_test.cc
+ utilities/ttl/ttl_test.cc
+ utilities/write_batch_with_index/write_batch_with_index_test.cc
+ )
+ if(WITH_LIBRADOS)
+ list(APPEND TESTS utilities/env_librados_test.cc)
+ endif()
+
+ set(BENCHMARKS
+ cache/cache_bench.cc
+ memtable/memtablerep_bench.cc
+ tools/db_bench.cc
+ table/table_reader_bench.cc
+ utilities/column_aware_encoding_exp.cc
+ utilities/persistent_cache/hash_table_bench.cc)
+ add_library(testharness OBJECT util/testharness.cc)
+ foreach(sourcefile ${BENCHMARKS})
+ get_filename_component(exename ${sourcefile} NAME_WE)
+ add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
+      $<TARGET_OBJECTS:testharness>)
+ target_link_libraries(${exename}${ARTIFACT_SUFFIX} gtest ${LIBS})
+ endforeach(sourcefile ${BENCHMARKS})
+
+ # For test util library that is build only in DEBUG mode
+ # and linked to tests. Add test only code that is not #ifdefed for Release here.
+ set(TESTUTIL_SOURCE
+ db/db_test_util.cc
+ monitoring/thread_status_updater_debug.cc
+ table/mock_table.cc
+ util/fault_injection_test_env.cc
+ utilities/cassandra/test_utils.cc
+ )
+ # test utilities are only build in debug
+ enable_testing()
+ add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
+ set(TESTUTILLIB testutillib${ARTIFACT_SUFFIX})
+ add_library(${TESTUTILLIB} STATIC ${TESTUTIL_SOURCE})
+ if(MSVC)
+ set_target_properties(${TESTUTILLIB} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/testutillib${ARTIFACT_SUFFIX}.pdb")
+ endif()
+ set_target_properties(${TESTUTILLIB}
+ PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+ )
+
+ # Tests are excluded from Release builds
+ set(TEST_EXES ${TESTS})
+
+ foreach(sourcefile ${TEST_EXES})
+ get_filename_component(exename ${sourcefile} NAME_WE)
+ add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
+      $<TARGET_OBJECTS:testharness>)
+ set_target_properties(${exename}${ARTIFACT_SUFFIX}
+ PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+ )
+ target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
+ if(NOT "${exename}" MATCHES "db_sanity_test")
+ add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
+ add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
+ endif()
+ endforeach(sourcefile ${TEST_EXES})
+
+ # C executables must link to a shared object
+ set(C_TESTS db/c_test.c)
+ set(C_TEST_EXES ${C_TESTS})
+
+ foreach(sourcefile ${C_TEST_EXES})
+ string(REPLACE ".c" "" exename ${sourcefile})
+ string(REGEX REPLACE "^((.+)/)+" "" exename ${exename})
+ add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile})
+ set_target_properties(${exename}${ARTIFACT_SUFFIX}
+ PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+ EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+ )
+ target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_IMPORT_LIB} testutillib${ARTIFACT_SUFFIX})
+ add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
+ add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
+ endforeach(sourcefile ${C_TEST_EXES})
+endif()
+
+option(WITH_TOOLS "build with tools" ON)
+if(WITH_TOOLS)
+ add_subdirectory(tools)
+endif()
diff --git a/c-deps/rocksdb/CONTRIBUTING.md b/c-deps/rocksdb/CONTRIBUTING.md
new file mode 100644
index 0000000000..b8b1a412e3
--- /dev/null
+++ b/c-deps/rocksdb/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing to RocksDB
+
+## Contributor License Agreement ("CLA")
+
+In order to accept your pull request, we need you to submit a CLA. You
+only need to do this once, so if you've done this for another Facebook
+open source project, you're good to go. If you are submitting a pull
+request for the first time, just let us know that you have completed
+the CLA and we can cross-check with your GitHub username.
+
+Complete your CLA here:
+
+If you prefer to sign a paper copy, we can send you a PDF. Send us an
+e-mail or create a new github issue to request the CLA in PDF format.
diff --git a/c-deps/rocksdb/COPYING b/c-deps/rocksdb/COPYING
new file mode 100644
index 0000000000..d159169d10
--- /dev/null
+++ b/c-deps/rocksdb/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/c-deps/rocksdb/DEFAULT_OPTIONS_HISTORY.md b/c-deps/rocksdb/DEFAULT_OPTIONS_HISTORY.md
new file mode 100644
index 0000000000..26280ee34d
--- /dev/null
+++ b/c-deps/rocksdb/DEFAULT_OPTIONS_HISTORY.md
@@ -0,0 +1,24 @@
+# RocksDB default options change log
+## Unreleased
+* delayed_write_rate takes the rate given by rate_limiter if not specified.
+
+## 5.2
+* Change the default of delayed slowdown value to 16MB/s and further increase the L0 stop condition to 36 files.
+
+## 5.0 (11/17/2016)
+* Options::allow_concurrent_memtable_write and Options::enable_write_thread_adaptive_yield are now true by default
+* Options.level0_stop_writes_trigger default value changes from 24 to 32.
+
+## 4.8.0 (5/2/2016)
+* options.max_open_files changes from 5000 to -1. It improves performance, but users need to set the file descriptor limit to be large enough and watch memory usage for index and bloom filters.
+* options.base_background_compactions changes from max_background_compactions to 1. When users set a higher max_background_compactions but the write throughput is not high, the writes are less spiky to disks.
+* options.wal_recovery_mode changes from kTolerateCorruptedTailRecords to kPointInTimeRecovery. Avoids some false positives when the file system or hardware reorders writes for file data and metadata.
+
+## 4.7.0 (4/8/2016)
+* options.write_buffer_size changes from 4MB to 64MB.
+* options.target_file_size_base changes from 2MB to 64MB.
+* options.max_bytes_for_level_base changes from 10MB to 256MB.
+* options.soft_pending_compaction_bytes_limit changes from 0 (disabled) to 64GB.
+* options.hard_pending_compaction_bytes_limit changes from 0 (disabled) to 256GB.
+* table_cache_numshardbits changes from 4 to 6.
+* max_file_opening_threads changes from 1 to 16.
diff --git a/c-deps/rocksdb/DUMP_FORMAT.md b/c-deps/rocksdb/DUMP_FORMAT.md
new file mode 100644
index 0000000000..009dabad52
--- /dev/null
+++ b/c-deps/rocksdb/DUMP_FORMAT.md
@@ -0,0 +1,16 @@
+## RocksDB dump format
+
+The version 1 RocksDB dump format is fairly simple:
+
+1) The dump starts with the magic 8 byte identifier "ROCKDUMP"
+
+2) The magic is followed by an 8 byte big-endian version which is 0x00000001.
+
+3) Next are arbitrarily sized chunks of bytes, each preceded by a 4 byte little-endian number indicating how large the chunk is.
+
+4) The first chunk is special and is a json string indicating some things about the creation of this dump. It contains the following keys:
+* database-path: The path of the database this dump was created from.
+* hostname: The hostname of the machine where the dump was created.
+* creation-time: Unix seconds since epoch when this dump was created.
+
+5) Following the info dump, the remaining chunks are slices paired into key/value pairs.
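+
+As an illustration, a minimal C++ sketch of reading the header and the initial JSON info chunk laid out above; the function name and error handling are assumptions:
+
+```cpp
+#include <cstdint>
+#include <fstream>
+#include <string>
+#include <vector>
+
+// Reads the magic, the version, and the first (JSON info) chunk of a v1 dump.
+bool ReadDumpInfoChunk(const std::string& path, std::string* info_json) {
+  std::ifstream in(path, std::ios::binary);
+  char magic[8];
+  if (!in.read(magic, sizeof(magic)) ||
+      std::string(magic, sizeof(magic)) != "ROCKDUMP") {
+    return false;  // not a RocksDB dump
+  }
+  unsigned char ver[8];
+  if (!in.read(reinterpret_cast<char*>(ver), sizeof(ver))) return false;
+  uint64_t version = 0;
+  for (unsigned char b : ver) version = (version << 8) | b;  // big-endian
+  if (version != 1) return false;
+  unsigned char len[4];
+  if (!in.read(reinterpret_cast<char*>(len), sizeof(len))) return false;
+  uint32_t n = len[0] | (len[1] << 8) | (len[2] << 16) |
+               (static_cast<uint32_t>(len[3]) << 24);  // little-endian chunk size
+  std::vector<char> buf(n);
+  if (n > 0 && !in.read(buf.data(), n)) return false;
+  info_json->assign(buf.begin(), buf.end());  // first chunk: JSON metadata
+  return true;
+}
+```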
diff --git a/c-deps/rocksdb/HISTORY.md b/c-deps/rocksdb/HISTORY.md
new file mode 100644
index 0000000000..01440c384e
--- /dev/null
+++ b/c-deps/rocksdb/HISTORY.md
@@ -0,0 +1,611 @@
+# Rocksdb Change Log
+## 5.9.2 (12/6/2017)
+### Bug Fixes
+* Fix possible corruption to LSM structure when `DeleteFilesInRange()` deletes a subset of files spanned by a `DeleteRange()` marker.
+
+## 5.9.1 (11/28/2017)
+### Bug Fixes
+* Fix IOError on WAL write doesn't propagate to write group follower
+* Fix calculating filter partition target size
+
+## 5.9.0 (11/1/2017)
+### Public API Change
+* `BackupableDBOptions::max_valid_backups_to_open == 0` now means no backups will be opened during BackupEngine initialization. Previously this condition disabled limiting backups opened.
+* `DBOptions::preserve_deletes` is a new option that allows one to specify that DB should not drop tombstones for regular deletes if they have sequence number larger than what was set by the new API call `DB::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum)`. Disabled by default.
+* API call `DB::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum)` was added; users who wish to preserve deletes are expected to periodically call this function to advance the cutoff seqnum (all deletes made before this seqnum can be dropped by the DB). It is the user's responsibility to advance the seqnum in such a way that tombstones are kept for the desired period of time, yet are eventually processed and don't eat up too much space.
+* `ReadOptions::iter_start_seqnum` was added; if set to something > 0, the user will see two changes in iterator behavior: 1) only keys written with a sequence number larger than this parameter are returned, and 2) the `Slice` returned by iter->key() now points to the memory that keeps a user-oriented representation of the internal key, rather than the user key. New struct `FullKey` was added to represent internal keys, along with a new helper function `ParseFullKey(const Slice& internal_key, FullKey* result);`.
+* Deprecate the trash_dir param in NewSstFileManager; deleted files are now renamed with a .trash suffix instead of being moved to a trash directory.
+* Return an error on write if write_options.sync = true and write_options.disableWAL = true to warn the user of inconsistent options. Previously we would not write to the WAL and would not respect the sync option in this case.
+
+### New Features
+* `DBOptions::writable_file_max_buffer_size` can now be changed dynamically.
+* `DBOptions::bytes_per_sync` and `DBOptions::wal_bytes_per_sync` can now be changed dynamically, `DBOptions::wal_bytes_per_sync` will flush all memtables and switch to a new WAL file.
+* Support dynamic adjustment of the rate limit according to demand for background I/O. It can be enabled by passing `true` to the `auto_tuned` parameter in `NewGenericRateLimiter()` (see the sketch after this list). The value passed as `rate_bytes_per_sec` will still be respected as an upper bound.
+* Support dynamically changing `ColumnFamilyOptions::compaction_options_fifo`.
+* Introduce `EventListener::OnStallConditionsChanged()` callback. Users can implement it to be notified when user writes are stalled, stopped, or resumed.
+* Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false.
+* Upon snapshot release, recompact bottommost files containing deleted/overwritten keys that previously could not be dropped due to the snapshot. This alleviates space-amp caused by long-held snapshots.
+* Support lower bound on iterators specified via `ReadOptions::iterate_lower_bound`.
+* Support for differential snapshots (via iterator emitting the sequence of key-values representing the difference between DB state at two different sequence numbers). Supports preserving and emitting puts and regular deletes, doesn't support SingleDeletes, MergeOperator, Blobs and Range Deletes.
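+
+A minimal sketch of enabling the auto-tuned rate limiter referenced above; the 64MB/s upper bound and the other parameters are illustrative values, not defaults:
+
+```cpp
+#include "rocksdb/options.h"
+#include "rocksdb/rate_limiter.h"
+
+rocksdb::Options MakeAutoTunedRateLimiterOptions() {
+  rocksdb::Options options;
+  // With auto_tuned = true, rate_bytes_per_sec only acts as an upper bound.
+  options.rate_limiter.reset(rocksdb::NewGenericRateLimiter(
+      64 << 20 /* rate_bytes_per_sec */, 100 * 1000 /* refill_period_us */,
+      10 /* fairness */, rocksdb::RateLimiter::Mode::kWritesOnly,
+      true /* auto_tuned */));
+  return options;
+}
+```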
+
+### Bug Fixes
+* Fix a potential data inconsistency issue during point-in-time recovery. `DB:Open()` will abort if column family inconsistency is found during PIT recovery.
+* Fix possible metadata corruption in databases using `DeleteRange()`.
+
+## 5.8.0 (08/30/2017)
+### Public API Change
+* Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints.
+* `Slice::compare` and BytewiseComparator `Compare` no longer accept `Slice`s containing nullptr.
+* `Transaction::Get` and `Transaction::GetForUpdate` variants with `PinnableSlice` added.
+
+### New Features
+* Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators.
+* Replace dynamic_cast<> (except in unit tests) so people can choose to build with RTTI off. With make, release mode is built with -fno-rtti by default and debug mode is built without it. Users can override this by setting USE_RTTI=0 or 1.
+* Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which causes write stalling, particularly in multi-instance use cases. Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0` (see the sketch after this list).
+* Allow merge operator to be called even with a single merge operand during compactions, by appropriately overriding `MergeOperator::AllowSingleOperand`.
+* Add `DB::VerifyChecksum()`, which verifies the checksums in all SST files in a running DB.
+* Block-based table support for disabling checksums by setting `BlockBasedTableOptions::checksum = kNoChecksum`.
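+
+A minimal sketch of the dedicated bottom-priority pool mentioned above; the thread count is an arbitrary example:
+
+```cpp
+#include "rocksdb/env.h"
+
+void EnableBottomPriorityPool() {
+  // Reserve two threads for compactions that include the bottommost level.
+  rocksdb::Env::Default()->SetBackgroundThreads(2, rocksdb::Env::Priority::BOTTOM);
+}
+```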
+
+### Bug Fixes
+* Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
+* Fix incorrect dropping of deletions during intra-L0 compaction.
+* Fix transient reappearance of keys covered by range deletions when memtable prefix bloom filter is enabled.
+* Fix potentially wrong file smallest key when range deletions separated by snapshot are written together.
+
+## 5.7.0 (07/13/2017)
+### Public API Change
+* DB property "rocksdb.sstables" now prints keys in hex form.
+
+### New Features
+* Measure estimated number of reads per file. The information can be accessed through DB::GetColumnFamilyMetaData or "rocksdb.sstables" DB property.
+* RateLimiter support for throttling background reads, or throttling the sum of background reads and writes. This can give more predictable I/O usage when compaction reads more data than it writes, e.g., due to lots of deletions.
+* [Experimental] FIFO compaction with TTL support. It can be enabled by setting CompactionOptionsFIFO.ttl > 0.
+* Introduce `EventListener::OnBackgroundError()` callback. Users can implement it to be notified of errors causing the DB to enter read-only mode, and optionally override them.
+* Partitioned Index/Filters are exiting experimental mode. To enable partitioned indexes, set index_type to kTwoLevelIndexSearch; to further enable partitioned filters, set partition_filters to true. To configure the partition size, set metadata_block_size.
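+
+A rough configuration sketch of the partitioned index/filter setup described in the previous item; the bloom bits-per-key and metadata block size are illustrative choices:
+
+```cpp
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/options.h"
+#include "rocksdb/table.h"
+
+rocksdb::Options MakePartitionedIndexOptions() {
+  rocksdb::BlockBasedTableOptions table_options;
+  table_options.index_type =
+      rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;  // partitioned index
+  table_options.partition_filters = true;                     // partitioned filters
+  table_options.metadata_block_size = 4096;                   // partition size
+  table_options.filter_policy.reset(
+      rocksdb::NewBloomFilterPolicy(10, false /* full (not block-based) filter */));
+
+  rocksdb::Options options;
+  options.table_factory.reset(
+      rocksdb::NewBlockBasedTableFactory(table_options));
+  return options;
+}
+```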
+
+
+### Bug Fixes
+* Fix discarding empty compaction output files when `DeleteRange()` is used together with subcompactions.
+
+## 5.6.0 (06/06/2017)
+### Public API Change
+* Scheduling flushes and compactions in the same thread pool is no longer supported by setting `max_background_flushes=0`. Instead, users can achieve this by configuring their high-pri thread pool to have zero threads.
+* Replace `Options::max_background_flushes`, `Options::max_background_compactions`, and `Options::base_background_compactions` all with `Options::max_background_jobs`, which automatically decides how many threads to allocate towards flush/compaction.
+* options.delayed_write_rate by default takes the value of the options.rate_limiter rate.
+* Replace global variable `IOStatsContext iostats_context` with `IOStatsContext* get_iostats_context()`; replace global variable `PerfContext perf_context` with `PerfContext* get_perf_context()`.
+
+### New Features
+* Change ticker/histogram statistics implementations to use core-local storage. This improves aggregation speed compared to our previous thread-local approach, particularly for applications with many threads.
+* Users can pass a cache object to write buffer manager, so that they can cap memory usage for memtable and block cache using one single limit.
+* Flush will be triggered when 7/8 of the limit introduced by write_buffer_manager or db_write_buffer_size is reached, so that the hard threshold is unlikely to be hit.
+* Introduce WriteOptions.low_pri. If it is true, low priority writes will be throttled if the compaction is behind.
+* `DB::IngestExternalFile()` now supports ingesting files into a database containing range deletions.
+
+### Bug Fixes
+* Shouldn't ignore return value of fsync() in flush.
+
+## 5.5.0 (05/17/2017)
+### New Features
+* FIFO compaction now supports intra-L0 compaction as well, with CompactionOptionsFIFO.allow_compaction=true.
+* DB::ResetStats() to reset internal stats.
+* Statistics::Reset() to reset user stats.
+* ldb adds option --try_load_options, which will open the DB with its own option file.
+* Introduce WriteBatch::PopSavePoint to pop the most recent save point explicitly.
+* Support dynamically changing the `max_open_files` option via SetDBOptions().
+* Added DB::CreateColumnFamilies() and DB::DropColumnFamilies() to bulk create/drop column families.
+* Add debugging function `GetAllKeyVersions` to see internal versions of a range of keys.
+* Support file ingestion with universal compaction style
+* Support file ingestion behind with option `allow_ingest_behind`
+* New option enable_pipelined_write, which may improve write throughput when writing from multiple threads with the WAL enabled.
+
+### Bug Fixes
+* Fix the bug that Direct I/O uses direct reads for non-SST files
+
+## 5.4.0 (04/11/2017)
+### Public API Change
+* random_access_max_buffer_size no longer has any effect
+* Removed Env::EnableReadAhead(), Env::ShouldForwardRawRequest()
+* Support dynamically changing the `stats_dump_period_sec` option via SetDBOptions().
+* Added ReadOptions::max_skippable_internal_keys to set a threshold to fail a request as incomplete when too many keys are being skipped when using iterators.
+* DB::Get now accepts a PinnableSlice in place of std::string, which avoids an extra memcpy of the value into std::string in most cases (see the sketch after this list).
+ * PinnableSlice releases the pinned resources that contain the value when it is destructed or when ::Reset() is called on it.
+ * The old API that accepts std::string, although discouraged, is still supported.
+* Replace Options::use_direct_writes with Options::use_direct_io_for_flush_and_compaction. Read Direct IO wiki for details.
+* Added CompactionEventListener and EventListener::OnFlushBegin interfaces.
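+
+A minimal sketch of the PinnableSlice-based read path referenced in this list; the helper name and error handling are placeholders:
+
+```cpp
+#include <string>
+
+#include "rocksdb/db.h"
+
+std::string GetValue(rocksdb::DB* db, const std::string& key) {
+  rocksdb::PinnableSlice pinned;  // pins the underlying block instead of copying into a std::string
+  rocksdb::Status s =
+      db->Get(rocksdb::ReadOptions(), db->DefaultColumnFamily(), key, &pinned);
+  std::string value = s.ok() ? pinned.ToString() : std::string();
+  pinned.Reset();  // releases the pinned resources (the destructor would do this too)
+  return value;
+}
+```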
+
+### New Features
+* Memtable flush can be avoided during checkpoint creation if total log file size is smaller than a threshold specified by the user.
+* Introduce level-based L0->L0 compactions to reduce file count, so write delays are incurred less often.
+* (Experimental) Partitioning filters which creates an index on the partitions. The feature can be enabled by setting partition_filters when using kFullFilter. Currently the feature also requires two-level indexing to be enabled. Number of partitions is the same as the number of partitions for indexes, which is controlled by metadata_block_size.
+
+## 5.3.0 (03/08/2017)
+### Public API Change
+* Remove disableDataSync option.
+* Remove timeout_hint_us option from WriteOptions. The option has been deprecated and has no effect since 3.13.0.
+* Remove option min_partial_merge_operands. Partial merge operands will always be merged in flush or compaction if there are more than one.
+* Remove option verify_checksums_in_compaction. Compaction will always verify checksum.
+
+### Bug Fixes
+* Fix the bug that iterator may skip keys
+
+## 5.2.0 (02/08/2017)
+### Public API Change
+* NewLRUCache() will determine number of shard bits automatically based on capacity, if the user doesn't pass one. This also impacts the default block cache when the user doesn't explicitly provide one.
+* Change the default of delayed slowdown value to 16MB/s and further increase the L0 stop condition to 36 files.
+* Options::use_direct_writes and Options::use_direct_reads are now ready to use.
+* (Experimental) Two-level indexing that partitions the index and creates a 2nd level index on the partitions. The feature can be enabled by setting kTwoLevelIndexSearch as IndexType and configuring index_per_partition.
+
+### New Features
+* Added new overloaded function GetApproximateSizes that allows specifying whether only memtable stats should be computed, without computing SST files' stats approximations.
+* Added new function GetApproximateMemTableStats that approximates both number of records and size of memtables.
+* Add Direct I/O mode for SST file I/O
+
+### Bug Fixes
+* RangeSync() should work if ROCKSDB_FALLOCATE_PRESENT is not set
+* Fix wrong results in a data race case in Get()
+* Some fixes related to 2PC.
+* Fix bugs of data corruption in direct I/O
+
+## 5.1.0 (01/13/2017)
+* Support dynamically changing the `delete_obsolete_files_period_micros` option via SetDBOptions().
+* Added EventListener::OnExternalFileIngested which will be called when IngestExternalFile() adds a file successfully.
+* BackupEngine::Open and BackupEngineReadOnly::Open now always return error statuses matching those of the backup Env.
+
+### Bug Fixes
+* Fix the bug that if 2PC is enabled, checkpoints may lose some recent transactions.
+* When file copying is needed when creating checkpoints or bulk loading files, fsync the file after the file copying.
+
+## 5.0.0 (11/17/2016)
+### Public API Change
+* Options::max_bytes_for_level_multiplier is now a double along with all getters and setters.
+* Support dynamically changing the `delayed_write_rate` and `max_total_wal_size` options via SetDBOptions().
+* Introduce DB::DeleteRange for optimized deletion of large ranges of contiguous keys (see the sketch after this list).
+* Support dynamically changing the `delayed_write_rate` option via SetDBOptions().
+* Options::allow_concurrent_memtable_write and Options::enable_write_thread_adaptive_yield are now true by default.
+* Remove Tickers::SEQUENCE_NUMBER to avoid confusion if statistics object is shared among RocksDB instance. Alternatively DB::GetLatestSequenceNumber() can be used to get the same value.
+* Options.level0_stop_writes_trigger default value changes from 24 to 32.
+* New compaction filter API: CompactionFilter::FilterV2(). Allows dropping ranges of keys.
+* Removed flashcache support.
+* DB::AddFile() is deprecated and is replaced with DB::IngestExternalFile(). DB::IngestExternalFile() removes all the restrictions that existed for DB::AddFile().
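+
+A minimal sketch of the `DB::DeleteRange` call mentioned above; the key range is a made-up example:
+
+```cpp
+#include "rocksdb/db.h"
+
+// Deletes every key in ["user:0000", "user:9999") with a single range tombstone.
+rocksdb::Status DeleteUserRange(rocksdb::DB* db) {
+  return db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
+                         "user:0000", "user:9999");
+}
+```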
+
+### New Features
+* Add avoid_flush_during_shutdown option, which speeds up DB shutdown by not flushing unpersisted data (i.e. with disableWAL = true). Unpersisted data will be lost. The option is dynamically changeable via SetDBOptions().
+* Add memtable_insert_with_hint_prefix_extractor option. The option is meant to reduce CPU usage for inserting keys into the memtable when keys can be grouped by prefix and inserts for each prefix are sequential or almost sequential. See include/rocksdb/options.h for more details.
+* Add LuaCompactionFilter in utilities. This allows developers to write compaction filters in Lua. To use this feature, LUA_PATH needs to be set to the root directory of Lua.
+* No longer populate "LATEST_BACKUP" file in backup directory, which formerly contained the number of the latest backup. The latest backup can be determined by finding the highest numbered file in the "meta/" subdirectory.
+
+## 4.13.0 (10/18/2016)
+### Public API Change
+* DB::GetOptions() reflects dynamically changed options (i.e. through DB::SetOptions()) and returns a copy of the options instead of a reference.
+* Added Statistics::getAndResetTickerCount().
+
+### New Features
+* Add DB::SetDBOptions() to dynamically change base_background_compactions and max_background_compactions (see the sketch after this list).
+* Added Iterator::SeekForPrev(). This new API will seek to the last key that is less than or equal to the target key.
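+
+A minimal sketch of the `SetDBOptions()` call referenced above; the option values are placeholders, not recommendations:
+
+```cpp
+#include "rocksdb/db.h"
+
+// Dynamically raise the compaction thread limits on an open DB.
+rocksdb::Status TuneCompactions(rocksdb::DB* db) {
+  return db->SetDBOptions({{"base_background_compactions", "2"},
+                           {"max_background_compactions", "4"}});
+}
+```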
+
+## 4.12.0 (9/12/2016)
+### Public API Change
+* CancelAllBackgroundWork() flushes all memtables for databases containing writes that have bypassed the WAL (writes issued with WriteOptions::disableWAL=true) before shutting down background threads.
+* Merge options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes.
+* Remove ImmutableCFOptions.
+* Add a compression type ZSTD, which can work with ZSTD 0.8.0 or up. Still keep ZSTDNotFinal for compatibility reasons.
+
+### New Features
+* Introduce NewClockCache, which is based on the CLOCK algorithm with better concurrent performance in some cases. It can be used to replace the default LRU-based block cache and table cache. To use it, RocksDB needs to be linked with the TBB library.
+* Change ticker/histogram statistics implementations to accumulate data in thread-local storage, which improves CPU performance by reducing cache coherency costs. Callers of CreateDBStatistics do not need to change anything to use this feature.
+* Block cache mid-point insertion, where index and filter block are inserted into LRU block cache with higher priority. The feature can be enabled by setting BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority to true and high_pri_pool_ratio > 0 when creating NewLRUCache.
+
+## 4.11.0 (8/1/2016)
+### Public API Change
+* options.memtable_prefix_bloom_huge_page_tlb_size => memtable_huge_page_size. When it is set, RocksDB will try to allocate memory from huge page for memtable too, rather than just memtable bloom filter.
+
+### New Features
+* A tool to migrate DB after options change. See include/rocksdb/utilities/option_change_migration.h.
+* Add ReadOptions.background_purge_on_iterator_cleanup. If true, we avoid file deletion when destroying iterators.
+
+## 4.10.0 (7/5/2016)
+### Public API Change
+* options.memtable_prefix_bloom_bits changes to options.memtable_prefix_bloom_bits_ratio, and options.memtable_prefix_bloom_probes is deprecated
+* The enum types CompressionType and PerfLevel change from char to unsigned char. Values of all PerfLevel entries shift by one.
+* Deprecate options.filter_deletes.
+
+### New Features
+* Add avoid_flush_during_recovery option.
+* Add a read option background_purge_on_iterator_cleanup to avoid deleting files in foreground when destroying iterators. Instead, a job is scheduled in high priority queue and would be executed in a separate background thread.
+* RepairDB support for column families. RepairDB now associates data with non-default column families using information embedded in the SST/WAL files (4.7 or later). For data written by 4.6 or earlier, RepairDB associates it with the default column family.
+* Add options.write_buffer_manager which allows users to control total memtable sizes across multiple DB instances.
+
+## 4.9.0 (6/9/2016)
+### Public API changes
+* Add bottommost_compression option. This option can be used to set a specific compression algorithm for the bottommost level (the last level containing files in the DB).
+* Introduce CompactionJobInfo::compression. This field states the compression algorithm used to generate the output files of the compaction.
+* Deprecate BlockBasedTableOptions.hash_index_allow_collision=false
+* Deprecate options builder (GetOptions()).
+
+### New Features
+* Introduce NewSimCache() in rocksdb/utilities/sim_cache.h. This function creates a block cache that is able to give simulation results (mainly hit rate) of simulating block behavior with a configurable cache size.
+
+## 4.8.0 (5/2/2016)
+### Public API Change
+* Allow preset compression dictionary for improved compression of block-based tables. This is supported for zlib, zstd, and lz4. The compression dictionary's size is configurable via CompressionOptions::max_dict_bytes (see the sketch after this list).
+* Delete deprecated classes for creating backups (BackupableDB) and restoring from backups (RestoreBackupableDB). Now, BackupEngine should be used for creating backups, and BackupEngineReadOnly should be used for restorations. For more details, see https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F
+* Expose estimate of per-level compression ratio via DB property: "rocksdb.compression-ratio-at-levelN".
+* Added EventListener::OnTableFileCreationStarted. EventListener::OnTableFileCreated will also be called in the failure case. Users can check creation status via TableFileCreationInfo::status.
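+
+A rough sketch of enabling the preset compression dictionary described above; the compression type and dictionary size are illustrative choices, not defaults:
+
+```cpp
+#include "rocksdb/options.h"
+
+rocksdb::Options MakeDictionaryCompressionOptions() {
+  rocksdb::Options options;
+  options.compression = rocksdb::kZlibCompression;       // zlib, zstd, and lz4 are supported
+  options.compression_opts.max_dict_bytes = 16 * 1024;   // sample a 16KB preset dictionary
+  return options;
+}
+```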
+
+### New Features
+* Add ReadOptions::readahead_size. If non-zero, NewIterator will create a new table reader which performs reads of the given size.
+
+## 4.7.0 (4/8/2016)
+### Public API Change
+* rename options compaction_measure_io_stats to report_bg_io_stats and include flush too.
+* Change some default options. Now default options will optimize for server-workloads. Also enable slowdown and full stop triggers for pending compaction bytes. These changes may cause sub-optimal performance or significant increase of resource usage. To avoid these risks, users can open existing RocksDB with options extracted from RocksDB option files. See https://github.com/facebook/rocksdb/wiki/RocksDB-Options-File for how to use RocksDB option files. Or you can call Options.OldDefaults() to recover old defaults. DEFAULT_OPTIONS_HISTORY.md will track change history of default options.
+
+## 4.6.0 (3/10/2016)
+### Public API Changes
+* Change default of BlockBasedTableOptions.format_version to 2. It means default DB created by 4.6 or up cannot be opened by RocksDB version 3.9 or earlier.
+* Added strict_capacity_limit option to NewLRUCache. If the flag is set to true, inserting into the cache will fail if not enough capacity can be freed. Signature of Cache::Insert() is updated accordingly.
+* Tickers [NUMBER_DB_NEXT, NUMBER_DB_PREV, NUMBER_DB_NEXT_FOUND, NUMBER_DB_PREV_FOUND, ITER_BYTES_READ] are not updated immediately. They are updated when the Iterator is deleted.
+* Add monotonically increasing counter (DB property "rocksdb.current-super-version-number") that increments upon any change to the LSM tree.
+
+### New Features
+* Add CompactionPri::kMinOverlappingRatio, a compaction picking mode friendly to write amplification.
+* Deprecate Iterator::IsKeyPinned() and replace it with Iterator::GetProperty() with prop_name="rocksdb.iterator.is.key.pinned"
+
+## 4.5.0 (2/5/2016)
+### Public API Changes
+* Add a new perf context level between kEnableCount and kEnableTime. Level 2 now does not include timers for mutexes.
+* Statistics of mutex operation durations will not be measured by default. If you want to have them enabled, you need to set Statistics::stats_level_ to kAll.
+* DBOptions::delete_scheduler and NewDeleteScheduler() are removed, please use DBOptions::sst_file_manager and NewSstFileManager() instead
+
+### New Features
+* ldb tool now supports operations to non-default column families.
+* Add kPersistedTier to ReadTier. This option allows Get and MultiGet to read only the persisted data and skip mem-tables if writes were done with disableWAL = true.
+* Add DBOptions::sst_file_manager. Use NewSstFileManager() in include/rocksdb/sst_file_manager.h to create a SstFileManager that can be used to track the total size of SST files and control the SST files deletion rate.
+
+## 4.4.0 (1/14/2016)
+### Public API Changes
+* Change names in CompactionPri and add a new one.
+* Deprecate options.soft_rate_limit and add options.soft_pending_compaction_bytes_limit.
+* If options.max_write_buffer_number > 3, writes will be slowed down when writing to the last write buffer to delay a full stop.
+* Introduce CompactionJobInfo::compaction_reason; this field includes the reason the compaction was triggered.
+* After slow down is triggered, if estimated pending compaction bytes keep increasing, slow down more.
+* Increase default options.delayed_write_rate to 2MB/s.
+* Added a new parameter --path to ldb tool. --path accepts the name of either MANIFEST, SST or a WAL file. Either --db or --path can be used when calling ldb.
+
+## 4.3.0 (12/8/2015)
+### New Features
+* CompactionFilter has new member function called IgnoreSnapshots which allows CompactionFilter to be called even if there are snapshots later than the key.
+* RocksDB will now persist options under the same directory as the RocksDB database on successful DB::Open, CreateColumnFamily, DropColumnFamily, and SetOptions.
+* Introduce LoadLatestOptions() in rocksdb/utilities/options_util.h. This function can construct the latest DBOptions / ColumnFamilyOptions used by the specified RocksDB instance.
+* Introduce CheckOptionsCompatibility() in rocksdb/utilities/options_util.h. This function checks whether the input set of options is able to open the specified DB successfully.
+
+### Public API Changes
+* When options.db_write_buffer_size triggers, only the column family with the largest column family size will be flushed, not all the column families.
+
+## 4.2.0 (11/9/2015)
+### New Features
+* Introduce CreateLoggerFromOptions(); this function creates a Logger for the provided DBOptions.
+* Add GetAggregatedIntProperty(), which returns the sum of the GetIntProperty of all the column families.
+* Add MemoryUtil in rocksdb/utilities/memory.h. It currently offers a way to get the memory usage by type from a list of rocksdb instances.
+
+### Public API Changes
+* CompactionFilter::Context includes information of Column Family ID
+* The need-compaction hint given by TablePropertiesCollector::NeedCompact() will be persistent and recoverable after DB recovery. This introduces a breaking format change. If you use this experimental feature, including NewCompactOnDeletionCollectorFactory() in the new version, you may not be able to directly downgrade the DB back to version 4.0 or lower.
+* TablePropertiesCollectorFactory::CreateTablePropertiesCollector() now takes an option Context, containing the information of column family ID for the file being written.
+* Remove DefaultCompactionFilterFactory.
+
+
+## 4.1.0 (10/8/2015)
+### New Features
+* Added single delete operation as a more efficient way to delete keys that have not been overwritten.
+* Added experimental AddFile() to the DB interface that allows users to add files created by SstFileWriter into an empty Database, see include/rocksdb/sst_file_writer.h and DB::AddFile() for more info.
+* Added support for opening SST files with .ldb suffix which enables opening LevelDB databases.
+* CompactionFilter now supports filtering of merge operands and merge results.
+
+### Public API Changes
+* Added SingleDelete() to the DB interface.
+* Added AddFile() to DB interface.
+* Added SstFileWriter class.
+* CompactionFilter has a new method FilterMergeOperand() that RocksDB applies to every merge operand during compaction to decide whether to filter the operand.
+* We removed CompactionFilterV2 interfaces from include/rocksdb/compaction_filter.h. The functionality was deprecated already in version 3.13.
+
+## 4.0.0 (9/9/2015)
+### New Features
+* Added support for transactions. See include/rocksdb/utilities/transaction.h for more info.
+* DB::GetProperty() now accepts "rocksdb.aggregated-table-properties" and "rocksdb.aggregated-table-properties-at-levelN", in which case it returns aggregated table properties of the target column family, or the aggregated table properties of the specified level N if the "at-level" version is used.
+* Add compression option kZSTDNotFinalCompression for people to experiment with ZSTD although its format is not finalized.
+* We removed the need for LATEST_BACKUP file in BackupEngine. We still keep writing it when we create new backups (because of backward compatibility), but we don't read it anymore.
+
+### Public API Changes
+* Removed class Env::RandomRWFile and Env::NewRandomRWFile().
+* Renamed DBOptions.num_subcompactions to DBOptions.max_subcompactions to make the name better match the actual functionality of the option.
+* Added Equal() method to the Comparator interface that can optionally be overridden in cases where equality comparisons can be done more efficiently than three-way comparisons.
+* Previous 'experimental' OptimisticTransaction class has been replaced by Transaction class.
+
+## 3.13.0 (8/6/2015)
+### New Features
+* RollbackToSavePoint() in WriteBatch/WriteBatchWithIndex
+* Add NewCompactOnDeletionCollectorFactory() in utilities/table_properties_collectors, which allows rocksdb to mark a SST file as need-compaction when it observes at least D deletion entries in any N consecutive entries in that SST file. Note that this feature depends on an experimental NeedCompact() API --- the result of this API will not persist after DB restart.
+* Add DBOptions::delete_scheduler. Use NewDeleteScheduler() in include/rocksdb/delete_scheduler.h to create a DeleteScheduler that can be shared among multiple RocksDB instances to control the file deletion rate of SST files that exist in the first db_path.
+
+### Public API Changes
+* Deprecated WriteOptions::timeout_hint_us. We no longer support write timeout. If you really need this option, talk to us and we might consider returning it.
+* Deprecated purge_redundant_kvs_while_flush option.
+* Removed BackupEngine::NewBackupEngine() and NewReadOnlyBackupEngine() that were deprecated in RocksDB 3.8. Please use BackupEngine::Open() instead.
+* Deprecated Compaction Filter V2. We are not aware of any existing use-cases. If you use this filter, your compile will break with RocksDB 3.13. Please let us know if you use it and we'll put it back in RocksDB 3.14.
+* Env::FileExists now returns a Status instead of a boolean
+* Add statistics::getHistogramString() to print detailed distribution of a histogram metric.
+* Add DBOptions::skip_stats_update_on_db_open. When it is on, DB::Open() will run faster as it skips the random reads required for loading necessary stats from SST files to optimize compaction.
+
+## 3.12.0 (7/2/2015)
+### New Features
+* Added experimental support for optimistic transactions. See include/rocksdb/utilities/optimistic_transaction.h for more info.
+* Added a new way to report QPS from db_bench (check out --report_file and --report_interval_seconds)
+* Added a cache for individual rows. See DBOptions::row_cache for more info.
+* Several new features on EventListener (see include/rocksdb/listener.h):
+ - OnCompactionCompleted() now returns per-compaction job statistics, defined in include/rocksdb/compaction_job_stats.h.
+ - Added OnTableFileCreated() and OnTableFileDeleted().
+* Add compaction_options_universal.enable_trivial_move; set it to true to allow trivial moves while performing universal compaction. A trivial move will happen only when all the input files are non-overlapping.
+
+### Public API changes
+* EventListener::OnFlushCompleted() now passes FlushJobInfo instead of a list of parameters.
+* DB::GetDbIdentity() is now a const function. If this function is overridden in your application, be sure to also make GetDbIdentity() const to avoid compile error.
+* Move listeners from ColumnFamilyOptions to DBOptions.
+* Add max_write_buffer_number_to_maintain option
+* DB::CompactRange()'s parameter reduce_level is changed to change_level, to allow users to move levels to lower levels if allowed. It can be used to migrate a DB from options.level_compaction_dynamic_level_bytes=false to options.level_compaction_dynamic_level_bytes=true.
+* Change default value for options.compaction_filter_factory and options.compaction_filter_factory_v2 to nullptr instead of DefaultCompactionFilterFactory and DefaultCompactionFilterFactoryV2.
+* If CancelAllBackgroundWork is called without doing a flush after doing loads with WAL disabled, the changes which haven't been flushed before the call to CancelAllBackgroundWork will be lost.
+* WBWIIterator::Entry() now returns WriteEntry instead of `const WriteEntry&`
+* options.hard_rate_limit is deprecated.
+* When options.soft_rate_limit or options.level0_slowdown_writes_trigger is triggered, the way to slow down writes is changed to: write rate to DB is limited to options.delayed_write_rate.
+* DB::GetApproximateSizes() adds a parameter to allow the estimation to include data in mem table, with default to be not to include. It is now only supported in skip list mem table.
+* DB::CompactRange() now accept CompactRangeOptions instead of multiple parameters. CompactRangeOptions is defined in include/rocksdb/options.h.
+* CompactRange() will now skip bottommost level compaction for level based compaction if there is no compaction filter, bottommost_level_compaction is introduced in CompactRangeOptions to control when it's possible to skip bottommost level compaction. This means that if you want the compaction to produce a single file you need to set bottommost_level_compaction to BottommostLevelCompaction::kForce.
+* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
+* DB::Open() will fail if the compression specified in Options is not linked with the binary. If you see this failure, recompile RocksDB with compression libraries present on your system. Also, previously our default compression was snappy. This behavior is now changed. Now, the default compression is snappy only if it's available on the system. If it isn't, we change the default to kNoCompression.
+* We changed how we account for memory used in block cache. Previously, we only counted the sum of block sizes currently present in block cache. Now, we count the actual memory usage of the blocks. For example, a block of size 4.5KB will use 8KB memory with jemalloc. This might decrease your memory usage and possibly decrease performance. Increase block cache size if you see this happening after an upgrade.
+* Add BackupEngineImpl.options_.max_background_operations to specify the maximum number of operations that may be performed in parallel. Add support for parallelized backup and restore.
+* Add DB::SyncWAL() that does a WAL sync without blocking writers.
+
+## 3.11.0 (5/19/2015)
+### New Features
+* Added a new API Cache::SetCapacity(size_t capacity) to dynamically change the maximum configured capacity of the cache. If the new capacity is less than the existing cache usage, the implementation will try to lower the usage by evicting the necessary number of elements following a strict LRU policy.
+* Added an experimental API for handling flashcache devices (blacklists background threads from caching their reads) -- NewFlashcacheAwareEnv
+* If universal compaction is used and options.num_levels > 1, compacted files are stored in non-L0 levels as smaller files based on options.target_file_size_base. The limitation on DB size when using universal compaction is greatly mitigated by using more levels. You can set num_levels = 1 to make universal compaction behave as before. If you set num_levels > 1 and want to roll back to a previous version, you need to compact all files into a big file in level 0 (by setting target_file_size_base to be large and calling CompactRange(, nullptr, nullptr, true, 0)) and reopen the DB with the same version to rewrite the manifest; then you can open it using previous releases.
+* More information about rocksdb background threads is available in Env::GetThreadList(), including the number of bytes read / written by a compaction job, mem-table size and current number of bytes written by a flush job and many more. Check include/rocksdb/thread_status.h for more detail.
+
+### Public API changes
+* TablePropertiesCollector::AddUserKey() is added to replace TablePropertiesCollector::Add(). AddUserKey() exposes the key type, sequence number, and the file size so far to users.
+* DBOptions::bytes_per_sync used to apply to both WAL and table files. As of 3.11 it applies only to table files. If you want to use this option to sync WAL in the background, please use wal_bytes_per_sync
+
+## 3.10.0 (3/24/2015)
+### New Features
+* GetThreadStatus() is now able to report detailed thread status, including:
+ - Thread Operation including flush and compaction.
+ - The stage of the current thread operation.
+ - The elapsed time in micros since the current thread operation started.
+ More information can be found in include/rocksdb/thread_status.h. In addition, when running db_bench with --thread_status_per_interval, db_bench will also report thread status periodically.
+* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted. This change made parameter removeScanCountLimit obsolete. Because of that NewLRUCache doesn't take three arguments anymore. table_cache_remove_scan_limit option is also removed
+* By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build portable binary, use 'PORTABLE=1' before the make command.
+* We now allow level-compaction to place files in different paths by
+ specifying them in db_paths along with the target_size.
+ Lower numbered levels will be placed earlier in the db_paths and higher
+ numbered levels will be placed later in the db_paths vector.
+* Potentially big performance improvements if you're using RocksDB with lots of column families (100-1000)
+* Added BlockBasedTableOptions.format_version option, which allows user to specify which version of block based table he wants. As a general guideline, newer versions have more features, but might not be readable by older versions of RocksDB.
+* Added new block based table format (version 2), which you can enable by setting BlockBasedTableOptions.format_version = 2. This format changes how we encode size information in compressed blocks and should help with memory allocations if you're using Zlib or BZip2 compressions.
+* MemEnv (env that stores data in memory) is now available in default library build. You can create it by calling NewMemEnv().
+* Add SliceTransform.SameResultWhenAppended() to help users determine it is safe to apply prefix bloom/hash.
+* Block based table now makes use of prefix bloom filter if it is a full filter.
+* Block based table remembers whether a whole key or prefix based bloom filter is supported in SST files. Do a sanity check when reading the file with users' configuration.
+* Fixed a bug in ReadOnlyBackupEngine that deleted corrupted backups in some cases, even though the engine was ReadOnly
+* options.level_compaction_dynamic_level_bytes, a feature to allow RocksDB to pick dynamic base of bytes for levels. With this feature turned on, we will automatically adjust max bytes for each level. The goal of this feature is to have lower bound on size amplification. For more details, see comments in options.h.
+* Added an abstract base class WriteBatchBase for write batches
+* Fixed a bug where we start deleting files of a dropped column family even if there are still live references to it
+
+### Public API changes
+* Deprecated skip_log_error_on_recovery and table_cache_remove_scan_count_limit options.
+* Logger method logv with log level parameter is now virtual
+
+### RocksJava
+* Added compression per level API.
+* MemEnv is now available in RocksJava via RocksMemEnv class.
+* lz4 compression is now included in rocksjava static library when running `make rocksdbjavastatic`.
+* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
+
+## 3.9.0 (12/8/2014)
+
+### New Features
+* Add rocksdb::GetThreadList(), which in the future will return the current status of all
+ rocksdb-related threads. We will have more code instruments in the following RocksDB
+ releases.
+* Change convert function in rocksdb/utilities/convenience.h to return Status instead of boolean.
+ Also add support for nested options in convert function
+
+### Public API changes
+* New API to create a checkpoint added. Given a directory name, creates a new
+ database which is an image of the existing database.
+* New API LinkFile added to Env. If you implement your own Env class, an
+ implementation of the API LinkFile will have to be provided.
+* MemTableRep takes MemTableAllocator instead of Arena
+
+### Improvements
+* RocksDBLite library now becomes smaller and will be compiled with -fno-exceptions flag.
+
+## 3.8.0 (11/14/2014)
+
+### Public API changes
+* BackupEngine::NewBackupEngine() was deprecated; please use BackupEngine::Open() from now on.
+* BackupableDB/RestoreBackupableDB have new GarbageCollect() methods, which will clean up files from corrupt and obsolete backups.
+* BackupableDB/RestoreBackupableDB have new GetCorruptedBackups() methods which list corrupt backups.
+
+### Cleanup
+* Bunch of code cleanup, some extra warnings turned on (-Wshadow, -Wshorten-64-to-32, -Wnon-virtual-dtor)
+
+### New features
+* CompactFiles and EventListener, although they are still in experimental state
+* Full ColumnFamily support in RocksJava.
+
+## 3.7.0 (11/6/2014)
+### Public API changes
+* Introduce SetOptions() API to allow adjusting a subset of options dynamically online
+* Introduce 4 new convenient functions for converting Options from string: GetColumnFamilyOptionsFromMap(), GetColumnFamilyOptionsFromString(), GetDBOptionsFromMap(), GetDBOptionsFromString()
+* Remove WriteBatchWithIndex.Delete() overloads using SliceParts
+* When opening a DB, if options.max_background_compactions is larger than the existing low pri pool of options.env, it will enlarge it. Similarly, if options.max_background_flushes is larger than the existing high pri pool of options.env, it will enlarge it.
+
+## 3.6.0 (10/7/2014)
+### Disk format changes
+* If you're using RocksDB on ARM platforms and you're using default bloom filter, there is a disk format change you need to be aware of. There are three steps you need to do when you convert to new release: 1. turn off filter policy, 2. compact the whole database, 3. turn on filter policy
+
+### Behavior changes
+* We have refactored our system of stalling writes. Any stall-related statistics' meanings are changed. Instead of per-write stall counts, we now count stalls per-epoch, where epochs are periods between flushes and compactions. You'll find more information in our Tuning Perf Guide once we release RocksDB 3.6.
+* When disableDataSync=true, we no longer sync the MANIFEST file.
+* Add identity_as_first_hash property to CuckooTable. SST file needs to be rebuilt to be opened by reader properly.
+
+### Public API changes
+* Change target_file_size_base type to uint64_t from int.
+* Remove allow_thread_local. This feature was proved to be stable, so we are turning it always-on.
+
+## 3.5.0 (9/3/2014)
+### New Features
+* Add include/utilities/write_batch_with_index.h, providing a utility class to query data out of WriteBatch when building it.
+* Move BlockBasedTable related options to BlockBasedTableOptions from Options. Change corresponding JNI interface. Options affected include:
+ no_block_cache, block_cache, block_cache_compressed, block_size, block_size_deviation, block_restart_interval, filter_policy, whole_key_filtering. filter_policy is changed to shared_ptr from a raw pointer.
+* Remove deprecated options: disable_seek_compaction and db_stats_log_interval
+* OptimizeForPointLookup() takes one parameter for block cache size. It now builds hash index, bloom filter, and block cache.
+
+### Public API changes
+* The Prefix Extractor used with V2 compaction filters is now passed user key to SliceTransform::Transform instead of unparsed RocksDB key.
+
+## 3.4.0 (8/18/2014)
+### New Features
+* Support Multiple DB paths in universal style compactions
+* Add feature of storing plain table index and bloom filter in SST file.
+* CompactRange() will never output compacted files to level 0. This used to be the case when all the compaction input files were at level 0.
+* Added iterate_upper_bound to define the extent up to which the forward iterator will return entries. This will prevent iterating over delete markers and overwritten entries for edge cases where you want to break out of the iteration anyway. This may improve performance in cases where there are a large number of delete markers or overwritten entries.
+
+### Public API changes
+* DBOptions.db_paths is now a vector of DBPath structures, each of which indicates both the path and the target size
+* NewPlainTableFactory now accepts PlainTableOptions, which is defined in include/rocksdb/table.h, instead of a bunch of parameters
+* Moved include/utilities/*.h to include/rocksdb/utilities/*.h
+* Statistics APIs now take uint32_t as type instead of Tickers. Also make two access functions getTickerCount and histogramData const
+* Add DB property rocksdb.estimate-num-keys, estimated number of live keys in DB.
+* Add DB::GetIntProperty(), which returns DB properties that are integer as uint64_t.
+* The Prefix Extractor used with V2 compaction filters is now passed user key to SliceTransform::Transform instead of unparsed RocksDB key.
+
+## 3.3.0 (7/10/2014)
+### New Features
+* Added JSON API prototype.
+* HashLinklist reduces performance outliers caused by skewed buckets by switching a bucket's data from a linked list to a skip list. Add parameter threshold_use_skiplist in NewHashLinkListRepFactory().
+* RocksDB is now able to reclaim storage space more effectively during the compaction process. This is done by compensating the size of each deletion entry by the 2X average value size, which makes compactions triggered by deletion entries more likely.
+* Add TimeOut API to write. Now WriteOptions have a variable called timeout_hint_us. With timeout_hint_us set to non-zero, any write associated with this timeout_hint_us may be aborted when it runs longer than the specified timeout_hint_us, and it is guaranteed that any write that completes earlier than the specified timeout will not be aborted due to the timeout condition.
+* Add a rate_limiter option, which controls total throughput of flush and compaction. The throughput is specified in bytes/sec. Flush always has precedence over compaction when available bandwidth is constrained.
+
+### Public API changes
+* Removed NewTotalOrderPlainTableFactory because it was not used and its implementation was semantically incorrect.
+
+## 3.2.0 (06/20/2014)
+
+### Public API changes
+* We removed seek compaction as a concept from RocksDB because:
+1) It makes more sense for spinning disk workloads, while RocksDB is primarily designed for flash and memory,
+2) It added some complexity to the important code-paths,
+3) None of our internal customers were really using it.
+Because of that, Options::disable_seek_compaction is now obsolete. It is still a parameter in Options, so it does not break the build, but it does not have any effect. We plan to completely remove it at some point, so we ask users to please remove this option from your code base.
+* Add two parameters to NewHashLinkListRepFactory() for logging on too many entries in a hash bucket when flushing.
+* Added new option BlockBasedTableOptions::hash_index_allow_collision. When enabled, the prefix hash index for block-based tables does not store the prefix and allows hash collisions, reducing memory consumption.
+
+### New Features
+* PlainTable now supports a new key encoding: for keys of the same prefix, the prefix is only written once. It can be enabled through the encoding_type parameter of NewPlainTableFactory()
+* Add AdaptiveTableFactory, which is used to convert a DB from PlainTable to BlockBasedTable, or vice versa. It can be created using NewAdaptiveTableFactory()
+
+### Performance Improvements
+* Tailing iterator re-implemented with ForwardIterator + Cascading Search Hint, yielding a ~20% throughput improvement.
+
+## 3.1.0 (05/21/2014)
+
+### Public API changes
+* Replaced ColumnFamilyOptions::table_properties_collectors with ColumnFamilyOptions::table_properties_collector_factories
+
+### New Features
+* The hash index for block-based tables is now materialized and reconstructed more efficiently. Previously, the hash index was constructed by scanning the whole table every time the table was opened.
+* FIFO compaction style
+
+## 3.0.0 (05/05/2014)
+
+### Public API changes
+* Added _LEVEL to all InfoLogLevel enums
+* Deprecated ReadOptions.prefix and ReadOptions.prefix_seek. Seek() defaults to prefix-based seek when Options.prefix_extractor is supplied. More detail is documented in https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes
+* MemTableRepFactory::CreateMemTableRep() takes info logger as an extra parameter.
+
+### New Features
+* Column family support
+* Added an option to use different checksum functions in BlockBasedTableOptions
+* Added ApplyToAllCacheEntries() function to Cache
+
+## 2.8.0 (04/04/2014)
+
+* Removed arena.h from public header files.
+* By default, checksums are verified on every read from database
+* Change default value of several options, including: paranoid_checks=true, max_open_files=5000, level0_slowdown_writes_trigger=20, level0_stop_writes_trigger=24, disable_seek_compaction=true, max_background_flushes=1 and allow_mmap_writes=false
+* Added is_manual_compaction to CompactionFilter::Context
+* Added "virtual void WaitForJoin()" in class Env. Default operation is no-op.
+* Removed BackupEngine::DeleteBackupsNewerThan() function
+* Added new option -- verify_checksums_in_compaction
+* Changed Options.prefix_extractor from raw pointer to shared_ptr (take ownership)
+ Changed HashSkipListRepFactory and HashLinkListRepFactory constructor to not take SliceTransform object (use Options.prefix_extractor implicitly)
+* Added Env::GetThreadPoolQueueLen(), which returns the waiting queue length of thread pools
+* Added a command "checkconsistency" in ldb tool, which checks
+ if file system state matches DB state (file existence and file sizes)
+* Separate options related to block based table to a new struct BlockBasedTableOptions.
+* WriteBatch has a new function Count() that returns the number of entries in the batch, and Data() now returns a reference instead of a copy
+* Add more counters to perf context.
+* Supports several more DB properties: compaction-pending, background-errors and cur-size-active-mem-table.
+
+### New Features
+* If we find one truncated record at the end of the MANIFEST or WAL files,
+ we will ignore it. We assume that writers of these records were interrupted
+ and that we can safely ignore it.
+* A new SST format "PlainTable" is added, which is optimized for memory-only workloads. It can be created through NewPlainTableFactory() or NewTotalOrderPlainTableFactory().
+* A new memtable implementation, hash linked list, optimized for the case where there are only a few keys for each prefix; it can be created through NewHashLinkListRepFactory().
+* Merge operator supports a new function PartialMergeMulti() to allow users to do partial merges against multiple operands.
+* Compaction filters now have a V2 interface. It buffers the kv-pairs sharing the same key prefix, processes them in batches, and returns the batched results to the DB. The new interface uses a new structure, CompactionFilterContext, for the same purpose as CompactionFilter::Context in V1.
+* Geo-spatial support for locations and radial-search.
+
+## 2.7.0 (01/28/2014)
+
+### Public API changes
+
+* Renamed `StackableDB::GetRawDB()` to `StackableDB::GetBaseDB()`.
+* Renamed `WriteBatch::Data()` to `const std::string& Data() const`.
+* Renamed class `TableStats` to `TableProperties`.
+* Deleted class `PrefixHashRepFactory`. Please use `NewHashSkipListRepFactory()` instead.
+* Supported multi-threaded `EnableFileDeletions()` and `DisableFileDeletions()`.
+* Added `DB::GetOptions()`.
+* Added `DB::GetDbIdentity()`.
+
+### New Features
+
+* Added [BackupableDB](https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F)
+* Implemented [TailingIterator](https://github.com/facebook/rocksdb/wiki/Tailing-Iterator), a special type of iterator that
+ doesn't create a snapshot (can be used to read newly inserted data)
+ and is optimized for doing sequential reads.
+* Added property block for table, which allows (1) a table to store
+ its metadata and (2) end user to collect and store properties they
+ are interested in.
+* Enabled caching index and filter block in block cache (turned off by default).
+* Supported error reporting when doing manual compaction.
+* Supported additional Linux platform flavors and Mac OS.
+* Put with `SliceParts` - Variant of `Put()` that gathers output like `writev(2)`
+* Bug fixes and code refactor for compatibility with upcoming Column
+ Family feature.
+
+### Performance Improvements
+
+* Huge benchmark performance improvements by multiple efforts. For example, increase in readonly QPS from about 530k in 2.6 release to 1.1 million in 2.7 [1]
+* Sped up the way RocksDB deletes obsolete files -- the whole directory is no longer listed under a lock -- decreasing p99 latency
+* Use raw pointer instead of shared pointer for statistics: [5b825d](https://github.com/facebook/rocksdb/commit/5b825d6964e26ec3b4bb6faa708ebb1787f1d7bd) -- huge increase in performance -- shared pointers are slow
+* Optimized locking for `Get()` -- [1fdb3f](https://github.com/facebook/rocksdb/commit/1fdb3f7dc60e96394e3e5b69a46ede5d67fb976c) -- 1.5x QPS increase for some workloads
+* Cache speedup - [e8d40c3](https://github.com/facebook/rocksdb/commit/e8d40c31b3cca0c3e1ae9abe9b9003b1288026a9)
+* Implemented autovector, which allocates the first N elements on the stack. Most vectors in RocksDB are small, and we never want to allocate heap objects while holding a mutex. -- [c01676e4](https://github.com/facebook/rocksdb/commit/c01676e46d3be08c3c140361ef1f5884f47d3b3c)
+* Lots of efforts to move malloc, memcpy and IO outside of locks
diff --git a/c-deps/rocksdb/INSTALL.md b/c-deps/rocksdb/INSTALL.md
new file mode 100644
index 0000000000..a570aaec72
--- /dev/null
+++ b/c-deps/rocksdb/INSTALL.md
@@ -0,0 +1,144 @@
+## Compilation
+
+**Important**: If you plan to run RocksDB in production, don't compile using default
+`make` or `make all`. That will compile RocksDB in debug mode, which is much slower
+than release mode.
+
+RocksDB's library should be able to compile without any dependency installed,
+although we recommend installing some compression libraries (see below).
+We do depend on newer gcc/clang with C++11 support.
+
+There are a few options when compiling RocksDB:
+
+* [recommended] `make static_lib` will compile librocksdb.a, the RocksDB static library, in release mode (a small linking sketch follows this list).
+
+* `make shared_lib` will compile librocksdb.so, the RocksDB shared library, in release mode.
+
+* `make check` will compile and run all the unit tests. `make check` will compile RocksDB in debug mode.
+
+* `make all` will compile our static library, and all our tools and unit tests. Our tools
+depend on gflags. You will need to have gflags installed to run `make all`. This will compile RocksDB in debug mode. Don't
+use binaries compiled by `make all` in production.
+
+* By default the binary we produce is optimized for the platform you're compiling on
+(`-march=native` or the equivalent). SSE4.2 will thus be enabled automatically if your
+CPU supports it. To print a warning if your CPU does not support SSE4.2, build with
+`USE_SSE=1 make static_lib` or, if using CMake, `cmake -DFORCE_SSE42=ON`. If you want
+to build a portable binary, add `PORTABLE=1` before your make commands, like this:
+`PORTABLE=1 make static_lib`.
+
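+A quick way to sanity-check a `make static_lib` build is to link a tiny program against the
+resulting `librocksdb.a`. The following is a minimal sketch, not part of RocksDB itself: the
+file name, DB path, and build command are illustrative assumptions, and the libraries on the
+link line should match whatever was available when the library was built.
+
+    // smoke_test.cc -- hypothetical file name, shown only as an example.
+    // Assumed build command (run from the RocksDB source root; keep only the
+    // compression libraries your build actually uses):
+    //   g++ -std=c++11 smoke_test.cc -I./include ./librocksdb.a -lpthread -lsnappy -lz -o smoke_test
+    #include <cassert>
+    #include <string>
+
+    #include "rocksdb/db.h"
+
+    int main() {
+      rocksdb::Options options;
+      options.create_if_missing = true;  // create the DB directory on first use
+
+      rocksdb::DB* db = nullptr;
+      rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_smoke", &db);
+      assert(s.ok());
+
+      // Write one key and read it back to confirm the library works end to end.
+      s = db->Put(rocksdb::WriteOptions(), "key", "value");
+      assert(s.ok());
+
+      std::string value;
+      s = db->Get(rocksdb::ReadOptions(), "key", &value);
+      assert(s.ok() && value == "value");
+
+      delete db;
+      return 0;
+    }
+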
+## Dependencies
+
+* You can link RocksDB with the following compression libraries (a short sketch of choosing a codec at run time follows this list):
+ - [zlib](http://www.zlib.net/) - a library for data compression.
+ - [bzip2](http://www.bzip.org/) - a library for data compression.
+ - [lz4](https://github.com/lz4/lz4) - a library for extremely fast data compression.
+ - [snappy](http://google.github.io/snappy/) - a library for fast
+ data compression.
+ - [zstandard](http://www.zstd.net) - Fast real-time compression
+ algorithm.
+
+* All our tools depend on:
+ - [gflags](https://gflags.github.io/gflags/) - a library that handles
+ command-line flag processing. You can compile the rocksdb library even
+ if you don't have gflags installed.
+
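+Installing a compression library only makes it available to RocksDB; the codec that is
+actually applied is selected at run time through the `Options` you pass when opening a DB.
+Below is a minimal sketch (the file and function names are illustrative, and the chosen
+codec must have been present when the library was compiled):
+
+    // compression_options.cc -- illustrative only, not part of RocksDB.
+    #include "rocksdb/options.h"
+
+    rocksdb::Options MakeCompressedOptions() {
+      rocksdb::Options options;
+      options.create_if_missing = true;
+      // Applies to newly written SST files; kSnappyCompression is the usual
+      // default, and kLZ4Compression and the others work the same way.
+      options.compression = rocksdb::kLZ4Compression;
+      return options;
+    }
+
+Such an `Options` object is passed to `rocksdb::DB::Open()` exactly as in the linking
+sketch above.
+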
+## Supported platforms
+
+* **Linux - Ubuntu**
+ * Upgrade your gcc to at least version 4.8 to get C++11 support.
+ * Install gflags. First, try: `sudo apt-get install libgflags-dev`
+ If this doesn't work and you're using Ubuntu, here's a nice tutorial:
+ (http://askubuntu.com/questions/312173/installing-gflags-12-04)
+ * Install snappy. This is usually as easy as:
+ `sudo apt-get install libsnappy-dev`.
+ * Install zlib. Try: `sudo apt-get install zlib1g-dev`.
+ * Install bzip2: `sudo apt-get install libbz2-dev`.
+ * Install lz4: `sudo apt-get install liblz4-dev`.
+ * Install zstandard: `sudo apt-get install libzstd-dev`.
+
+* **Linux - CentOS / RHEL**
+ * Upgrade your gcc to at least version 4.8 to get C++11 support:
+ `yum install gcc48-c++`
+ * Install gflags:
+
+ git clone https://github.com/gflags/gflags.git
+ cd gflags
+ git checkout v2.0
+ ./configure && make && sudo make install
+
+ **Notice**: Once installed, please add the include path for gflags to your `CPATH` environment variable and the
+ lib path to `LIBRARY_PATH`. If installed with default settings, the include path will be `/usr/local/include`
+ and the lib path will be `/usr/local/lib`.
+
+ * Install snappy:
+
+ sudo yum install snappy snappy-devel
+
+ * Install zlib:
+
+ sudo yum install zlib zlib-devel
+
+ * Install bzip2:
+
+ sudo yum install bzip2 bzip2-devel
+
+ * Install lz4:
+
+ sudo yum install lz4-devel
+
+ * Install ASAN (optional for debugging):
+
+ sudo yum install libasan
+
+ * Install zstandard:
+
+ wget https://github.com/facebook/zstd/archive/v1.1.3.tar.gz
+ mv v1.1.3.tar.gz zstd-1.1.3.tar.gz
+ tar zxvf zstd-1.1.3.tar.gz
+ cd zstd-1.1.3
+ make && sudo make install
+
+* **OS X**:
+ * Install latest C++ compiler that supports C++ 11:
+ * Update Xcode: run `xcode-select --install` (or install it from the Xcode app's settings).
+ * Install via [homebrew](http://brew.sh/).
+ * If you're a first-time developer on macOS, you still need to run `xcode-select --install` on the command line.
+ * run `brew tap homebrew/versions; brew install gcc48 --use-llvm` to install gcc 4.8 (or higher).
+ * run `brew install rocksdb`
+
+* **iOS**:
+ * Run: `TARGET_OS=IOS make static_lib`. When building a project that uses the rocksdb iOS library, make sure to define two important preprocessor macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
+
+* **Windows**:
+ * For building with MS Visual Studio 13 you will need Update 4 installed.
+ * Read and follow the instructions at CMakeLists.txt
+ * Or install via [vcpkg](https://github.com/microsoft/vcpkg)
+ * run `vcpkg install rocksdb:x64-windows`
+
+* **AIX 6.1**
+ * Install AIX Toolbox rpms with gcc
+ * Use these environment variables:
+
+ export PORTABLE=1
+ export CC=gcc
+ export AR="ar -X64"
+ export EXTRA_ARFLAGS=-X64
+ export EXTRA_CFLAGS=-maix64
+ export EXTRA_CXXFLAGS=-maix64
+ export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
+ export LIBPATH=/opt/freeware/lib
+ export JAVA_HOME=/usr/java8_64
+ export PATH=/opt/freeware/bin:$PATH
+
+* **Solaris Sparc**
+ * Install GCC 4.8.2 or higher.
+ * Use these environment variables:
+
+ export CC=gcc
+ export EXTRA_CFLAGS=-m64
+ export EXTRA_CXXFLAGS=-m64
+ export EXTRA_LDFLAGS=-m64
+ export PORTABLE=1
+ export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
+
diff --git a/c-deps/rocksdb/LANGUAGE-BINDINGS.md b/c-deps/rocksdb/LANGUAGE-BINDINGS.md
new file mode 100644
index 0000000000..ffeed98f28
--- /dev/null
+++ b/c-deps/rocksdb/LANGUAGE-BINDINGS.md
@@ -0,0 +1,16 @@
+This is the list of all known third-party language bindings for RocksDB. If something is missing, please open a pull request to add it.
+
+* Java - https://github.com/facebook/rocksdb/tree/master/java
+* Python - http://pyrocksdb.readthedocs.org/en/latest/
+* Perl - https://metacpan.org/pod/RocksDB
+* Node.js - https://npmjs.org/package/rocksdb
+* Go - https://github.com/tecbot/gorocksdb
+* Ruby - http://rubygems.org/gems/rocksdb-ruby
+* Haskell - https://hackage.haskell.org/package/rocksdb-haskell
+* PHP - https://github.com/Photonios/rocksdb-php
+* C# - https://github.com/warrenfalk/rocksdb-sharp
+* Rust
+ * https://github.com/spacejam/rust-rocksdb
+ * https://github.com/bh1xuw/rust-rocks
+* D programming language - https://github.com/b1naryth1ef/rocksdb
+* Erlang - https://gitlab.com/barrel-db/erlang-rocksdb
diff --git a/c-deps/rocksdb/LICENSE.Apache b/c-deps/rocksdb/LICENSE.Apache
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/c-deps/rocksdb/LICENSE.Apache
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/c-deps/rocksdb/LICENSE.leveldb b/c-deps/rocksdb/LICENSE.leveldb
new file mode 100644
index 0000000000..7108b0bfba
--- /dev/null
+++ b/c-deps/rocksdb/LICENSE.leveldb
@@ -0,0 +1,29 @@
+This contains code that is from LevelDB, and that code is under the following license:
+
+Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/c-deps/rocksdb/Makefile b/c-deps/rocksdb/Makefile
new file mode 100644
index 0000000000..9769453c5c
--- /dev/null
+++ b/c-deps/rocksdb/Makefile
@@ -0,0 +1,1866 @@
+# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+# Inherit some settings from environment variables, if available
+
+#-----------------------------------------------
+
+BASH_EXISTS := $(shell which bash)
+SHELL := $(shell which bash)
+
+CLEAN_FILES = # deliberately empty, so we can append below.
+CFLAGS += ${EXTRA_CFLAGS}
+CXXFLAGS += ${EXTRA_CXXFLAGS}
+LDFLAGS += $(EXTRA_LDFLAGS)
+MACHINE ?= $(shell uname -m)
+ARFLAGS = ${EXTRA_ARFLAGS} rs
+STRIPFLAGS = -S -x
+
+# Transform parallel LOG output into something more readable.
+perl_command = perl -n \
+ -e '@a=split("\t",$$_,-1); $$t=$$a[8];' \
+ -e '$$t =~ /.*if\s\[\[\s"(.*?\.[\w\/]+)/ and $$t=$$1;' \
+ -e '$$t =~ s,^\./,,;' \
+ -e '$$t =~ s, >.*,,; chomp $$t;' \
+ -e '$$t =~ /.*--gtest_filter=(.*?\.[\w\/]+)/ and $$t=$$1;' \
+ -e 'printf "%7.3f %s %s\n", $$a[3], $$a[6] == 0 ? "PASS" : "FAIL", $$t'
+quoted_perl_command = $(subst ','\'',$(perl_command))
+
+# DEBUG_LEVEL can have three values:
+# * DEBUG_LEVEL=2; this is the ultimate debug mode. It will compile rocksdb
+# without any optimizations. To compile with level 2, issue `make dbg`
+# * DEBUG_LEVEL=1; debug level 1 enables all assertions and debug code, but
+# compiles rocksdb with -O2 optimizations. this is the default debug level.
+# `make all` or plain `make` compile RocksDB with debug level 1.
+# We use this debug level when developing RocksDB.
+# * DEBUG_LEVEL=0; this is the debug level we use for release. If you're
+# running rocksdb in production you most definitely want to compile RocksDB
+# with debug level 0. To compile with level 0, run `make shared_lib`,
+# `make install-shared`, `make static_lib`, `make install-static` or
+# `make install`
+
+# Set the default DEBUG_LEVEL to 1
+DEBUG_LEVEL?=1
+
+ifeq ($(MAKECMDGOALS),dbg)
+ DEBUG_LEVEL=2
+endif
+
+ifeq ($(MAKECMDGOALS),clean)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),release)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),shared_lib)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install-shared)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),static_lib)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install-static)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastatic)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastaticrelease)
+ DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastaticpublish)
+ DEBUG_LEVEL=0
+endif
+
+# compile with -O2 if debug level is not 2
+ifneq ($(DEBUG_LEVEL), 2)
+OPT += -O2 -fno-omit-frame-pointer
+# Skip for archs that don't support -momit-leaf-frame-pointer
+ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1))
+OPT += -momit-leaf-frame-pointer
+endif
+endif
+
+ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1))
+CXXFLAGS += -DHAS_ALTIVEC
+CFLAGS += -DHAS_ALTIVEC
+HAS_ALTIVEC=1
+endif
+
+ifeq (,$(shell $(CXX) -fsyntax-only -mcpu=power8 -xc /dev/null 2>&1))
+CXXFLAGS += -DHAVE_POWER8
+CFLAGS += -DHAVE_POWER8
+HAVE_POWER8=1
+endif
+
+# if we're compiling for release, compile without debug code (-DNDEBUG)
+ifeq ($(DEBUG_LEVEL),0)
+OPT += -DNDEBUG
+
+ifneq ($(USE_RTTI), 1)
+ CXXFLAGS += -fno-rtti
+else
+ CXXFLAGS += -DROCKSDB_USE_RTTI
+endif
+else
+ifneq ($(USE_RTTI), 0)
+ CXXFLAGS += -DROCKSDB_USE_RTTI
+else
+ CXXFLAGS += -fno-rtti
+endif
+
+$(warning Warning: Compiling in debug mode. Don't use the resulting binary in production)
+endif
+
+#-----------------------------------------------
+include src.mk
+
+AM_DEFAULT_VERBOSITY = 0
+
+AM_V_GEN = $(am__v_GEN_$(V))
+am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_$(V))
+am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
+am__v_at_0 = @
+am__v_at_1 =
+
+AM_V_CC = $(am__v_CC_$(V))
+am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY))
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_$(V))
+am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY))
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
+AM_V_AR = $(am__v_AR_$(V))
+am__v_AR_ = $(am__v_AR_$(AM_DEFAULT_VERBOSITY))
+am__v_AR_0 = @echo " AR " $@;
+am__v_AR_1 =
+
+ifdef ROCKSDB_USE_LIBRADOS
+LIB_SOURCES += utilities/env_librados.cc
+LDFLAGS += -lrados
+endif
+
+AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
+# detect what platform we're building on
+dummy := $(shell (export ROCKSDB_ROOT="$(CURDIR)"; export PORTABLE="$(PORTABLE)"; "$(CURDIR)/build_tools/build_detect_platform" "$(CURDIR)/make_config.mk"))
+# this file is generated by the previous line to set build flags and sources
+include make_config.mk
+CLEAN_FILES += make_config.mk
+
+missing_make_config_paths := $(shell \
+ grep "\/\S*" -o $(CURDIR)/make_config.mk | \
+ while read path; \
+ do [ -e $$path ] || echo $$path; \
+ done | sort | uniq)
+
+$(foreach path, $(missing_make_config_paths), \
+ $(warning Warning: $(path) does not exist))
+
+ifeq ($(PLATFORM), OS_AIX)
+# no debug info
+else ifneq ($(PLATFORM), IOS)
+CFLAGS += -g
+CXXFLAGS += -g
+else
+# no debug info for iOS, as that would make our library big
+OPT += -DNDEBUG
+endif
+
+ifeq ($(PLATFORM), OS_AIX)
+ARFLAGS = -X64 rs
+STRIPFLAGS = -X64 -x
+endif
+
+ifeq ($(PLATFORM), OS_SOLARIS)
+ PLATFORM_CXXFLAGS += -D _GLIBCXX_USE_C99
+endif
+ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
+ # found
+ CFLAGS += -fno-exceptions
+ CXXFLAGS += -fno-exceptions
+ # LUA is not supported under ROCKSDB_LITE
+ LUA_PATH =
+endif
+
+# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
+ifdef COMPILE_WITH_ASAN
+ DISABLE_JEMALLOC=1
+ EXEC_LDFLAGS += -fsanitize=address
+ PLATFORM_CCFLAGS += -fsanitize=address
+ PLATFORM_CXXFLAGS += -fsanitize=address
+endif
+
+# TSAN doesn't work well with jemalloc. If we're compiling with TSAN, we should use regular malloc.
+ifdef COMPILE_WITH_TSAN
+ DISABLE_JEMALLOC=1
+ EXEC_LDFLAGS += -fsanitize=thread
+ PLATFORM_CCFLAGS += -fsanitize=thread -fPIC
+ PLATFORM_CXXFLAGS += -fsanitize=thread -fPIC
+ # Turn off -pg when enabling TSAN testing, because that induces
+ # a link failure. TODO: find the root cause
+ PROFILING_FLAGS =
+ # LUA is not supported under TSAN
+ LUA_PATH =
+endif
+
+# AIX doesn't work with -pg
+ifeq ($(PLATFORM), OS_AIX)
+ PROFILING_FLAGS =
+endif
+
+# USAN doesn't work well with jemalloc. If we're compiling with USAN, we should use regular malloc.
+ifdef COMPILE_WITH_UBSAN
+ DISABLE_JEMALLOC=1
+ EXEC_LDFLAGS += -fsanitize=undefined
+ PLATFORM_CCFLAGS += -fsanitize=undefined -DROCKSDB_UBSAN_RUN
+ PLATFORM_CXXFLAGS += -fsanitize=undefined -DROCKSDB_UBSAN_RUN
+endif
+
+ifndef DISABLE_JEMALLOC
+ ifdef JEMALLOC
+ PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
+ PLATFORM_CCFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
+ endif
+ ifdef WITH_JEMALLOC_FLAG
+ PLATFORM_LDFLAGS += -ljemalloc
+ JAVA_LDFLAGS += -ljemalloc
+ endif
+ EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS)
+ PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE)
+ PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE)
+endif
+
+export GTEST_THROW_ON_FAILURE=1
+export GTEST_HAS_EXCEPTIONS=1
+GTEST_DIR = ./third-party/gtest-1.7.0/fused-src
+# AIX: pre-defined system headers are surrounded by an extern "C" block
+ifeq ($(PLATFORM), OS_AIX)
+ PLATFORM_CCFLAGS += -I$(GTEST_DIR)
+ PLATFORM_CXXFLAGS += -I$(GTEST_DIR)
+else
+ PLATFORM_CCFLAGS += -isystem $(GTEST_DIR)
+ PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)
+endif
+
+# This (the first rule) must depend on "all".
+default: all
+
+WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \
+ -Wno-unused-parameter
+
+ifndef DISABLE_WARNING_AS_ERROR
+ WARNING_FLAGS += -Werror
+endif
+
+
+ifdef LUA_PATH
+
+ifndef LUA_INCLUDE
+LUA_INCLUDE=$(LUA_PATH)/include
+endif
+
+LUA_INCLUDE_FILE=$(LUA_INCLUDE)/lualib.h
+
+ifeq ("$(wildcard $(LUA_INCLUDE_FILE))", "")
+# LUA_INCLUDE_FILE does not exist
+$(error Cannot find lualib.h under $(LUA_INCLUDE). Try to specify both LUA_PATH and LUA_INCLUDE manually)
+endif
+LUA_FLAGS = -I$(LUA_INCLUDE) -DLUA -DLUA_COMPAT_ALL
+CFLAGS += $(LUA_FLAGS)
+CXXFLAGS += $(LUA_FLAGS)
+
+ifndef LUA_LIB
+LUA_LIB = $(LUA_PATH)/lib/liblua.a
+endif
+ifeq ("$(wildcard $(LUA_LIB))", "") # LUA_LIB does not exist
+$(error $(LUA_LIB) does not exist. Try to specify both LUA_PATH and LUA_LIB manually)
+endif
+LDFLAGS += $(LUA_LIB)
+
+endif
+
+
+CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
+CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers
+
+LDFLAGS += $(PLATFORM_LDFLAGS)
+
+# If NO_UPDATE_BUILD_VERSION is set we don't update util/build_version.cc, but
+# the file needs to already exist or else the build will fail
+ifndef NO_UPDATE_BUILD_VERSION
+date := $(shell date +%F)
+ifdef FORCE_GIT_SHA
+ git_sha := $(FORCE_GIT_SHA)
+else
+ git_sha := $(shell git rev-parse HEAD 2>/dev/null)
+endif
+gen_build_version = sed -e s/@@GIT_SHA@@/$(git_sha)/ -e s/@@GIT_DATE_TIME@@/$(date)/ util/build_version.cc.in
+
+# Record the version of the source that we are compiling.
+# We keep a record of the git revision in this file. It is then built
+# as a regular source file as part of the compilation process.
+# One can run "strings executable_filename | grep _build_" to find
+# the version of the source that we used to build the executable file.
+FORCE:
+util/build_version.cc: FORCE
+ $(AM_V_GEN)rm -f $@-t
+ $(AM_V_at)$(gen_build_version) > $@-t
+ $(AM_V_at)if test -f $@; then \
+ cmp -s $@-t $@ && rm -f $@-t || mv -f $@-t $@; \
+ else mv -f $@-t $@; fi
+endif
+
+LIBOBJECTS = $(LIB_SOURCES:.cc=.o)
+ifeq ($(HAVE_POWER8),1)
+LIB_CC_OBJECTS = $(LIB_SOURCES:.cc=.o)
+LIBOBJECTS += $(LIB_SOURCES_C:.c=.o)
+LIBOBJECTS += $(LIB_SOURCES_ASM:.S=.o)
+endif
+
+LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.o)
+MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.o)
+
+GTEST = $(GTEST_DIR)/gtest/gtest-all.o
+TESTUTIL = ./util/testutil.o
+TESTHARNESS = ./util/testharness.o $(TESTUTIL) $(MOCKOBJECTS) $(GTEST)
+VALGRIND_ERROR = 2
+VALGRIND_VER := $(join $(VALGRIND_VER),valgrind)
+
+VALGRIND_OPTS = --error-exitcode=$(VALGRIND_ERROR) --leak-check=full
+
+BENCHTOOLOBJECTS = $(BENCH_LIB_SOURCES:.cc=.o) $(LIBOBJECTS) $(TESTUTIL)
+
+EXPOBJECTS = $(EXP_LIB_SOURCES:.cc=.o) $(LIBOBJECTS) $(TESTUTIL)
+
+TESTS = \
+ db_basic_test \
+ db_encryption_test \
+ db_test2 \
+ external_sst_file_basic_test \
+ auto_roll_logger_test \
+ bloom_test \
+ dynamic_bloom_test \
+ c_test \
+ checkpoint_test \
+ crc32c_test \
+ coding_test \
+ inlineskiplist_test \
+ env_basic_test \
+ env_test \
+ hash_test \
+ thread_local_test \
+ rate_limiter_test \
+ perf_context_test \
+ iostats_context_test \
+ db_wal_test \
+ db_block_cache_test \
+ db_test \
+ db_blob_index_test \
+ db_bloom_filter_test \
+ db_iter_test \
+ db_log_iter_test \
+ db_compaction_filter_test \
+ db_compaction_test \
+ db_dynamic_level_test \
+ db_flush_test \
+ db_inplace_update_test \
+ db_iterator_test \
+ db_memtable_test \
+ db_merge_operator_test \
+ db_options_test \
+ db_range_del_test \
+ db_sst_test \
+ db_tailing_iter_test \
+ db_universal_compaction_test \
+ db_io_failure_test \
+ db_properties_test \
+ db_table_properties_test \
+ db_statistics_test \
+ db_write_test \
+ autovector_test \
+ blob_db_test \
+ cleanable_test \
+ column_family_test \
+ table_properties_collector_test \
+ arena_test \
+ block_test \
+ cache_test \
+ corruption_test \
+ slice_transform_test \
+ dbformat_test \
+ fault_injection_test \
+ filelock_test \
+ filename_test \
+ file_reader_writer_test \
+ block_based_filter_block_test \
+ full_filter_block_test \
+ partitioned_filter_block_test \
+ hash_table_test \
+ histogram_test \
+ log_test \
+ manual_compaction_test \
+ mock_env_test \
+ memtable_list_test \
+ merge_helper_test \
+ memory_test \
+ merge_test \
+ merger_test \
+ util_merge_operators_test \
+ options_file_test \
+ redis_test \
+ reduce_levels_test \
+ plain_table_db_test \
+ comparator_db_test \
+ external_sst_file_test \
+ prefix_test \
+ skiplist_test \
+ write_buffer_manager_test \
+ stringappend_test \
+ cassandra_format_test \
+ cassandra_functional_test \
+ cassandra_row_merge_test \
+ cassandra_serialize_test \
+ ttl_test \
+ date_tiered_test \
+ backupable_db_test \
+ document_db_test \
+ json_document_test \
+ sim_cache_test \
+ spatial_db_test \
+ version_edit_test \
+ version_set_test \
+ compaction_picker_test \
+ version_builder_test \
+ file_indexer_test \
+ write_batch_test \
+ write_batch_with_index_test \
+ write_controller_test\
+ deletefile_test \
+ table_test \
+ geodb_test \
+ delete_scheduler_test \
+ options_test \
+ options_settable_test \
+ options_util_test \
+ event_logger_test \
+ timer_queue_test \
+ cuckoo_table_builder_test \
+ cuckoo_table_reader_test \
+ cuckoo_table_db_test \
+ flush_job_test \
+ wal_manager_test \
+ listener_test \
+ compaction_iterator_test \
+ compaction_job_test \
+ thread_list_test \
+ sst_dump_test \
+ column_aware_encoding_test \
+ compact_files_test \
+ optimistic_transaction_test \
+ write_callback_test \
+ heap_test \
+ compact_on_deletion_collector_test \
+ compaction_job_stats_test \
+ option_change_migration_test \
+ transaction_test \
+ ldb_cmd_test \
+ persistent_cache_test \
+ statistics_test \
+ lua_test \
+ range_del_aggregator_test \
+ lru_cache_test \
+ object_registry_test \
+ repair_test \
+ env_timed_test \
+ write_prepared_transaction_test \
+
+PARALLEL_TEST = \
+ backupable_db_test \
+ db_compaction_filter_test \
+ db_compaction_test \
+ db_sst_test \
+ db_test \
+ db_universal_compaction_test \
+ db_wal_test \
+ external_sst_file_test \
+ fault_injection_test \
+ inlineskiplist_test \
+ manual_compaction_test \
+ persistent_cache_test \
+ table_test \
+ transaction_test \
+ write_prepared_transaction_test
+
+SUBSET := $(TESTS)
+ifdef ROCKSDBTESTS_START
+ SUBSET := $(shell echo $(SUBSET) | sed 's/^.*$(ROCKSDBTESTS_START)/$(ROCKSDBTESTS_START)/')
+endif
+
+ifdef ROCKSDBTESTS_END
+ SUBSET := $(shell echo $(SUBSET) | sed 's/$(ROCKSDBTESTS_END).*//')
+endif
+
+TOOLS = \
+ sst_dump \
+ db_sanity_test \
+ db_stress \
+ write_stress \
+ ldb \
+ db_repl_stress \
+ rocksdb_dump \
+ rocksdb_undump \
+ blob_dump \
+
+TEST_LIBS = \
+ librocksdb_env_basic_test.a
+
+# TODO: add back forward_iterator_bench, after making it build in all environments.
+BENCHMARKS = db_bench table_reader_bench cache_bench memtablerep_bench column_aware_encoding_exp persistent_cache_bench
+
+# if user didn't config LIBNAME, set the default
+ifeq ($(LIBNAME),)
+# we should only run rocksdb in production with DEBUG_LEVEL 0
+ifeq ($(DEBUG_LEVEL),0)
+ LIBNAME=librocksdb
+else
+ LIBNAME=librocksdb_debug
+endif
+endif
+LIBRARY = ${LIBNAME}.a
+TOOLS_LIBRARY = ${LIBNAME}_tools.a
+
+ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+
+default: all
+
+#-----------------------------------------------
+# Create platform independent shared libraries.
+#-----------------------------------------------
+ifneq ($(PLATFORM_SHARED_EXT),)
+
+ifneq ($(PLATFORM_SHARED_VERSIONED),true)
+SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
+SHARED2 = $(SHARED1)
+SHARED3 = $(SHARED1)
+SHARED4 = $(SHARED1)
+SHARED = $(SHARED1)
+else
+SHARED_MAJOR = $(ROCKSDB_MAJOR)
+SHARED_MINOR = $(ROCKSDB_MINOR)
+SHARED_PATCH = $(ROCKSDB_PATCH)
+SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
+ifeq ($(PLATFORM), OS_MACOSX)
+SHARED_OSX = $(LIBNAME).$(SHARED_MAJOR)
+SHARED2 = $(SHARED_OSX).$(PLATFORM_SHARED_EXT)
+SHARED3 = $(SHARED_OSX).$(SHARED_MINOR).$(PLATFORM_SHARED_EXT)
+SHARED4 = $(SHARED_OSX).$(SHARED_MINOR).$(SHARED_PATCH).$(PLATFORM_SHARED_EXT)
+else
+SHARED2 = $(SHARED1).$(SHARED_MAJOR)
+SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
+SHARED4 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR).$(SHARED_PATCH)
+endif
+SHARED = $(SHARED1) $(SHARED2) $(SHARED3) $(SHARED4)
+$(SHARED1): $(SHARED4)
+ ln -fs $(SHARED4) $(SHARED1)
+$(SHARED2): $(SHARED4)
+ ln -fs $(SHARED4) $(SHARED2)
+$(SHARED3): $(SHARED4)
+ ln -fs $(SHARED4) $(SHARED3)
+endif
+ifeq ($(HAVE_POWER8),1)
+SHARED_C_OBJECTS = $(LIB_SOURCES_C:.c=.o)
+SHARED_ASM_OBJECTS = $(LIB_SOURCES_ASM:.S=.o)
+SHARED_C_LIBOBJECTS = $(patsubst %.o,shared-objects/%.o,$(SHARED_C_OBJECTS))
+SHARED_ASM_LIBOBJECTS = $(patsubst %.o,shared-objects/%.o,$(SHARED_ASM_OBJECTS))
+shared_libobjects = $(patsubst %,shared-objects/%,$(LIB_CC_OBJECTS))
+else
+shared_libobjects = $(patsubst %,shared-objects/%,$(LIBOBJECTS))
+endif
+
+CLEAN_FILES += shared-objects
+shared_all_libobjects = $(shared_libobjects)
+
+ifeq ($(HAVE_POWER8),1)
+shared-ppc-objects = $(SHARED_C_LIBOBJECTS) $(SHARED_ASM_LIBOBJECTS)
+
+shared-objects/util/crc32c_ppc.o: util/crc32c_ppc.c
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+
+shared-objects/util/crc32c_ppc_asm.o: util/crc32c_ppc_asm.S
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+endif
+$(shared_libobjects): shared-objects/%.o: %.cc
+ $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
+
+ifeq ($(HAVE_POWER8),1)
+shared_all_libobjects = $(shared_libobjects) $(shared-ppc-objects)
+endif
+$(SHARED4): $(shared_all_libobjects)
+ $(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(shared_libobjects) $(LDFLAGS) -o $@
+
+endif # PLATFORM_SHARED_EXT
+
+.PHONY: blackbox_crash_test check clean coverage crash_test ldb_tests package \
+ release tags valgrind_check whitebox_crash_test format static_lib shared_lib all \
+ dbg rocksdbjavastatic rocksdbjava install install-static install-shared uninstall \
+ analyze tools tools_lib
+
+
+all: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(TESTS)
+
+all_but_some_tests: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(SUBSET)
+
+static_lib: $(LIBRARY)
+
+shared_lib: $(SHARED)
+
+tools: $(TOOLS)
+
+tools_lib: $(TOOLS_LIBRARY)
+
+test_libs: $(TEST_LIBS)
+
+dbg: $(LIBRARY) $(BENCHMARKS) tools $(TESTS)
+
+# creates static library and programs
+release:
+ $(MAKE) clean
+ DEBUG_LEVEL=0 $(MAKE) static_lib tools db_bench
+
+coverage:
+ $(MAKE) clean
+ COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) J=1 all check
+ cd coverage && ./coverage_test.sh
+ # Delete intermediate files
+ find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
+
+ifneq (,$(filter check parallel_check,$(MAKECMDGOALS)),)
+# Use /dev/shm if it has the sticky bit set (otherwise, /tmp),
+# and create a randomly-named rocksdb.XXXX directory therein.
+# We'll use that directory in the "make check" rules.
+ifeq ($(TMPD),)
+TMPDIR := $(shell echo $${TMPDIR:-/tmp})
+TMPD := $(shell f=/dev/shm; test -k $$f || f=$(TMPDIR); \
+ perl -le 'use File::Temp "tempdir";' \
+ -e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)')
+endif
+endif
+
+# Run all tests in parallel, accumulating per-test logs in t/log-*.
+#
+# Each t/run-* file is a tiny generated bourne shell script that invokes one of
+# sub-tests. Why use a file for this? Because that makes the invocation of
+# parallel below simpler, which in turn makes the parsing of parallel's
+# LOG simpler (the latter is for live monitoring as parallel
+# tests run).
+#
+# Test names are extracted by running tests with --gtest_list_tests.
+# This filter removes the "#"-introduced comments, and expands to
+# fully-qualified names by changing input like this:
+#
+# DBTest.
+# Empty
+# WriteEmptyBatch
+# MultiThreaded/MultiThreadedDBTest.
+# MultiThreaded/0 # GetParam() = 0
+# MultiThreaded/1 # GetParam() = 1
+#
+# into this:
+#
+# DBTest.Empty
+# DBTest.WriteEmptyBatch
+# MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
+# MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
+#
+
+parallel_tests = $(patsubst %,parallel_%,$(PARALLEL_TEST))
+.PHONY: gen_parallel_tests $(parallel_tests)
+$(parallel_tests): $(PARALLEL_TEST)
+ $(AM_V_at)TEST_BINARY=$(patsubst parallel_%,%,$@); \
+ TEST_NAMES=` \
+ ./$$TEST_BINARY --gtest_list_tests \
+ | perl -n \
+ -e 's/ *\#.*//;' \
+ -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};' \
+ -e 'print qq! $$p$$2!'`; \
+ for TEST_NAME in $$TEST_NAMES; do \
+ TEST_SCRIPT=t/run-$$TEST_BINARY-$${TEST_NAME//\//-}; \
+ echo " GEN " $$TEST_SCRIPT; \
+ printf '%s\n' \
+ '#!/bin/sh' \
+ "d=\$(TMPD)$$TEST_SCRIPT" \
+ 'mkdir -p $$d' \
+ "TEST_TMPDIR=\$$d $(DRIVER) ./$$TEST_BINARY --gtest_filter=$$TEST_NAME" \
+ > $$TEST_SCRIPT; \
+ chmod a=rx $$TEST_SCRIPT; \
+ done
+
+gen_parallel_tests:
+ $(AM_V_at)mkdir -p t
+ $(AM_V_at)rm -f t/run-*
+ $(MAKE) $(parallel_tests)
+
+# Reorder input lines (which are one per test) so that the
+# longest-running tests appear first in the output.
+# Do this by prefixing each selected name with its duration,
+# sort the resulting names, and remove the leading numbers.
+# FIXME: the "100" we prepend is a fake time, for now.
+# FIXME: squirrel away timings from each run and use them
+# (when present) on subsequent runs to order these tests.
+#
+# Without this reordering, these two tests would happen to start only
+# after almost all other tests had completed, thus adding 100 seconds
+# to the duration of parallel "make check". That's the difference
+# between 4 minutes (old) and 2m20s (new).
+#
+# 152.120 PASS t/DBTest.FileCreationRandomFailure
+# 107.816 PASS t/DBTest.EncodeDecompressedBlockSizeTest
+#
+slow_test_regexp = \
+ ^.*SnapshotConcurrentAccessTest.*$$|^t/run-table_test-HarnessTest.Randomized$$|^t/run-db_test-.*(?:FileCreationRandomFailure|EncodeDecompressedBlockSizeTest)$$|^.*RecoverFromCorruptedWALWithoutFlush$$
+prioritize_long_running_tests = \
+ perl -pe 's,($(slow_test_regexp)),100 $$1,' \
+ | sort -k1,1gr \
+ | sed 's/^[.0-9]* //'
+
+# "make check" uses
+# Run with "make J=1 check" to disable parallelism in "make check".
+# Run with "make J=200% check" to run two parallel jobs per core.
+# The default is to run one job per core (J=100%).
+# See "man parallel" for its "-j ..." option.
+J ?= 100%
+
+# Use this regexp to select the subset of tests whose names match.
+tests-regexp = .
+
+t_run = $(wildcard t/run-*)
+.PHONY: check_0
+check_0:
+ $(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
+ printf '%s\n' '' \
+ 'To monitor subtest <duration,pass/fail,name>,' \
+ ' run "make watch-log" in a separate window' ''; \
+ test -t 1 && eta=--eta || eta=; \
+ { \
+ printf './%s\n' $(filter-out $(PARALLEL_TEST),$(TESTS)); \
+ printf '%s\n' $(t_run); \
+ } \
+ | $(prioritize_long_running_tests) \
+ | grep -E '$(tests-regexp)' \
+ | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG $$eta --gnu '{} >& t/log-{/}'
+
+valgrind-blacklist-regexp = InlineSkipTest.ConcurrentInsert|TransactionTest.DeadlockStress|DBCompactionTest.SuggestCompactRangeNoTwoLevel0Compactions|BackupableDBTest.RateLimiting|DBTest.CloseSpeedup|DBTest.ThreadStatusFlush|DBTest.RateLimitingTest|DBTest.EncodeDecompressedBlockSizeTest|FaultInjectionTest.UninstalledCompaction|HarnessTest.Randomized|ExternalSSTFileTest.CompactDuringAddFileRandom|ExternalSSTFileTest.IngestFileWithGlobalSeqnoRandomized
+
+.PHONY: valgrind_check_0
+valgrind_check_0:
+ $(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
+ printf '%s\n' '' \
+ 'To monitor subtest <duration,pass/fail,name>,' \
+ ' run "make watch-log" in a separate window' ''; \
+ test -t 1 && eta=--eta || eta=; \
+ { \
+ printf './%s\n' $(filter-out $(PARALLEL_TEST) %skiplist_test options_settable_test, $(TESTS)); \
+ printf '%s\n' $(t_run); \
+ } \
+ | $(prioritize_long_running_tests) \
+ | grep -E '$(tests-regexp)' \
+ | grep -E -v '$(valgrind-blacklist-regexp)' \
+ | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG $$eta --gnu \
+ '(if [[ "{}" == "./"* ]] ; then $(DRIVER) {}; else {}; fi) ' \
+ '>& t/valgrind_log-{/}'
+
+CLEAN_FILES += t LOG $(TMPD)
+
+# When running parallel "make check", you can monitor its progress
+# from another window.
+# Run "make watch_LOG" to show the duration,PASS/FAIL,name of parallel
+# tests as they are being run. We sort them so that longer-running ones
+# appear at the top of the list and any failing tests remain at the top
+# regardless of their duration. As with any use of "watch", hit ^C to
+# interrupt.
+watch-log:
+ watch --interval=0 'sort -k7,7nr -k4,4gr LOG|$(quoted_perl_command)'
+
+# If J != 1 and GNU parallel is installed, run the tests in parallel,
+# via the check_0 rule above. Otherwise, run them sequentially.
+check: all
+ $(MAKE) gen_parallel_tests
+ $(AM_V_GEN)if test "$(J)" != 1 \
+ && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
+ grep -q 'GNU Parallel'; \
+ then \
+ $(MAKE) T="$$t" TMPD=$(TMPD) check_0; \
+ else \
+ for t in $(TESTS); do \
+ echo "===== Running $$t"; ./$$t || exit 1; done; \
+ fi
+ rm -rf $(TMPD)
+ifneq ($(PLATFORM), OS_AIX)
+ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
+ python tools/ldb_test.py
+ sh tools/rocksdb_dump_test.sh
+endif
+endif
+
+# TODO add ldb_tests
+check_some: $(SUBSET)
+ for t in $(SUBSET); do echo "===== Running $$t"; ./$$t || exit 1; done
+
+.PHONY: ldb_tests
+ldb_tests: ldb
+ python tools/ldb_test.py
+
+crash_test: whitebox_crash_test blackbox_crash_test
+
+blackbox_crash_test: db_stress
+ python -u tools/db_crashtest.py --simple blackbox $(CRASH_TEST_EXT_ARGS)
+ python -u tools/db_crashtest.py blackbox $(CRASH_TEST_EXT_ARGS)
+
+ifeq ($(CRASH_TEST_KILL_ODD),)
+ CRASH_TEST_KILL_ODD=888887
+endif
+
+whitebox_crash_test: db_stress
+ python -u tools/db_crashtest.py --simple whitebox --random_kill_odd \
+ $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
+ python -u tools/db_crashtest.py whitebox --random_kill_odd \
+ $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
+
+asan_check:
+ $(MAKE) clean
+ COMPILE_WITH_ASAN=1 $(MAKE) check -j32
+ $(MAKE) clean
+
+asan_crash_test:
+ $(MAKE) clean
+ COMPILE_WITH_ASAN=1 $(MAKE) crash_test
+ $(MAKE) clean
+
+ubsan_check:
+ $(MAKE) clean
+ COMPILE_WITH_UBSAN=1 $(MAKE) check -j32
+ $(MAKE) clean
+
+ubsan_crash_test:
+ $(MAKE) clean
+ COMPILE_WITH_UBSAN=1 $(MAKE) crash_test
+ $(MAKE) clean
+
+valgrind_test:
+ DISABLE_JEMALLOC=1 $(MAKE) valgrind_check
+
+valgrind_check: $(TESTS)
+ $(MAKE) DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" gen_parallel_tests
+ $(AM_V_GEN)if test "$(J)" != 1 \
+ && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
+ grep -q 'GNU Parallel'; \
+ then \
+ $(MAKE) TMPD=$(TMPD) \
+ DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" valgrind_check_0; \
+ else \
+ for t in $(filter-out %skiplist_test options_settable_test,$(TESTS)); do \
+ $(VALGRIND_VER) $(VALGRIND_OPTS) ./$$t; \
+ ret_code=$$?; \
+ if [ $$ret_code -ne 0 ]; then \
+ exit $$ret_code; \
+ fi; \
+ done; \
+ fi
+
+
+ifneq ($(PAR_TEST),)
+parloop:
+ ret_bad=0; \
+ for t in $(PAR_TEST); do \
+ echo "===== Running $$t in parallel $(NUM_PAR)";\
+ if [ $(db_test) -eq 1 ]; then \
+ seq $(J) | v="$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{}; export TEST_TMPDIR=$$s;' \
+ 'timeout 2m ./db_test --gtest_filter=$$v >> $$s/log-{} 2>1'; \
+ else\
+ seq $(J) | v="./$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{};' \
+ 'export TEST_TMPDIR=$$s; timeout 10m $$v >> $$s/log-{} 2>1'; \
+ fi; \
+ ret_code=$$?; \
+ if [ $$ret_code -ne 0 ]; then \
+ ret_bad=$$ret_code; \
+ echo $$t exited with $$ret_code; \
+ fi; \
+ done; \
+ exit $$ret_bad;
+endif
+
+test_names = \
+ ./db_test --gtest_list_tests \
+ | perl -n \
+ -e 's/ *\#.*//;' \
+ -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};' \
+ -e 'print qq! $$p$$2!'
+
+parallel_check: $(TESTS)
+ $(AM_V_GEN)if test "$(J)" > 1 \
+ && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
+ grep -q 'GNU Parallel'; \
+ then \
+ echo Running in parallel $(J); \
+ else \
+ echo "Need to have GNU Parallel and J > 1"; exit 1; \
+ fi; \
+ ret_bad=0; \
+ echo $(J);\
+ echo Test Dir: $(TMPD); \
+ seq $(J) | build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{}; rm -rf $$s; mkdir $$s'; \
+ $(MAKE) PAR_TEST="$(shell $(test_names))" TMPD=$(TMPD) \
+ J=$(J) db_test=1 parloop; \
+ $(MAKE) PAR_TEST="$(filter-out db_test, $(TESTS))" \
+ TMPD=$(TMPD) J=$(J) db_test=0 parloop;
+
+analyze: clean
+ $(CLANG_SCAN_BUILD) --use-analyzer=$(CLANG_ANALYZER) \
+ --use-c++=$(CXX) --use-cc=$(CC) --status-bugs \
+ -o $(CURDIR)/scan_build_report \
+ $(MAKE) dbg
+
+CLEAN_FILES += unity.cc
+unity.cc: Makefile
+ rm -f $@ $@-t
+ for source_file in $(LIB_SOURCES); do \
+ echo "#include \"$$source_file\"" >> $@-t; \
+ done
+ chmod a=r $@-t
+ mv $@-t $@
+
+unity.a: unity.o
+ $(AM_V_AR)rm -f $@
+ $(AM_V_at)$(AR) $(ARFLAGS) $@ unity.o
+
+# try compiling db_test with unity
+unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) unity.a
+ $(AM_LINK)
+ ./unity_test
+
+rocksdb.h rocksdb.cc: build_tools/amalgamate.py Makefile $(LIB_SOURCES) unity.cc
+ build_tools/amalgamate.py -I. -i./include unity.cc -x include/rocksdb/c.h -H rocksdb.h -o rocksdb.cc
+
+clean:
+ rm -f $(BENCHMARKS) $(TOOLS) $(TESTS) $(LIBRARY) $(SHARED)
+ rm -rf $(CLEAN_FILES) ios-x86 ios-arm scan_build_report
+ find . -name "*.[oda]" -exec rm -f {} \;
+ find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
+ rm -rf bzip2* snappy* zlib* lz4* zstd*
+ cd java; $(MAKE) clean
+
+tags:
+ ctags * -R
+ cscope -b `find . -name '*.cc'` `find . -name '*.h'` `find . -name '*.c'`
+ ctags -e -R -o etags *
+
+format:
+ build_tools/format-diff.sh
+
+package:
+ bash build_tools/make_package.sh $(SHARED_MAJOR).$(SHARED_MINOR)
+
+# ---------------------------------------------------------------------------
+# Unit tests and tools
+# ---------------------------------------------------------------------------
+$(LIBRARY): $(LIBOBJECTS)
+ $(AM_V_AR)rm -f $@
+ $(AM_V_at)$(AR) $(ARFLAGS) $@ $(LIBOBJECTS)
+
+$(TOOLS_LIBRARY): $(BENCH_LIB_SOURCES:.cc=.o) $(TOOL_LIB_SOURCES:.cc=.o) $(LIB_SOURCES:.cc=.o) $(TESTUTIL)
+ $(AM_V_AR)rm -f $@
+ $(AM_V_at)$(AR) $(ARFLAGS) $@ $^
+
+librocksdb_env_basic_test.a: env/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_V_AR)rm -f $@
+ $(AM_V_at)$(AR) $(ARFLAGS) $@ $^
+
+db_bench: tools/db_bench.o $(BENCHTOOLOBJECTS)
+ $(AM_LINK)
+
+cache_bench: cache/cache_bench.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+persistent_cache_bench: utilities/persistent_cache/persistent_cache_bench.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+memtablerep_bench: memtable/memtablerep_bench.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+db_stress: tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+write_stress: tools/write_stress.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+db_sanity_test: tools/db_sanity_test.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+db_repl_stress: tools/db_repl_stress.o $(LIBOBJECTS) $(TESTUTIL)
+ $(AM_LINK)
+
+arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+autovector_test: util/autovector_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+column_family_test: db/column_family_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+table_properties_collector_test: db/table_properties_collector_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+dynamic_bloom_test: util/dynamic_bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cache_test: cache/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+option_change_migration_test: utilities/option_change_migration/option_change_migration_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+stringappend_test: utilities/merge_operators/string_append/stringappend_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cassandra_format_test: utilities/cassandra/cassandra_format_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cassandra_functional_test: utilities/cassandra/cassandra_functional_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cassandra_row_merge_test: utilities/cassandra/cassandra_row_merge_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cassandra_serialize_test: utilities/cassandra/cassandra_serialize_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+redis_test: utilities/redis/redis_lists_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+hash_table_test: utilities/persistent_cache/hash_table_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+histogram_test: monitoring/histogram_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+thread_local_test: util/thread_local_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+slice_transform_test: util/slice_transform_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_basic_test: db/db_basic_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_encryption_test: db/db_encryption_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_test: db/db_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_test2: db/db_test2.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_blob_index_test: db/db_blob_index_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_block_cache_test: db/db_block_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_bloom_filter_test: db/db_bloom_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_log_iter_test: db/db_log_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_compaction_filter_test: db/db_compaction_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_compaction_test: db/db_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_dynamic_level_test: db/db_dynamic_level_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_flush_test: db/db_flush_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_inplace_update_test: db/db_inplace_update_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_iterator_test: db/db_iterator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_memtable_test: db/db_memtable_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_merge_operator_test: db/db_merge_operator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_options_test: db/db_options_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_range_del_test: db/db_range_del_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_sst_test: db/db_sst_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_statistics_test: db/db_statistics_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_write_test: db/db_write_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+external_sst_file_basic_test: db/external_sst_file_basic_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+external_sst_file_test: db/external_sst_file_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_tailing_iter_test: db/db_tailing_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_iter_test: db/db_iter_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_universal_compaction_test: db/db_universal_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_wal_test: db/db_wal_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_io_failure_test: db/db_io_failure_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_properties_test: db/db_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_table_properties_test: db/db_table_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+log_write_bench: util/log_write_bench.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK) $(PROFILING_FLAGS)
+
+plain_table_db_test: db/plain_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+comparator_db_test: db/comparator_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+table_reader_bench: table/table_reader_bench.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK) $(PROFILING_FLAGS)
+
+perf_context_test: db/perf_context_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
+
+prefix_test: db/prefix_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
+
+backupable_db_test: utilities/backupable/backupable_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+checkpoint_test: utilities/checkpoint/checkpoint_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+document_db_test: utilities/document/document_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+json_document_test: utilities/document/json_document_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+sim_cache_test: utilities/simulator_cache/sim_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+spatial_db_test: utilities/spatialdb/spatial_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+env_mirror_test: utilities/env_mirror_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+env_timed_test: utilities/env_timed_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+ifdef ROCKSDB_USE_LIBRADOS
+env_librados_test: utilities/env_librados_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
+endif
+
+object_registry_test: utilities/object_registry_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+ttl_test: utilities/ttl/ttl_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+date_tiered_test: utilities/date_tiered/date_tiered_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_batch_with_index_test: utilities/write_batch_with_index/write_batch_with_index_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+flush_job_test: db/flush_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compaction_iterator_test: db/compaction_iterator_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compaction_job_test: db/compaction_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compaction_job_stats_test: db/compaction_job_stats_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compact_on_deletion_collector_test: utilities/table_properties_collectors/compact_on_deletion_collector_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+wal_manager_test: db/wal_manager_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+env_basic_test: env/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+env_test: env/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+fault_injection_test: db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+rate_limiter_test: util/rate_limiter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+delete_scheduler_test: util/delete_scheduler_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+file_reader_writer_test: util/file_reader_writer_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+block_based_filter_block_test: table/block_based_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+full_filter_block_test: table/full_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+partitioned_filter_block_test: table/partitioned_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cleanable_test: table/cleanable_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+block_test: table/block_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+inlineskiplist_test: memtable/inlineskiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+skiplist_test: memtable/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_buffer_manager_test: memtable/write_buffer_manager_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compaction_picker_test: db/compaction_picker_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+version_builder_test: db/version_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+file_indexer_test: db/file_indexer_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+reduce_levels_test: tools/reduce_levels_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_controller_test: db/write_controller_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+merge_helper_test: db/merge_helper_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+memory_test: utilities/memory/memory_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+merge_test: db/merge_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+merger_test: table/merger_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+util_merge_operators_test: utilities/util_merge_operators_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+options_file_test: db/options_file_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+deletefile_test: db/deletefile_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+geodb_test: utilities/geodb/geodb_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+rocksdb_dump: tools/dump/rocksdb_dump.o $(LIBOBJECTS)
+ $(AM_LINK)
+
+rocksdb_undump: tools/dump/rocksdb_undump.o $(LIBOBJECTS)
+ $(AM_LINK)
+
+cuckoo_table_builder_test: table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cuckoo_table_reader_test: table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+cuckoo_table_db_test: db/cuckoo_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+listener_test: db/listener_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+thread_list_test: util/thread_list_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+compact_files_test: db/compact_files_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+options_test: options/options_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+options_settable_test: options/options_settable_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+options_util_test: utilities/options/options_util_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+db_bench_tool_test: tools/db_bench_tool_test.o $(BENCHTOOLOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+event_logger_test: util/event_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+timer_queue_test: util/timer_queue_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+sst_dump_test: tools/sst_dump_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+column_aware_encoding_test: utilities/column_aware_encoding_test.o $(TESTHARNESS) $(EXPOBJECTS)
+ $(AM_LINK)
+
+optimistic_transaction_test: utilities/transactions/optimistic_transaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+mock_env_test : env/mock_env_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+manual_compaction_test: db/manual_compaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+filelock_test: util/filelock_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+auto_roll_logger_test: util/auto_roll_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+memtable_list_test: db/memtable_list_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_callback_test: db/write_callback_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+heap_test: util/heap_test.o $(GTEST)
+ $(AM_LINK)
+
+transaction_test: utilities/transactions/transaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+write_prepared_transaction_test: utilities/transactions/write_prepared_transaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+sst_dump: tools/sst_dump.o $(LIBOBJECTS)
+ $(AM_LINK)
+
+blob_dump: tools/blob_dump.o $(LIBOBJECTS)
+ $(AM_LINK)
+
+column_aware_encoding_exp: utilities/column_aware_encoding_exp.o $(EXPOBJECTS)
+ $(AM_LINK)
+
+repair_test: db/repair_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+ldb_cmd_test: tools/ldb_cmd_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+ldb: tools/ldb.o $(LIBOBJECTS)
+ $(AM_LINK)
+
+iostats_context_test: monitoring/iostats_context_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
+
+persistent_cache_test: utilities/persistent_cache/persistent_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+statistics_test: monitoring/statistics_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+lru_cache_test: cache/lru_cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+lua_test: utilities/lua/rocks_lua_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+range_del_aggregator_test: db/range_del_aggregator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+blob_db_test: utilities/blob_db/blob_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(AM_LINK)
+
+#-------------------------------------------------
+# make install related stuff
+INSTALL_PATH ?= /usr/local
+
+uninstall:
+ rm -rf $(INSTALL_PATH)/include/rocksdb \
+ $(INSTALL_PATH)/lib/$(LIBRARY) \
+ $(INSTALL_PATH)/lib/$(SHARED4) \
+ $(INSTALL_PATH)/lib/$(SHARED3) \
+ $(INSTALL_PATH)/lib/$(SHARED2) \
+ $(INSTALL_PATH)/lib/$(SHARED1)
+
+install-headers:
+ install -d $(INSTALL_PATH)/lib
+ for header_dir in `find "include/rocksdb" -type d`; do \
+ install -d $(INSTALL_PATH)/$$header_dir; \
+ done
+	for header in `find "include/rocksdb" -type f -name "*.h"`; do \
+ install -C -m 644 $$header $(INSTALL_PATH)/$$header; \
+ done
+
+install-static: install-headers $(LIBRARY)
+ install -C -m 755 $(LIBRARY) $(INSTALL_PATH)/lib
+
+install-shared: install-headers $(SHARED4)
+ install -C -m 755 $(SHARED4) $(INSTALL_PATH)/lib && \
+ ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED3) && \
+ ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED2) && \
+ ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED1)
+
+# install static by default + install shared if it exists
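+# Illustrative invocation (the INSTALL_PATH value here is just an example):
+#   make install INSTALL_PATH=/opt/rocksdb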
+install: install-static
+ [ -e $(SHARED4) ] && $(MAKE) install-shared || :
+
+#-------------------------------------------------
+
+
+# ---------------------------------------------------------------------------
+# Jni stuff
+# ---------------------------------------------------------------------------
+
+JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/linux
+ifeq ($(PLATFORM), OS_SOLARIS)
+ ARCH := $(shell isainfo -b)
+else
+ ARCH := $(shell getconf LONG_BIT)
+endif
+
+ifeq (,$(findstring ppc,$(MACHINE)))
+ ROCKSDBJNILIB = librocksdbjni-linux$(ARCH).so
+else
+ ROCKSDBJNILIB = librocksdbjni-linux-$(MACHINE).so
+endif
+ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar
+ROCKSDB_JAR_ALL = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
+ROCKSDB_JAVADOCS_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar
+ROCKSDB_SOURCES_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar
+SHA256_CMD = sha256sum
+
+ZLIB_VER ?= 1.2.11
+ZLIB_SHA256 ?= c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1
+ZLIB_DOWNLOAD_BASE ?= http://zlib.net
+BZIP2_VER ?= 1.0.6
+BZIP2_SHA256 ?= a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
+BZIP2_DOWNLOAD_BASE ?= http://www.bzip.org
+SNAPPY_VER ?= 1.1.4
+SNAPPY_SHA256 ?= 134bfe122fd25599bb807bb8130e7ba6d9bdb851e0b16efcb83ac4f5d0b70057
+SNAPPY_DOWNLOAD_BASE ?= https://github.com/google/snappy/releases/download
+LZ4_VER ?= 1.7.5
+LZ4_SHA256 ?= 0190cacd63022ccb86f44fa5041dc6c3804407ad61550ca21c382827319e7e7e
+LZ4_DOWNLOAD_BASE ?= https://github.com/lz4/lz4/archive
+ZSTD_VER ?= 1.2.0
+ZSTD_SHA256 ?= 4a7e4593a3638276ca7f2a09dc4f38e674d8317bbea51626393ca73fc047cbfb
+ZSTD_DOWNLOAD_BASE ?= https://github.com/facebook/zstd/archive
+
+ifeq ($(PLATFORM), OS_MACOSX)
+ ROCKSDBJNILIB = librocksdbjni-osx.jnilib
+ ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar
+ SHA256_CMD = openssl sha256 -r
+ifneq ("$(wildcard $(JAVA_HOME)/include/darwin)","")
+ JAVA_INCLUDE = -I$(JAVA_HOME)/include -I $(JAVA_HOME)/include/darwin
+else
+ JAVA_INCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers/
+endif
+endif
+ifeq ($(PLATFORM), OS_FREEBSD)
+ JAVA_INCLUDE += -I$(JAVA_HOME)/include/freebsd
+ ROCKSDBJNILIB = librocksdbjni-freebsd$(ARCH).so
+ ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-freebsd$(ARCH).jar
+endif
+ifeq ($(PLATFORM), OS_SOLARIS)
+ ROCKSDBJNILIB = librocksdbjni-solaris$(ARCH).so
+ ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-solaris$(ARCH).jar
+ JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/solaris
+ SHA256_CMD = digest -a sha256
+endif
+ifeq ($(PLATFORM), OS_AIX)
+ JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/aix
+ ROCKSDBJNILIB = librocksdbjni-aix.so
+ EXTRACT_SOURCES = gunzip < TAR_GZ | tar xvf -
+ SNAPPY_MAKE_TARGET = libsnappy.la
+endif
+
+libz.a:
+ -rm -rf zlib-$(ZLIB_VER)
+ curl -O -L ${ZLIB_DOWNLOAD_BASE}/zlib-$(ZLIB_VER).tar.gz
+ ZLIB_SHA256_ACTUAL=`$(SHA256_CMD) zlib-$(ZLIB_VER).tar.gz | cut -d ' ' -f 1`; \
+ if [ "$(ZLIB_SHA256)" != "$$ZLIB_SHA256_ACTUAL" ]; then \
+ echo zlib-$(ZLIB_VER).tar.gz checksum mismatch, expected=\"$(ZLIB_SHA256)\" actual=\"$$ZLIB_SHA256_ACTUAL\"; \
+ exit 1; \
+ fi
+ tar xvzf zlib-$(ZLIB_VER).tar.gz
+ cd zlib-$(ZLIB_VER) && CFLAGS='-fPIC ${EXTRA_CFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' ./configure --static && make
+ cp zlib-$(ZLIB_VER)/libz.a .
+
+libbz2.a:
+ -rm -rf bzip2-$(BZIP2_VER)
+ curl -O -L ${BZIP2_DOWNLOAD_BASE}/$(BZIP2_VER)/bzip2-$(BZIP2_VER).tar.gz
+ BZIP2_SHA256_ACTUAL=`$(SHA256_CMD) bzip2-$(BZIP2_VER).tar.gz | cut -d ' ' -f 1`; \
+ if [ "$(BZIP2_SHA256)" != "$$BZIP2_SHA256_ACTUAL" ]; then \
+ echo bzip2-$(BZIP2_VER).tar.gz checksum mismatch, expected=\"$(BZIP2_SHA256)\" actual=\"$$BZIP2_SHA256_ACTUAL\"; \
+ exit 1; \
+ fi
+ tar xvzf bzip2-$(BZIP2_VER).tar.gz
+ cd bzip2-$(BZIP2_VER) && make CFLAGS='-fPIC -O2 -g -D_FILE_OFFSET_BITS=64 ${EXTRA_CFLAGS}' AR='ar ${EXTRA_ARFLAGS}'
+ cp bzip2-$(BZIP2_VER)/libbz2.a .
+
+libsnappy.a:
+ -rm -rf snappy-$(SNAPPY_VER)
+ curl -O -L ${SNAPPY_DOWNLOAD_BASE}/$(SNAPPY_VER)/snappy-$(SNAPPY_VER).tar.gz
+ SNAPPY_SHA256_ACTUAL=`$(SHA256_CMD) snappy-$(SNAPPY_VER).tar.gz | cut -d ' ' -f 1`; \
+ if [ "$(SNAPPY_SHA256)" != "$$SNAPPY_SHA256_ACTUAL" ]; then \
+ echo snappy-$(SNAPPY_VER).tar.gz checksum mismatch, expected=\"$(SNAPPY_SHA256)\" actual=\"$$SNAPPY_SHA256_ACTUAL\"; \
+ exit 1; \
+ fi
+ tar xvzf snappy-$(SNAPPY_VER).tar.gz
+ cd snappy-$(SNAPPY_VER) && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' ./configure --with-pic --enable-static --disable-shared
+ cd snappy-$(SNAPPY_VER) && make ${SNAPPY_MAKE_TARGET}
+ cp snappy-$(SNAPPY_VER)/.libs/libsnappy.a .
+
+liblz4.a:
+ -rm -rf lz4-$(LZ4_VER)
+ curl -O -L ${LZ4_DOWNLOAD_BASE}/v$(LZ4_VER).tar.gz
+ mv v$(LZ4_VER).tar.gz lz4-$(LZ4_VER).tar.gz
+ LZ4_SHA256_ACTUAL=`$(SHA256_CMD) lz4-$(LZ4_VER).tar.gz | cut -d ' ' -f 1`; \
+ if [ "$(LZ4_SHA256)" != "$$LZ4_SHA256_ACTUAL" ]; then \
+ echo lz4-$(LZ4_VER).tar.gz checksum mismatch, expected=\"$(LZ4_SHA256)\" actual=\"$$LZ4_SHA256_ACTUAL\"; \
+ exit 1; \
+ fi
+ tar xvzf lz4-$(LZ4_VER).tar.gz
+ cd lz4-$(LZ4_VER)/lib && make CFLAGS='-fPIC -O2 ${EXTRA_CFLAGS}' all
+ cp lz4-$(LZ4_VER)/lib/liblz4.a .
+
+libzstd.a:
+ -rm -rf zstd-$(ZSTD_VER)
+ curl -O -L ${ZSTD_DOWNLOAD_BASE}/v$(ZSTD_VER).tar.gz
+ mv v$(ZSTD_VER).tar.gz zstd-$(ZSTD_VER).tar.gz
+ ZSTD_SHA256_ACTUAL=`$(SHA256_CMD) zstd-$(ZSTD_VER).tar.gz | cut -d ' ' -f 1`; \
+ if [ "$(ZSTD_SHA256)" != "$$ZSTD_SHA256_ACTUAL" ]; then \
+ echo zstd-$(ZSTD_VER).tar.gz checksum mismatch, expected=\"$(ZSTD_SHA256)\" actual=\"$$ZSTD_SHA256_ACTUAL\"; \
+ exit 1; \
+ fi
+ tar xvzf zstd-$(ZSTD_VER).tar.gz
+ cd zstd-$(ZSTD_VER)/lib && make CFLAGS='-fPIC -O2 ${EXTRA_CFLAGS}' all
+ cp zstd-$(ZSTD_VER)/lib/libzstd.a .
+
+# A version of each $(LIBOBJECTS) compiled with -fPIC and a fixed set of static compression libraries
+java_static_libobjects = $(patsubst %,jls/%,$(LIBOBJECTS))
+CLEAN_FILES += jls
+
+ifneq ($(ROCKSDB_JAVA_NO_COMPRESSION), 1)
+JAVA_COMPRESSIONS = libz.a libbz2.a libsnappy.a liblz4.a libzstd.a
+endif
+
+JAVA_STATIC_FLAGS = -DZLIB -DBZIP2 -DSNAPPY -DLZ4 -DZSTD
+JAVA_STATIC_INCLUDES = -I./zlib-$(ZLIB_VER) -I./bzip2-$(BZIP2_VER) -I./snappy-$(SNAPPY_VER) -I./lz4-$(LZ4_VER)/lib -I./zstd-$(ZSTD_VER)/lib
+
+$(java_static_libobjects): jls/%.o: %.cc $(JAVA_COMPRESSIONS)
+ $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(JAVA_STATIC_FLAGS) $(JAVA_STATIC_INCLUDES) -fPIC -c $< -o $@ $(COVERAGEFLAGS)
+
+rocksdbjavastatic: $(java_static_libobjects)
+ cd java;$(MAKE) javalib;
+ rm -f ./java/target/$(ROCKSDBJNILIB)
+ $(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC \
+ -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) \
+ $(java_static_libobjects) $(COVERAGEFLAGS) \
+ $(JAVA_COMPRESSIONS) $(JAVA_STATIC_LDFLAGS)
+ cd java/target;strip $(STRIPFLAGS) $(ROCKSDBJNILIB)
+ cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
+ cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
+ cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
+ cd java/target/apidocs;jar -cf ../$(ROCKSDB_JAVADOCS_JAR) *
+ cd java/src/main/java;jar -cf ../../../target/$(ROCKSDB_SOURCES_JAR) org
+
+rocksdbjavastaticrelease: rocksdbjavastatic
+ cd java/crossbuild && vagrant destroy -f && vagrant up linux32 && vagrant halt linux32 && vagrant up linux64 && vagrant halt linux64
+ cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md
+ cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib
+ cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class
+
+rocksdbjavastaticreleasedocker: rocksdbjavastatic
+ DOCKER_LINUX_X64_CONTAINER=`docker ps -aqf name=rocksdb_linux_x64-be`; \
+ if [ -z "$$DOCKER_LINUX_X64_CONTAINER" ]; then \
+ docker container create --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host --name rocksdb_linux_x64-be evolvedbinary/rocksjava:centos6_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh; \
+ fi
+ docker start -a rocksdb_linux_x64-be
+ DOCKER_LINUX_X86_CONTAINER=`docker ps -aqf name=rocksdb_linux_x86-be`; \
+ if [ -z "$$DOCKER_LINUX_X86_CONTAINER" ]; then \
+ docker container create --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host --name rocksdb_linux_x86-be evolvedbinary/rocksjava:centos6_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh; \
+ fi
+ docker start -a rocksdb_linux_x86-be
+ cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md
+ cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib
+ cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class
+
+rocksdbjavastaticdockerppc64le:
+ mkdir -p java/target
+ DOCKER_LINUX_PPC64LE_CONTAINER=`docker ps -aqf name=rocksdb_linux_ppc64le-be`; \
+ if [ -z "$$DOCKER_LINUX_PPC64LE_CONTAINER" ]; then \
+ docker container create --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host --name rocksdb_linux_ppc64le-be evolvedbinary/rocksjava:centos7_ppc64le-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh; \
+ fi
+ docker start -a rocksdb_linux_ppc64le-be
+
+rocksdbjavastaticpublish: rocksdbjavastaticrelease rocksdbjavastaticpublishcentral
+
+rocksdbjavastaticpublishdocker: rocksdbjavastaticreleasedocker rocksdbjavastaticpublishcentral
+
+rocksdbjavastaticpublishcentral:
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar -Dclassifier=javadoc
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar -Dclassifier=sources
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux64.jar -Dclassifier=linux64
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux32.jar -Dclassifier=linux32
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar -Dclassifier=osx
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-win64.jar -Dclassifier=win64
+ mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
+
+# A version of each $(LIBOBJECTS) compiled with -fPIC
+ifeq ($(HAVE_POWER8),1)
+JAVA_CC_OBJECTS = $(SHARED_CC_OBJECTS)
+JAVA_C_OBJECTS = $(SHARED_C_OBJECTS)
+JAVA_ASM_OBJECTS = $(SHARED_ASM_OBJECTS)
+
+JAVA_C_LIBOBJECTS = $(patsubst %.c.o,jl/%.c.o,$(JAVA_C_OBJECTS))
+JAVA_ASM_LIBOBJECTS = $(patsubst %.S.o,jl/%.S.o,$(JAVA_ASM_OBJECTS))
+endif
+
+java_libobjects = $(patsubst %,jl/%,$(LIBOBJECTS))
+CLEAN_FILES += jl
+java_all_libobjects = $(java_libobjects)
+
+ifeq ($(HAVE_POWER8),1)
+java_ppc_libobjects = $(JAVA_C_LIBOBJECTS) $(JAVA_ASM_LIBOBJECTS)
+
+jl/crc32c_ppc.o: util/crc32c_ppc.c
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+
+jl/crc32c_ppc_asm.o: util/crc32c_ppc_asm.S
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+java_all_libobjects += $(java_ppc_libobjects)
+endif
+
+$(java_libobjects): jl/%.o: %.cc
+ $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS)
+
+
+
+rocksdbjava: $(java_all_libobjects)
+ $(AM_V_GEN)cd java;$(MAKE) javalib;
+ $(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB)
+ $(AM_V_at)$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(java_libobjects) $(JAVA_LDFLAGS) $(COVERAGEFLAGS)
+ $(AM_V_at)cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
+ $(AM_V_at)cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
+ $(AM_V_at)cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
+
+jclean:
+ cd java;$(MAKE) clean;
+
+jtest_compile: rocksdbjava
+ cd java;$(MAKE) java_test
+
+jtest_run:
+ cd java;$(MAKE) run_test
+
+jtest: rocksdbjava
+ cd java;$(MAKE) sample;$(MAKE) test;
+
+jdb_bench:
+ cd java;$(MAKE) db_bench;
+
+commit_prereq: build_tools/rocksdb-lego-determinator \
+ build_tools/precommit_checker.py
+ J=$(J) build_tools/precommit_checker.py unit unit_481 clang_unit release release_481 clang_release tsan asan ubsan lite unit_non_shm
+ $(MAKE) clean && $(MAKE) jclean && $(MAKE) rocksdbjava;
+
+# ---------------------------------------------------------------------------
+# Platform-specific compilation
+# ---------------------------------------------------------------------------
+
+ifeq ($(PLATFORM), IOS)
+# For iOS, create universal object files to be used on both the simulator and
+# a device.
+PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
+SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
+DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
+IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
+
+.cc.o:
+ mkdir -p ios-x86/$(dir $@)
+ $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ mkdir -p ios-arm/$(dir $@)
+ xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
+ lipo ios-x86/$@ ios-arm/$@ -create -output $@
+
+.c.o:
+ mkdir -p ios-x86/$(dir $@)
+ $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ mkdir -p ios-arm/$(dir $@)
+ xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
+ lipo ios-x86/$@ ios-arm/$@ -create -output $@
+
+else
+ifeq ($(HAVE_POWER8),1)
+util/crc32c_ppc.o: util/crc32c_ppc.c
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+
+util/crc32c_ppc_asm.o: util/crc32c_ppc_asm.S
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+endif
+.cc.o:
+ $(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS)
+
+.c.o:
+ $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
+endif
+# ---------------------------------------------------------------------------
+# Source files dependencies detection
+# ---------------------------------------------------------------------------
+
+all_sources = $(LIB_SOURCES) $(MAIN_SOURCES) $(MOCK_LIB_SOURCES) $(TOOL_LIB_SOURCES) $(BENCH_LIB_SOURCES) $(TEST_LIB_SOURCES) $(EXP_LIB_SOURCES)
+DEPFILES = $(all_sources:.cc=.cc.d)
+
+# Add proper dependency support so changing a .h file forces a .cc file to
+# rebuild.
+
+# A .d file records a .cc file's dependencies on .h files. We generate these
+# dependencies with g++'s -MM option, whose output is a make dependency rule.
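+# For example, the generated util/coding.cc.d might contain a rule like
+# (file names are illustrative):
+#   util/coding.cc.d util/coding.o: util/coding.cc util/coding.h include/rocksdb/slice.h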
+%.cc.d: %.cc
+ @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \
+ -MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@'
+
+ifeq ($(HAVE_POWER8),1)
+DEPFILES_C = $(LIB_SOURCES_C:.c=.c.d)
+DEPFILES_ASM = $(LIB_SOURCES_ASM:.S=.S.d)
+
+%.c.d: %.c
+ @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \
+ -MM -MT'$@' -MT'$(<:.c=.o)' "$<" -o '$@'
+
+%.S.d: %.S
+ @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \
+ -MM -MT'$@' -MT'$(<:.S=.o)' "$<" -o '$@'
+
+$(DEPFILES_C): %.c.d
+
+$(DEPFILES_ASM): %.S.d
+depend: $(DEPFILES) $(DEPFILES_C) $(DEPFILES_ASM)
+else
+depend: $(DEPFILES)
+endif
+
+# If the make goal is one of "clean", "format", "jclean", "jtest", "package"
+# or "analyze", we shouldn't try to import the *.d files.
+# TODO(kailiu) Unfamiliarity with Make's conditionals led to this ugly but
+# working solution.
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),format)
+ifneq ($(MAKECMDGOALS),jclean)
+ifneq ($(MAKECMDGOALS),jtest)
+ifneq ($(MAKECMDGOALS),package)
+ifneq ($(MAKECMDGOALS),analyze)
+-include $(DEPFILES)
+endif
+endif
+endif
+endif
+endif
+endif
diff --git a/c-deps/rocksdb/README.md b/c-deps/rocksdb/README.md
new file mode 100644
index 0000000000..550c352b88
--- /dev/null
+++ b/c-deps/rocksdb/README.md
@@ -0,0 +1,27 @@
+## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage
+
+[![Build Status](https://travis-ci.org/facebook/rocksdb.svg?branch=master)](https://travis-ci.org/facebook/rocksdb)
+[![Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/master?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/master)
+
+
+RocksDB is developed and maintained by the Facebook Database Engineering Team.
+It is built on earlier work on LevelDB by Sanjay Ghemawat (sanjay@google.com)
+and Jeff Dean (jeff@google.com).
+
+This code is a library that forms the core building block for a fast
+key-value server, especially suited for storing data on flash drives.
+It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
+between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
+and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
+making it especially suitable for storing multiple terabytes of data in a
+single database.
+
+Start with example usage here: https://github.com/facebook/rocksdb/tree/master/examples
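+
+For orientation, a minimal sketch of the basic API (the database path and
+option values here are illustrative):
+
+```cpp
+#include <cassert>
+#include <string>
+
+#include "rocksdb/db.h"
+
+int main() {
+  rocksdb::DB* db;
+  rocksdb::Options options;
+  options.create_if_missing = true;  // create the database if it doesn't exist
+
+  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_example", &db);
+  assert(s.ok());
+
+  s = db->Put(rocksdb::WriteOptions(), "key1", "value1");  // write a key
+  assert(s.ok());
+
+  std::string value;
+  s = db->Get(rocksdb::ReadOptions(), "key1", &value);     // read it back
+  assert(s.ok() && value == "value1");
+
+  delete db;  // close the database
+  return 0;
+}
+```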
+
+See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.
+
+The public interface is in `include/`. Callers should not include or
+rely on the details of any other header files in this package. Those
+internal APIs may be changed without warning.
+
+Design discussions are conducted in https://www.facebook.com/groups/rocksdb.dev/
diff --git a/c-deps/rocksdb/ROCKSDB_LITE.md b/c-deps/rocksdb/ROCKSDB_LITE.md
new file mode 100644
index 0000000000..41cfbecc2c
--- /dev/null
+++ b/c-deps/rocksdb/ROCKSDB_LITE.md
@@ -0,0 +1,21 @@
+# RocksDBLite
+
+RocksDBLite is a project focused on mobile use cases, which don't need many of the fancy features we've built for server workloads and which are very sensitive to binary size. For that reason, we added a compile flag, ROCKSDB_LITE, that comments out a lot of the nonessential code and keeps the binary lean.
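+
+The guard itself is just a preprocessor check around the optional code; a minimal sketch of the pattern (the declaration below is a placeholder for illustration, not a real RocksDB API):
+
+```cpp
+#ifndef ROCKSDB_LITE
+// Full-featured code path: compiled only into the regular (non-lite) binary.
+void RegisterServerOnlyFeature();  // placeholder declaration
+#endif  // !ROCKSDB_LITE
+```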
+
+Some examples of the features disabled by ROCKSDB_LITE:
+* No compiled-in support for the LDB tool
+* No backupable DB
+* No support for replication (which we provide in the form of TransactionalIterator)
+* No advanced monitoring tools
+* No special-purpose memtables that are highly optimized for specific use cases
+* No Transactions
+
+When adding a big new feature to RocksDB, please add a ROCKSDB_LITE compile guard if:
+* Nobody from mobile really needs your feature,
+* Your feature is adding a lot of weight to the binary.
+
+Don't add a ROCKSDB_LITE compile guard if:
+* It would introduce a lot of code complexity. Compile guards make code harder to read. It's a trade-off.
+* Your feature is not adding a lot of weight.
+
+If unsure, ask. :)
diff --git a/c-deps/rocksdb/TARGETS b/c-deps/rocksdb/TARGETS
new file mode 100644
index 0000000000..9d3b2bce76
--- /dev/null
+++ b/c-deps/rocksdb/TARGETS
@@ -0,0 +1,1060 @@
+REPO_PATH = package_name() + "/"
+
+BUCK_BINS = "buck-out/gen/" + REPO_PATH
+
+TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
+
+rocksdb_compiler_flags = [
+ "-fno-builtin-memcmp",
+ "-DROCKSDB_PLATFORM_POSIX",
+ "-DROCKSDB_LIB_IO_POSIX",
+ "-DROCKSDB_FALLOCATE_PRESENT",
+ "-DROCKSDB_MALLOC_USABLE_SIZE",
+ "-DROCKSDB_RANGESYNC_PRESENT",
+ "-DROCKSDB_SCHED_GETCPU_PRESENT",
+ "-DROCKSDB_SUPPORT_THREAD_LOCAL",
+ "-DOS_LINUX",
+ # Flags to enable libs we include
+ "-DSNAPPY",
+ "-DZLIB",
+ "-DBZIP2",
+ "-DLZ4",
+ "-DZSTD",
+ "-DGFLAGS=gflags",
+ "-DNUMA",
+ "-DTBB",
+ # Needed to compile in fbcode
+ "-Wno-expansion-to-defined",
+]
+
+rocksdb_external_deps = [
+ ("bzip2", None, "bz2"),
+ ("snappy", None, "snappy"),
+ ("zlib", None, "z"),
+ ("gflags", None, "gflags"),
+ ("lz4", None, "lz4"),
+ ("zstd", None),
+ ("tbb", None),
+ ("numa", None, "numa"),
+ ("googletest", None, "gtest"),
+]
+
+rocksdb_preprocessor_flags = [
+ # Directories with files for #include
+ "-I" + REPO_PATH + "include/",
+ "-I" + REPO_PATH,
+]
+
+rocksdb_arch_preprocessor_flags = {
+ "x86_64": ["-DHAVE_SSE42"],
+}
+
+build_mode = read_config("fbcode", "build_mode")
+
+is_opt_mode = build_mode.startswith("opt")
+
+# -DNDEBUG is added by default in opt mode in fbcode. Adding it again here is
+# harmless and guards against it being forgotten.
+if is_opt_mode:
+ rocksdb_compiler_flags.append("-DNDEBUG")
+
+cpp_library(
+ name = "rocksdb_lib",
+ srcs = [
+ "cache/clock_cache.cc",
+ "cache/lru_cache.cc",
+ "cache/sharded_cache.cc",
+ "db/builder.cc",
+ "db/c.cc",
+ "db/column_family.cc",
+ "db/compacted_db_impl.cc",
+ "db/compaction.cc",
+ "db/compaction_iterator.cc",
+ "db/compaction_job.cc",
+ "db/compaction_picker.cc",
+ "db/compaction_picker_universal.cc",
+ "db/convenience.cc",
+ "db/db_filesnapshot.cc",
+ "db/db_impl.cc",
+ "db/db_impl_compaction_flush.cc",
+ "db/db_impl_debug.cc",
+ "db/db_impl_experimental.cc",
+ "db/db_impl_files.cc",
+ "db/db_impl_open.cc",
+ "db/db_impl_readonly.cc",
+ "db/db_impl_write.cc",
+ "db/db_info_dumper.cc",
+ "db/db_iter.cc",
+ "db/dbformat.cc",
+ "db/event_helpers.cc",
+ "db/experimental.cc",
+ "db/external_sst_file_ingestion_job.cc",
+ "db/file_indexer.cc",
+ "db/flush_job.cc",
+ "db/flush_scheduler.cc",
+ "db/forward_iterator.cc",
+ "db/internal_stats.cc",
+ "db/log_reader.cc",
+ "db/log_writer.cc",
+ "db/malloc_stats.cc",
+ "db/managed_iterator.cc",
+ "db/memtable.cc",
+ "db/memtable_list.cc",
+ "db/merge_helper.cc",
+ "db/merge_operator.cc",
+ "db/range_del_aggregator.cc",
+ "db/repair.cc",
+ "db/snapshot_impl.cc",
+ "db/table_cache.cc",
+ "db/table_properties_collector.cc",
+ "db/transaction_log_impl.cc",
+ "db/version_builder.cc",
+ "db/version_edit.cc",
+ "db/version_set.cc",
+ "db/wal_manager.cc",
+ "db/write_batch.cc",
+ "db/write_batch_base.cc",
+ "db/write_controller.cc",
+ "db/write_thread.cc",
+ "env/env.cc",
+ "env/env_chroot.cc",
+ "env/env_encryption.cc",
+ "env/env_hdfs.cc",
+ "env/env_posix.cc",
+ "env/io_posix.cc",
+ "env/mock_env.cc",
+ "memtable/alloc_tracker.cc",
+ "memtable/hash_cuckoo_rep.cc",
+ "memtable/hash_linklist_rep.cc",
+ "memtable/hash_skiplist_rep.cc",
+ "memtable/skiplistrep.cc",
+ "memtable/vectorrep.cc",
+ "memtable/write_buffer_manager.cc",
+ "monitoring/histogram.cc",
+ "monitoring/histogram_windowing.cc",
+ "monitoring/instrumented_mutex.cc",
+ "monitoring/iostats_context.cc",
+ "monitoring/perf_context.cc",
+ "monitoring/perf_level.cc",
+ "monitoring/statistics.cc",
+ "monitoring/thread_status_impl.cc",
+ "monitoring/thread_status_updater.cc",
+ "monitoring/thread_status_updater_debug.cc",
+ "monitoring/thread_status_util.cc",
+ "monitoring/thread_status_util_debug.cc",
+ "options/cf_options.cc",
+ "options/db_options.cc",
+ "options/options.cc",
+ "options/options_helper.cc",
+ "options/options_parser.cc",
+ "options/options_sanity_check.cc",
+ "port/port_posix.cc",
+ "port/stack_trace.cc",
+ "table/adaptive_table_factory.cc",
+ "table/block.cc",
+ "table/block_based_filter_block.cc",
+ "table/block_based_table_builder.cc",
+ "table/block_based_table_factory.cc",
+ "table/block_based_table_reader.cc",
+ "table/block_builder.cc",
+ "table/block_prefix_index.cc",
+ "table/bloom_block.cc",
+ "table/cuckoo_table_builder.cc",
+ "table/cuckoo_table_factory.cc",
+ "table/cuckoo_table_reader.cc",
+ "table/flush_block_policy.cc",
+ "table/format.cc",
+ "table/full_filter_block.cc",
+ "table/get_context.cc",
+ "table/index_builder.cc",
+ "table/iterator.cc",
+ "table/merging_iterator.cc",
+ "table/meta_blocks.cc",
+ "table/partitioned_filter_block.cc",
+ "table/persistent_cache_helper.cc",
+ "table/plain_table_builder.cc",
+ "table/plain_table_factory.cc",
+ "table/plain_table_index.cc",
+ "table/plain_table_key_coding.cc",
+ "table/plain_table_reader.cc",
+ "table/sst_file_writer.cc",
+ "table/table_properties.cc",
+ "table/two_level_iterator.cc",
+ "tools/dump/db_dump_tool.cc",
+ "tools/ldb_cmd.cc",
+ "tools/ldb_tool.cc",
+ "tools/sst_dump_tool.cc",
+ "util/arena.cc",
+ "util/auto_roll_logger.cc",
+ "util/bloom.cc",
+ "util/build_version.cc",
+ "util/coding.cc",
+ "util/compaction_job_stats_impl.cc",
+ "util/comparator.cc",
+ "util/concurrent_arena.cc",
+ "util/crc32c.cc",
+ "util/delete_scheduler.cc",
+ "util/dynamic_bloom.cc",
+ "util/event_logger.cc",
+ "util/file_reader_writer.cc",
+ "util/file_util.cc",
+ "util/filename.cc",
+ "util/filter_policy.cc",
+ "util/hash.cc",
+ "util/log_buffer.cc",
+ "util/murmurhash.cc",
+ "util/random.cc",
+ "util/rate_limiter.cc",
+ "util/slice.cc",
+ "util/sst_file_manager_impl.cc",
+ "util/status.cc",
+ "util/status_message.cc",
+ "util/string_util.cc",
+ "util/sync_point.cc",
+ "util/thread_local.cc",
+ "util/threadpool_imp.cc",
+ "util/transaction_test_util.cc",
+ "util/xxhash.cc",
+ "utilities/backupable/backupable_db.cc",
+ "utilities/blob_db/blob_db.cc",
+ "utilities/blob_db/blob_db_impl.cc",
+ "utilities/blob_db/blob_dump_tool.cc",
+ "utilities/blob_db/blob_file.cc",
+ "utilities/blob_db/blob_log_format.cc",
+ "utilities/blob_db/blob_log_reader.cc",
+ "utilities/blob_db/blob_log_writer.cc",
+ "utilities/blob_db/ttl_extractor.cc",
+ "utilities/cassandra/cassandra_compaction_filter.cc",
+ "utilities/cassandra/format.cc",
+ "utilities/cassandra/merge_operator.cc",
+ "utilities/checkpoint/checkpoint_impl.cc",
+ "utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc",
+ "utilities/convenience/info_log_finder.cc",
+ "utilities/date_tiered/date_tiered_db_impl.cc",
+ "utilities/debug.cc",
+ "utilities/document/document_db.cc",
+ "utilities/document/json_document.cc",
+ "utilities/document/json_document_builder.cc",
+ "utilities/env_mirror.cc",
+ "utilities/env_timed.cc",
+ "utilities/geodb/geodb_impl.cc",
+ "utilities/leveldb_options/leveldb_options.cc",
+ "utilities/lua/rocks_lua_compaction_filter.cc",
+ "utilities/memory/memory_util.cc",
+ "utilities/merge_operators/max.cc",
+ "utilities/merge_operators/put.cc",
+ "utilities/merge_operators/string_append/stringappend.cc",
+ "utilities/merge_operators/string_append/stringappend2.cc",
+ "utilities/merge_operators/uint64add.cc",
+ "utilities/option_change_migration/option_change_migration.cc",
+ "utilities/options/options_util.cc",
+ "utilities/persistent_cache/block_cache_tier.cc",
+ "utilities/persistent_cache/block_cache_tier_file.cc",
+ "utilities/persistent_cache/block_cache_tier_metadata.cc",
+ "utilities/persistent_cache/persistent_cache_tier.cc",
+ "utilities/persistent_cache/volatile_tier_impl.cc",
+ "utilities/redis/redis_lists.cc",
+ "utilities/simulator_cache/sim_cache.cc",
+ "utilities/spatialdb/spatial_db.cc",
+ "utilities/table_properties_collectors/compact_on_deletion_collector.cc",
+ "utilities/transactions/optimistic_transaction.cc",
+ "utilities/transactions/optimistic_transaction_db_impl.cc",
+ "utilities/transactions/pessimistic_transaction.cc",
+ "utilities/transactions/pessimistic_transaction_db.cc",
+ "utilities/transactions/snapshot_checker.cc",
+ "utilities/transactions/transaction_base.cc",
+ "utilities/transactions/transaction_db_mutex_impl.cc",
+ "utilities/transactions/transaction_lock_mgr.cc",
+ "utilities/transactions/transaction_util.cc",
+ "utilities/transactions/write_prepared_txn.cc",
+ "utilities/transactions/write_prepared_txn_db.cc",
+ "utilities/ttl/db_ttl_impl.cc",
+ "utilities/write_batch_with_index/write_batch_with_index.cc",
+ "utilities/write_batch_with_index/write_batch_with_index_internal.cc",
+ ],
+ headers = AutoHeaders.RECURSIVE_GLOB,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [],
+ external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+ name = "rocksdb_test_lib",
+ srcs = [
+ "db/db_test_util.cc",
+ "table/mock_table.cc",
+ "util/fault_injection_test_env.cc",
+ "util/testharness.cc",
+ "util/testutil.cc",
+ "utilities/cassandra/test_utils.cc",
+ "utilities/col_buf_decoder.cc",
+ "utilities/col_buf_encoder.cc",
+ "utilities/column_aware_encoding_util.cc",
+ ],
+ headers = AutoHeaders.RECURSIVE_GLOB,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [":rocksdb_lib"],
+ external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+ name = "rocksdb_tools_lib",
+ srcs = [
+ "tools/db_bench_tool.cc",
+ "util/testutil.cc",
+ ],
+ headers = AutoHeaders.RECURSIVE_GLOB,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [":rocksdb_lib"],
+ external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+ name = "env_basic_test_lib",
+ srcs = ["env/env_basic_test.cc"],
+ headers = AutoHeaders.RECURSIVE_GLOB,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [":rocksdb_test_lib"],
+ external_deps = rocksdb_external_deps,
+)
+
+# [test_name, test_src, test_type]
+ROCKS_TESTS = [
+ [
+ "arena_test",
+ "util/arena_test.cc",
+ "serial",
+ ],
+ [
+ "auto_roll_logger_test",
+ "util/auto_roll_logger_test.cc",
+ "serial",
+ ],
+ [
+ "autovector_test",
+ "util/autovector_test.cc",
+ "serial",
+ ],
+ [
+ "backupable_db_test",
+ "utilities/backupable/backupable_db_test.cc",
+ "parallel",
+ ],
+ [
+ "blob_db_test",
+ "utilities/blob_db/blob_db_test.cc",
+ "serial",
+ ],
+ [
+ "block_based_filter_block_test",
+ "table/block_based_filter_block_test.cc",
+ "serial",
+ ],
+ [
+ "block_test",
+ "table/block_test.cc",
+ "serial",
+ ],
+ [
+ "bloom_test",
+ "util/bloom_test.cc",
+ "serial",
+ ],
+ [
+ "c_test",
+ "db/c_test.c",
+ "serial",
+ ],
+ [
+ "cache_test",
+ "cache/cache_test.cc",
+ "serial",
+ ],
+ [
+ "cassandra_format_test",
+ "utilities/cassandra/cassandra_format_test.cc",
+ "serial",
+ ],
+ [
+ "cassandra_functional_test",
+ "utilities/cassandra/cassandra_functional_test.cc",
+ "serial",
+ ],
+ [
+ "cassandra_row_merge_test",
+ "utilities/cassandra/cassandra_row_merge_test.cc",
+ "serial",
+ ],
+ [
+ "cassandra_serialize_test",
+ "utilities/cassandra/cassandra_serialize_test.cc",
+ "serial",
+ ],
+ [
+ "checkpoint_test",
+ "utilities/checkpoint/checkpoint_test.cc",
+ "serial",
+ ],
+ [
+ "cleanable_test",
+ "table/cleanable_test.cc",
+ "serial",
+ ],
+ [
+ "coding_test",
+ "util/coding_test.cc",
+ "serial",
+ ],
+ [
+ "column_aware_encoding_test",
+ "utilities/column_aware_encoding_test.cc",
+ "serial",
+ ],
+ [
+ "column_family_test",
+ "db/column_family_test.cc",
+ "serial",
+ ],
+ [
+ "compact_files_test",
+ "db/compact_files_test.cc",
+ "serial",
+ ],
+ [
+ "compact_on_deletion_collector_test",
+ "utilities/table_properties_collectors/compact_on_deletion_collector_test.cc",
+ "serial",
+ ],
+ [
+ "compaction_iterator_test",
+ "db/compaction_iterator_test.cc",
+ "serial",
+ ],
+ [
+ "compaction_job_stats_test",
+ "db/compaction_job_stats_test.cc",
+ "serial",
+ ],
+ [
+ "compaction_job_test",
+ "db/compaction_job_test.cc",
+ "serial",
+ ],
+ [
+ "compaction_picker_test",
+ "db/compaction_picker_test.cc",
+ "serial",
+ ],
+ [
+ "comparator_db_test",
+ "db/comparator_db_test.cc",
+ "serial",
+ ],
+ [
+ "corruption_test",
+ "db/corruption_test.cc",
+ "serial",
+ ],
+ [
+ "crc32c_test",
+ "util/crc32c_test.cc",
+ "serial",
+ ],
+ [
+ "cuckoo_table_builder_test",
+ "table/cuckoo_table_builder_test.cc",
+ "serial",
+ ],
+ [
+ "cuckoo_table_db_test",
+ "db/cuckoo_table_db_test.cc",
+ "serial",
+ ],
+ [
+ "cuckoo_table_reader_test",
+ "table/cuckoo_table_reader_test.cc",
+ "serial",
+ ],
+ [
+ "date_tiered_test",
+ "utilities/date_tiered/date_tiered_test.cc",
+ "serial",
+ ],
+ [
+ "db_basic_test",
+ "db/db_basic_test.cc",
+ "serial",
+ ],
+ [
+ "db_blob_index_test",
+ "db/db_blob_index_test.cc",
+ "serial",
+ ],
+ [
+ "db_block_cache_test",
+ "db/db_block_cache_test.cc",
+ "serial",
+ ],
+ [
+ "db_bloom_filter_test",
+ "db/db_bloom_filter_test.cc",
+ "serial",
+ ],
+ [
+ "db_compaction_filter_test",
+ "db/db_compaction_filter_test.cc",
+ "parallel",
+ ],
+ [
+ "db_compaction_test",
+ "db/db_compaction_test.cc",
+ "parallel",
+ ],
+ [
+ "db_dynamic_level_test",
+ "db/db_dynamic_level_test.cc",
+ "serial",
+ ],
+ [
+ "db_encryption_test",
+ "db/db_encryption_test.cc",
+ "serial",
+ ],
+ [
+ "db_flush_test",
+ "db/db_flush_test.cc",
+ "serial",
+ ],
+ [
+ "db_inplace_update_test",
+ "db/db_inplace_update_test.cc",
+ "serial",
+ ],
+ [
+ "db_io_failure_test",
+ "db/db_io_failure_test.cc",
+ "serial",
+ ],
+ [
+ "db_iter_test",
+ "db/db_iter_test.cc",
+ "serial",
+ ],
+ [
+ "db_iterator_test",
+ "db/db_iterator_test.cc",
+ "serial",
+ ],
+ [
+ "db_log_iter_test",
+ "db/db_log_iter_test.cc",
+ "serial",
+ ],
+ [
+ "db_memtable_test",
+ "db/db_memtable_test.cc",
+ "serial",
+ ],
+ [
+ "db_merge_operator_test",
+ "db/db_merge_operator_test.cc",
+ "serial",
+ ],
+ [
+ "db_options_test",
+ "db/db_options_test.cc",
+ "serial",
+ ],
+ [
+ "db_properties_test",
+ "db/db_properties_test.cc",
+ "serial",
+ ],
+ [
+ "db_range_del_test",
+ "db/db_range_del_test.cc",
+ "serial",
+ ],
+ [
+ "db_sst_test",
+ "db/db_sst_test.cc",
+ "parallel",
+ ],
+ [
+ "db_statistics_test",
+ "db/db_statistics_test.cc",
+ "serial",
+ ],
+ [
+ "db_table_properties_test",
+ "db/db_table_properties_test.cc",
+ "serial",
+ ],
+ [
+ "db_tailing_iter_test",
+ "db/db_tailing_iter_test.cc",
+ "serial",
+ ],
+ [
+ "db_test",
+ "db/db_test.cc",
+ "parallel",
+ ],
+ [
+ "db_test2",
+ "db/db_test2.cc",
+ "serial",
+ ],
+ [
+ "db_universal_compaction_test",
+ "db/db_universal_compaction_test.cc",
+ "parallel",
+ ],
+ [
+ "db_wal_test",
+ "db/db_wal_test.cc",
+ "parallel",
+ ],
+ [
+ "db_write_test",
+ "db/db_write_test.cc",
+ "serial",
+ ],
+ [
+ "dbformat_test",
+ "db/dbformat_test.cc",
+ "serial",
+ ],
+ [
+ "delete_scheduler_test",
+ "util/delete_scheduler_test.cc",
+ "serial",
+ ],
+ [
+ "deletefile_test",
+ "db/deletefile_test.cc",
+ "serial",
+ ],
+ [
+ "document_db_test",
+ "utilities/document/document_db_test.cc",
+ "serial",
+ ],
+ [
+ "dynamic_bloom_test",
+ "util/dynamic_bloom_test.cc",
+ "serial",
+ ],
+ [
+ "env_basic_test",
+ "env/env_basic_test.cc",
+ "serial",
+ ],
+ [
+ "env_test",
+ "env/env_test.cc",
+ "serial",
+ ],
+ [
+ "env_timed_test",
+ "utilities/env_timed_test.cc",
+ "serial",
+ ],
+ [
+ "event_logger_test",
+ "util/event_logger_test.cc",
+ "serial",
+ ],
+ [
+ "external_sst_file_basic_test",
+ "db/external_sst_file_basic_test.cc",
+ "serial",
+ ],
+ [
+ "external_sst_file_test",
+ "db/external_sst_file_test.cc",
+ "parallel",
+ ],
+ [
+ "fault_injection_test",
+ "db/fault_injection_test.cc",
+ "parallel",
+ ],
+ [
+ "file_indexer_test",
+ "db/file_indexer_test.cc",
+ "serial",
+ ],
+ [
+ "file_reader_writer_test",
+ "util/file_reader_writer_test.cc",
+ "serial",
+ ],
+ [
+ "filelock_test",
+ "util/filelock_test.cc",
+ "serial",
+ ],
+ [
+ "filename_test",
+ "db/filename_test.cc",
+ "serial",
+ ],
+ [
+ "flush_job_test",
+ "db/flush_job_test.cc",
+ "serial",
+ ],
+ [
+ "full_filter_block_test",
+ "table/full_filter_block_test.cc",
+ "serial",
+ ],
+ [
+ "geodb_test",
+ "utilities/geodb/geodb_test.cc",
+ "serial",
+ ],
+ [
+ "hash_table_test",
+ "utilities/persistent_cache/hash_table_test.cc",
+ "serial",
+ ],
+ [
+ "hash_test",
+ "util/hash_test.cc",
+ "serial",
+ ],
+ [
+ "heap_test",
+ "util/heap_test.cc",
+ "serial",
+ ],
+ [
+ "histogram_test",
+ "monitoring/histogram_test.cc",
+ "serial",
+ ],
+ [
+ "inlineskiplist_test",
+ "memtable/inlineskiplist_test.cc",
+ "parallel",
+ ],
+ [
+ "iostats_context_test",
+ "monitoring/iostats_context_test.cc",
+ "serial",
+ ],
+ [
+ "json_document_test",
+ "utilities/document/json_document_test.cc",
+ "serial",
+ ],
+ [
+ "ldb_cmd_test",
+ "tools/ldb_cmd_test.cc",
+ "serial",
+ ],
+ [
+ "listener_test",
+ "db/listener_test.cc",
+ "serial",
+ ],
+ [
+ "log_test",
+ "db/log_test.cc",
+ "serial",
+ ],
+ [
+ "lru_cache_test",
+ "cache/lru_cache_test.cc",
+ "serial",
+ ],
+ [
+ "manual_compaction_test",
+ "db/manual_compaction_test.cc",
+ "parallel",
+ ],
+ [
+ "memory_test",
+ "utilities/memory/memory_test.cc",
+ "serial",
+ ],
+ [
+ "memtable_list_test",
+ "db/memtable_list_test.cc",
+ "serial",
+ ],
+ [
+ "merge_helper_test",
+ "db/merge_helper_test.cc",
+ "serial",
+ ],
+ [
+ "merge_test",
+ "db/merge_test.cc",
+ "serial",
+ ],
+ [
+ "merger_test",
+ "table/merger_test.cc",
+ "serial",
+ ],
+ [
+ "mock_env_test",
+ "env/mock_env_test.cc",
+ "serial",
+ ],
+ [
+ "object_registry_test",
+ "utilities/object_registry_test.cc",
+ "serial",
+ ],
+ [
+ "optimistic_transaction_test",
+ "utilities/transactions/optimistic_transaction_test.cc",
+ "serial",
+ ],
+ [
+ "option_change_migration_test",
+ "utilities/option_change_migration/option_change_migration_test.cc",
+ "serial",
+ ],
+ [
+ "options_file_test",
+ "db/options_file_test.cc",
+ "serial",
+ ],
+ [
+ "options_settable_test",
+ "options/options_settable_test.cc",
+ "serial",
+ ],
+ [
+ "options_test",
+ "options/options_test.cc",
+ "serial",
+ ],
+ [
+ "options_util_test",
+ "utilities/options/options_util_test.cc",
+ "serial",
+ ],
+ [
+ "partitioned_filter_block_test",
+ "table/partitioned_filter_block_test.cc",
+ "serial",
+ ],
+ [
+ "perf_context_test",
+ "db/perf_context_test.cc",
+ "serial",
+ ],
+ [
+ "persistent_cache_test",
+ "utilities/persistent_cache/persistent_cache_test.cc",
+ "parallel",
+ ],
+ [
+ "plain_table_db_test",
+ "db/plain_table_db_test.cc",
+ "serial",
+ ],
+ [
+ "prefix_test",
+ "db/prefix_test.cc",
+ "serial",
+ ],
+ [
+ "range_del_aggregator_test",
+ "db/range_del_aggregator_test.cc",
+ "serial",
+ ],
+ [
+ "rate_limiter_test",
+ "util/rate_limiter_test.cc",
+ "serial",
+ ],
+ [
+ "reduce_levels_test",
+ "tools/reduce_levels_test.cc",
+ "serial",
+ ],
+ [
+ "repair_test",
+ "db/repair_test.cc",
+ "serial",
+ ],
+ [
+ "sim_cache_test",
+ "utilities/simulator_cache/sim_cache_test.cc",
+ "serial",
+ ],
+ [
+ "skiplist_test",
+ "memtable/skiplist_test.cc",
+ "serial",
+ ],
+ [
+ "slice_transform_test",
+ "util/slice_transform_test.cc",
+ "serial",
+ ],
+ [
+ "spatial_db_test",
+ "utilities/spatialdb/spatial_db_test.cc",
+ "serial",
+ ],
+ [
+ "sst_dump_test",
+ "tools/sst_dump_test.cc",
+ "serial",
+ ],
+ [
+ "statistics_test",
+ "monitoring/statistics_test.cc",
+ "serial",
+ ],
+ [
+ "stringappend_test",
+ "utilities/merge_operators/string_append/stringappend_test.cc",
+ "serial",
+ ],
+ [
+ "table_properties_collector_test",
+ "db/table_properties_collector_test.cc",
+ "serial",
+ ],
+ [
+ "table_test",
+ "table/table_test.cc",
+ "parallel",
+ ],
+ [
+ "thread_list_test",
+ "util/thread_list_test.cc",
+ "serial",
+ ],
+ [
+ "thread_local_test",
+ "util/thread_local_test.cc",
+ "serial",
+ ],
+ [
+ "timer_queue_test",
+ "util/timer_queue_test.cc",
+ "serial",
+ ],
+ [
+ "transaction_test",
+ "utilities/transactions/transaction_test.cc",
+ "parallel",
+ ],
+ [
+ "ttl_test",
+ "utilities/ttl/ttl_test.cc",
+ "serial",
+ ],
+ [
+ "util_merge_operators_test",
+ "utilities/util_merge_operators_test.cc",
+ "serial",
+ ],
+ [
+ "version_builder_test",
+ "db/version_builder_test.cc",
+ "serial",
+ ],
+ [
+ "version_edit_test",
+ "db/version_edit_test.cc",
+ "serial",
+ ],
+ [
+ "version_set_test",
+ "db/version_set_test.cc",
+ "serial",
+ ],
+ [
+ "wal_manager_test",
+ "db/wal_manager_test.cc",
+ "serial",
+ ],
+ [
+ "write_batch_test",
+ "db/write_batch_test.cc",
+ "serial",
+ ],
+ [
+ "write_batch_with_index_test",
+ "utilities/write_batch_with_index/write_batch_with_index_test.cc",
+ "serial",
+ ],
+ [
+ "write_buffer_manager_test",
+ "memtable/write_buffer_manager_test.cc",
+ "serial",
+ ],
+ [
+ "write_callback_test",
+ "db/write_callback_test.cc",
+ "serial",
+ ],
+ [
+ "write_controller_test",
+ "db/write_controller_test.cc",
+ "serial",
+ ],
+ [
+ "write_prepared_transaction_test",
+ "utilities/transactions/write_prepared_transaction_test.cc",
+ "serial",
+ ],
+]
+
+# Generate a test rule for each entry in ROCKS_TESTS
+# Do not build the tests in opt mode, since SyncPoint and other test code
+# will not be included.
+if not is_opt_mode:
+ for test_cfg in ROCKS_TESTS:
+ test_name = test_cfg[0]
+ test_cc = test_cfg[1]
+ ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
+ test_bin = test_name + "_bin"
+
+ cpp_binary (
+ name = test_bin,
+ srcs = [test_cc],
+ deps = [":rocksdb_test_lib"],
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ external_deps = rocksdb_external_deps,
+ )
+
+ custom_unittest(
+ name = test_name,
+ type = ttype,
+ deps = [":" + test_bin],
+ command = [TEST_RUNNER, BUCK_BINS + test_bin]
+ )
+
+custom_unittest(
+ name = "make_rocksdbjavastatic",
+ command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
+ type = "simple",
+)
+
+custom_unittest(
+ name = "make_rocksdb_lite_release",
+ command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
+ type = "simple",
+)
diff --git a/c-deps/rocksdb/USERS.md b/c-deps/rocksdb/USERS.md
new file mode 100644
index 0000000000..86122cd9a0
--- /dev/null
+++ b/c-deps/rocksdb/USERS.md
@@ -0,0 +1,87 @@
+This document lists users of RocksDB and their use cases. If you are using RocksDB, please open a pull request and add yourself to the list.
+
+## Facebook
+At Facebook, we use RocksDB as the storage engine in multiple data management services and as a backend for many different stateful services, including:
+
+1. MyRocks -- https://github.com/MySQLOnRocksDB/mysql-5.6
+2. MongoRocks -- https://github.com/mongodb-partners/mongo-rocks
+3. ZippyDB -- Facebook's distributed key-value store with Paxos-style replication, built on top of RocksDB.[1] https://www.youtube.com/watch?v=DfiN7pG0D0khtt
+4. Laser -- a high query throughput, low (millisecond) latency, key-value storage service built on top of RocksDB.[1]
+5. Dragon -- a distributed graph query engine. https://code.facebook.com/posts/1737605303120405/dragon-a-distributed-graph-query-engine/
+6. Stylus -- a low-level stream processing framework written in C++.[1]
+7. LogDevice -- a distributed data store for logs [2]
+
+[1] https://research.facebook.com/publications/realtime-data-processing-at-facebook/
+[2] https://code.facebook.com/posts/357056558062811/logdevice-a-distributed-data-store-for-logs/
+
+## LinkedIn
+Two different use cases at LinkedIn are using RocksDB as a storage engine:
+
+1. LinkedIn's follow feed for storing users' activities. Check out the blog post: https://engineering.linkedin.com/blog/2016/03/followfeed--linkedin-s-feed-made-faster-and-smarter
+2. Apache Samza, open source framework for stream processing
+
+Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasundaram: http://www.youtube.com/watch?v=plqVp_OnSzg
+
+## Yahoo
+Yahoo is using RocksDB as a storage engine for their biggest distributed data store Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights
+
+## CockroachDB
+CockroachDB is an open-source geo-replicated transactional database (still in development). They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach
+
+## DNANexus
+DNANexus is using RocksDB to speed up processing of genomics data.
+You can learn more from this great blog post by Mike Lin: http://devblog.dnanexus.com/faster-bam-sorting-with-samtools-and-rocksdb/
+
+## Iron.io
+Iron.io is using RocksDB as a storage engine for their distributed queueing system.
+Learn more from Tech Talk by Reed Allman: http://www.youtube.com/watch?v=HTjt6oj-RL4
+
+## Tango Me
+Tango is using RocksDB as a graph storage to store all users' connection data and other social activity data.
+
+## Turn
+Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
+Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf
+
+## Santander UK/Cloudera Professional Services
+Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/
+
+## Airbnb
+Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs
+
+## Pinterest
+Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo
+
+## Smyte
+[Smyte](https://www.smyte.com/) uses RocksDB as the storage layer for their core key-value storage, high-performance counters and time-windowed HyperLogLog services.
+
+## Rakuten Marketing
+[Rakuten Marketing](https://marketing.rakuten.com/) uses RocksDB as the disk cache layer for the real-time bidding service in their Performance DSP.
+
+## VWO, Wingify
+[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.
+
+## quasardb
+[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark.
+quasardb uses a heavily tuned RocksDB as its persistence layer.
+
+## Netflix
+[Netflix](http://techblog.netflix.com/2016/05/application-data-caching-using-ssds.html) uses RocksDB on AWS EC2 instances with local SSD drives to cache application data.
+
+## TiKV
+[TiKV](https://github.com/pingcap/tikv) is a GEO-replicated, high-performance, distributed, transactional key-value database. TiKV is powered by Rust and Raft. TiKV uses RocksDB as its persistence layer.
+
+## Apache Flink
+[Apache Flink](https://flink.apache.org/news/2016/03/08/release-1.0.0.html) uses RocksDB to store state locally on a machine.
+
+## Dgraph
+[Dgraph](https://github.com/dgraph-io/dgraph) is an open-source, scalable, distributed, low-latency, high-throughput graph database. They use RocksDB to store state locally on a machine.
+
+## Uber
+[Uber](http://eng.uber.com/cherami/) uses RocksDB as a durable and scalable task queue.
+
+## 360 Pika
+[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a NoSQL store compatible with Redis. With a huge amount of data stored, Redis may hit a capacity bottleneck, and Pika was born to solve that. It has been widely used in many companies.
+
+## LzLabs
+LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data.
diff --git a/c-deps/rocksdb/Vagrantfile b/c-deps/rocksdb/Vagrantfile
new file mode 100644
index 0000000000..d7c2991d79
--- /dev/null
+++ b/c-deps/rocksdb/Vagrantfile
@@ -0,0 +1,34 @@
+# Vagrant file
+Vagrant.configure("2") do |config|
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = 4096
+ v.cpus = 2
+ end
+
+ config.vm.define "ubuntu14" do |box|
+ box.vm.box = "ubuntu/trusty64"
+ end
+
+ config.vm.define "centos65" do |box|
+ box.vm.box = "chef/centos-6.5"
+ end
+
+ config.vm.define "FreeBSD10" do |box|
+ box.vm.guest = :freebsd
+ box.vm.box = "robin/freebsd-10"
+ # FreeBSD does not support 'mount_virtualbox_shared_folder', use NFS
+ box.vm.synced_folder ".", "/vagrant", :nfs => true, id: "vagrant-root"
+ box.vm.network "private_network", ip: "10.0.1.10"
+
+ # build everything after creating VM, skip using --no-provision
+ box.vm.provision "shell", inline: <<-SCRIPT
+ pkg install -y gmake clang35
+ export CXX=/usr/local/bin/clang++35
+ cd /vagrant
+ gmake clean
+ gmake all OPT=-g
+ SCRIPT
+ end
+
+end
diff --git a/c-deps/rocksdb/WINDOWS_PORT.md b/c-deps/rocksdb/WINDOWS_PORT.md
new file mode 100644
index 0000000000..a0fe1fe11f
--- /dev/null
+++ b/c-deps/rocksdb/WINDOWS_PORT.md
@@ -0,0 +1,228 @@
+# Microsoft Contribution Notes
+
+## Contributors
+* Alexander Zinoviev https://github.com/zinoale
+* Dmitri Smirnov https://github.com/yuslepukhin
+* Praveen Rao https://github.com/PraveenSinghRao
+* Sherlock Huang https://github.com/SherlockNoMad
+
+## Introduction
+RocksDB is a well-proven open source key-value persistent store, optimized for fast storage. It scales with the number of CPUs and storage IOPS to support IO-bound, in-memory and write-once workloads and, most importantly, it is flexible enough to allow for innovation.
+
+As the Microsoft Bing team, we have been continuously pushing hard to improve the scalability and efficiency of the platform and ultimately the satisfaction of Bing end users. We would like to explore the opportunity to embrace open source, RocksDB here: to use, enhance and customize it for our usage, and also to contribute back to the RocksDB community. Herein, we are pleased to offer this RocksDB port for the Windows platform.
+
+These notes describe some decisions and changes we had to make with regards to porting RocksDB on Windows. We hope this will help both reviewers and users of the Windows port.
+We are open for comments and improvements.
+
+## OS specifics
+All of the porting, testing and benchmarking was done on Windows Server 2012 R2 Datacenter 64-bit, but to the best of our knowledge no API we used during porting is unsupported on other Windows releases after Vista.
+
+## Porting goals
+We strive to achieve the following goals:
+* make use of the existing porting interface of RocksDB
+* make minimal modifications within platform-independent code.
+* make all unit tests pass in both debug and release builds.
+  * Note: the latest introduction of SyncPoint seems to disable running db_test in Release.
+* make performance on par with published benchmarks, accounting for HW differences
+* we would like to keep the port code in line with the master branch with no forking
+
+## Build system
+We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient.
+
+At the same time it generates Visual Studio projects that are both usable from a command line and IDE.
+
+The top-level CMakeLists.txt file contains a description of all targets and build rules. It also provides brief instructions on how to build the software for Windows. One more build-related file is thirdparty.inc, which also resides at the top level. This file must be edited to point to the actual location of the third-party libraries.
+We think it would be beneficial to merge the existing make-based build system and the new CMake-based build system into a single one used on all platforms.
+
+All building and testing was done for 64-bit. We have not conducted any testing for 32-bit and early reports indicate that it will not run on 32-bit.
+
+## C++ and STL notes
+We had to make some minimal changes within the portable files that either account for OS differences or for the shortcomings of C++11 support in the current version of the MS compiler. Most or all of them are expected to be fixed in upcoming compiler releases.
+
+We plan to use this port for our business purposes here at Bing, which provided the business justification for it. This also means that, at present, we are not free to choose the compiler version at will.
+
+* Certain headers that are not present and not necessary on Windows were simply `#ifndef OS_WIN` in a few places (`unistd.h`)
+* All POSIX-specific headers were replaced with `port/port.h`, which worked well
+* Replaced `dirent.h` with `port/dirent.h` (very few places), implementing the relevant interfaces within the `rocksdb::port` namespace
+* Replaced `sys/time.h` with `port/sys_time.h` (few places), implementing equivalents within `rocksdb::port`
+* The `printf %z` specification is not supported on Windows. To imitate the existing standard we came up with a string macro `ROCKSDB_PRIszt` which expands to `%zu` on POSIX systems and to `%Iu` on Windows.
+* In-class member initializers were moved into constructors in some cases
+* `constexpr` is not supported. We had to replace `std::numeric_limits<>::max/min()` with its C macros for constants. Sometimes we had to make class members `static const` and place a definition within a .cc file.
+* `constexpr` for functions was replaced with a template specialization (one place)
+* Union members that have non-trivial constructors were replaced with `char[]` in one place, along with bug fixes (spatial experimental feature)
+* Zero-sized arrays are deemed a non-standard extension, so we converted them to arrays of size 1, which should work well for the purposes of these classes.
+* `std::chrono` lacks nanoseconds support (fixed in the upcoming release of the STL), so we had to use `QueryPerformanceCounter()` within env_win.cc
+* Function-local statics initialization is still not thread-safe. Used `std::once` to mitigate within WinEnv.
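+
+As a sketch of the `ROCKSDB_PRIszt` idea above (the real definitions live in the port headers, so treat this as an assumption about their shape), the macro simply supplies the platform-specific length modifier that gets spliced into the format string:
+
+```cpp
+#include <cstdio>
+
+#ifdef OS_WIN
+#define ROCKSDB_PRIszt "Iu"   // Microsoft CRT size_t length modifier
+#else
+#define ROCKSDB_PRIszt "zu"   // C99 size_t length modifier
+#endif
+
+void ReportBytes(size_t n) {
+  std::printf("wrote %" ROCKSDB_PRIszt " bytes\n", n);
+}
+```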
+
+## Windows Environments notes
+We endeavored to make it functionally on par with posix_env. This means we replicated the functionality of the thread pool and other things as precisely as possible, including:
+* Replicate POSIX logic using std::thread primitives.
+* Implement all posix_env disk access functionality.
+* Set `use_os_buffer=false` to disable OS disk buffering for WinWritableFile and WinRandomAccessFile.
+* Replace `pread/pwrite` with `WriteFile/ReadFile` with `OVERLAPPED` structure.
+* Use `SetFileInformationByHandle` to compensate absence of `fallocate`.
+
+### In detail
+Even though Windows provides its own efficient thread-pool implementation, we chose to replicate the POSIX logic using `std::thread` primitives. This allows anyone to quickly detect any changes within the POSIX source code and replicate them within the Windows env. This has proven to work very well. At the same time, anyone who wishes to replace the built-in thread pool can do so using RocksDB stackable environments.
+
+For disk access we implemented all of the functionality present within posix_env, which includes memory-mapped files, random access, rate-limiter support, etc.
+The `use_os_buffer` flag on POSIX platforms currently denotes disabling the read-ahead log via the `fadvise` mechanism. Windows does not have a `fadvise` system call. What is more, it implements the disk cache in a way that differs greatly from Linux. It is not an uncommon practice on Windows to perform un-buffered disk access to gain control over memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput. To compensate, one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, the buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows writes and reads to be performed in cases where un-buffered access does not make sense, such as WAL and MANIFEST.
+
+We have replaced `pread/pwrite` with `WriteFile/ReadFile` plus an `OVERLAPPED` structure so we can atomically seek to the position of the disk operation but still perform the operation synchronously. Thus we are able to emulate the functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position, but that hardly matters given the random nature of access.
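+
+A minimal sketch (an assumption, not the actual WinEnv code) of how a positioned read can be emulated with `ReadFile` plus an `OVERLAPPED` offset, so the read is both positioned and synchronous:
+
+```cpp
+#include <windows.h>
+
+// Returns the number of bytes read, or -1 on failure (names are illustrative only).
+long long PositionalRead(HANDLE file, char* buf, DWORD len, unsigned long long offset) {
+  OVERLAPPED ov = {};
+  ov.Offset = static_cast<DWORD>(offset & 0xFFFFFFFFu);  // low 32 bits of the offset
+  ov.OffsetHigh = static_cast<DWORD>(offset >> 32);      // high 32 bits of the offset
+  DWORD bytes_read = 0;
+  if (!ReadFile(file, buf, len, &bytes_read, &ov)) {
+    return -1;  // unlike pread(), the OS file pointer may still have moved
+  }
+  return bytes_read;
+}
+```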
+
+We used `SetFileInformationByHandle` both to truncate files after writing a full final page to disk and to pre-allocate disk space for faster I/O thus compensating for the absence of `fallocate` although some differences remain. For example, the pre-allocated space is not filled with zeros like on Linux, however, on a positive note, the end of file position is also not modified after pre-allocation.
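+
+The following is only a sketch of that technique, with a hypothetical helper name: `FileAllocationInfo` reserves space up front and `FileEndOfFileInfo` sets the final size, standing in for `fallocate()`/`ftruncate()`:
+
+```cpp
+#include <windows.h>
+
+bool PreallocateAndTruncate(HANDLE file, LONGLONG reserve_bytes, LONGLONG final_size) {
+  FILE_ALLOCATION_INFO alloc = {};
+  alloc.AllocationSize.QuadPart = reserve_bytes;  // pre-allocate disk space for faster I/O
+  if (!SetFileInformationByHandle(file, FileAllocationInfo, &alloc, sizeof(alloc))) {
+    return false;
+  }
+  FILE_END_OF_FILE_INFO eof = {};
+  eof.EndOfFile.QuadPart = final_size;            // trim to the bytes actually written
+  return SetFileInformationByHandle(file, FileEndOfFileInfo, &eof, sizeof(eof)) != 0;
+}
+```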
+
+RocksDB renames, copies and deletes files at will even though they may be opened with another handle at the same time. We had to relax and allow nearly all the concurrent access permissions possible.
+
+## Thread-Local Storage
+Thread-local storage plays a significant role in RocksDB performance. Rather than creating a separate implementation, we chose to create inline wrappers that forward `pthread_specific` calls to the Windows `Tls` interfaces within the `rocksdb::port` namespace. This leaves the existing meat of the logic intact, unchanged and just as maintainable.
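+
+A rough illustration of those forwarding wrappers (a sketch, not the real `port` code; the namespace and signatures here are assumptions):
+
+```cpp
+#include <windows.h>
+
+namespace port_sketch {
+
+typedef DWORD pthread_key_t;
+
+inline int pthread_key_create(pthread_key_t* key, void (*destructor)(void*)) {
+  (void)destructor;  // cleanup is handled separately via the ".CRT$XLB" callback noted below
+  *key = TlsAlloc();
+  return (*key == TLS_OUT_OF_INDEXES) ? -1 : 0;
+}
+
+inline void* pthread_getspecific(pthread_key_t key) { return TlsGetValue(key); }
+
+inline int pthread_setspecific(pthread_key_t key, const void* value) {
+  return TlsSetValue(key, const_cast<void*>(value)) ? 0 : -1;
+}
+
+}  // namespace port_sketch
+```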
+
+To mitigate the lack of thread-local storage cleanup on thread exit, we added a limited amount of Windows-specific code within the same thread_local.cc file that injects a cleanup callback into a `"__tls"` structure within the `".CRT$XLB"` data segment. This approach guarantees that the callback is invoked regardless of whether RocksDB is used within an executable, a standalone DLL or within another DLL.
+
+## Jemalloc usage
+
+When RocksDB is used with Jemalloc the latter needs to be initialized before any of the C++ globals or statics. To accomplish that we injected an initialization routine into `".CRT$XCT"` that is automatically invoked by the runtime before initializing static objects. je-uninit is queued to `atexit()`.
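+
+A minimal sketch of that technique, assuming hypothetical `InitJemalloc()`/`UninitJemalloc()` entry points; MSVC invokes any function pointer placed in the `".CRT$XCT"` section before C++ static initializers run:
+
+```cpp
+#include <cstdlib>
+
+static void InitJemalloc()   { /* hypothetical jemalloc bootstrap would go here */ }
+static void UninitJemalloc() { /* hypothetical jemalloc teardown */ }
+
+static void RunJemallocInit() {
+  InitJemalloc();
+  std::atexit(UninitJemalloc);  // teardown queued to atexit(), as described above
+}
+
+typedef void(__cdecl* CrtInitializer)();
+#pragma section(".CRT$XCT", read)
+__declspec(allocate(".CRT$XCT")) static CrtInitializer init_jemalloc_entry = RunJemallocInit;
+```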
+
+The jemalloc redirecting `new/delete` global operators are used by the linker provided certain conditions are met. See the build section in these notes.
+
+## Stack Trace and Unhandled Exception Handler
+
+We decided not to implement these two features because the hosting program, as a rule, already provides them.
+We experienced no inconvenience debugging issues in the debugger or analyzing process dumps when needed, and thus we did not
+see this as a priority.
+
+## Performance results
+### Setup
+All of the benchmarks are run on the same set of machines. Here are the details of the test setup:
+* 2 Intel(R) Xeon(R) E5 2450 0 @ 2.10 GHz (total 16 cores)
+* 2 XK0480GDQPH SSD Device, total 894GB free disk
+* Machine has 128 GB of RAM
+* Operating System: Windows Server 2012 R2 Datacenter
+* 100 Million keys; each key is of size 10 bytes, each value is of size 800 bytes
+* total database size is ~76GB
+* The performance result is based on RocksDB 3.11.
+* The parameters used, unless specified, were exactly the same as published in the GitHub Wiki page.
+
+### RocksDB on flash storage
+
+#### Test 1. Bulk Load of keys in Random Order
+
+Version 3.11
+
+* Total Run Time: 17.6 min
+* Fillrandom: 5.480 micros/op 182465 ops/sec; 142.0 MB/s
+* Compact: 486056544.000 micros/op 0 ops/sec
+
+Version 3.10
+
+* Total Run Time: 16.2 min
+* Fillrandom: 5.018 micros/op 199269 ops/sec; 155.1 MB/s
+* Compact: 441313173.000 micros/op 0 ops/sec;
+
+
+#### Test 2. Bulk Load of keys in Sequential Order
+
+Version 3.11
+
+* Fillseq: 4.944 micros/op 202k ops/sec; 157.4 MB/s
+
+Version 3.10
+
+* Fillseq: 4.105 micros/op 243.6k ops/sec; 189.6 MB/s
+
+
+#### Test 3. Random Write
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Overwrite: 52.661 micros/op 18.9k ops/sec; 14.8 MB/s
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Overwrite: 52.661 micros/op 18.9k ops/sec;
+
+
+#### Test 4. Random Read
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Readrandom: 15.716 micros/op 63.6k ops/sec; 49.5 MB/s
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Readrandom: 15.548 micros/op 64.3k ops/sec;
+
+
+#### Test 5. Multi-threaded read and single-threaded write
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Readwhilewriting: 25.128 micros/op 39.7k ops/sec;
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Readwhilewriting: 24.854 micros/op 40.2k ops/sec;
+
+
+### RocksDB In Memory
+
+#### Test 1. Point Lookup
+
+Version 3.11
+
+80K writes/sec
+* Write Rate Achieved: 40.5k write/sec;
+* Readwhilewriting: 0.314 micros/op 3187455 ops/sec; 364.8 MB/s (715454999 of 715454999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 50.6k write/sec
+* Readwhilewriting: 0.316 micros/op 3162028 ops/sec; (719576999 of 719576999 found)
+
+
+*10K writes/sec*
+
+Version 3.11
+
+* Write Rate Achieved: 5.8k/s write/sec
+* Readwhilewriting: 0.246 micros/op 4062669 ops/sec; 464.9 MB/s (915481999 of 915481999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 5.8k/s write/sec
+* Readwhilewriting: 0.244 micros/op 4106253 ops/sec; (927986999 of 927986999 found)
+
+
+#### Test 2. Prefix Range Query
+
+Version 3.11
+
+80K writes/sec
+* Write Rate Achieved: 46.3k/s write/sec
+* Readwhilewriting: 0.362 micros/op 2765052 ops/sec; 316.4 MB/s (611549999 of 611549999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 45.8k/s write/sec
+* Readwhilewriting: 0.317 micros/op 3154941 ops/sec; (708158999 of 708158999 found)
+
+Version 3.11
+
+10K writes/sec
+* Write Rate Achieved: 5.78k write/sec
+* Readwhilewriting: 0.269 micros/op 3716692 ops/sec; 425.3 MB/s (837401999 of 837401999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 5.7k write/sec
+* Readwhilewriting: 0.261 micros/op 3830152 ops/sec; (863482999 of 863482999 found)
+
+
+We think that there is still substantial room to improve performance, which will be an ongoing effort for us.
+
diff --git a/c-deps/rocksdb/appveyor.yml b/c-deps/rocksdb/appveyor.yml
new file mode 100644
index 0000000000..bff8df7c6a
--- /dev/null
+++ b/c-deps/rocksdb/appveyor.yml
@@ -0,0 +1,15 @@
+version: 1.0.{build}
+image: Visual Studio 2015
+before_build:
+- md %APPVEYOR_BUILD_FOLDER%\build
+- cd %APPVEYOR_BUILD_FOLDER%\build
+- cmake -G "Visual Studio 14 2015 Win64" -DOPTDBG=1 -DXPRESS=1 -DPORTABLE=1 ..
+- cd ..
+build:
+ project: build\rocksdb.sln
+ parallel: true
+ verbosity: normal
+test:
+test_script:
+- ps: build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test2,db_test,env_basic_test,env_test -Concurrency 8
+
diff --git a/c-deps/rocksdb/buckifier/buckify_rocksdb.py b/c-deps/rocksdb/buckifier/buckify_rocksdb.py
new file mode 100644
index 0000000000..a3c8be3b17
--- /dev/null
+++ b/c-deps/rocksdb/buckifier/buckify_rocksdb.py
@@ -0,0 +1,172 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+from targets_builder import TARGETSBuilder
+from optparse import OptionParser
+import os
+import fnmatch
+import sys
+import tempfile
+
+from util import ColorString
+import util
+
+# tests to export as libraries for inclusion in other projects
+_EXPORTED_TEST_LIBS = ["env_basic_test"]
+
+# Parse src.mk files as a Dictionary of
+# VAR_NAME => list of files
+def parse_src_mk(repo_path):
+ src_mk = repo_path + "/src.mk"
+ src_files = {}
+ for line in open(src_mk):
+ line = line.strip()
+ if len(line) == 0 or line[0] == '#':
+ continue
+ if '=' in line:
+ current_src = line.split('=')[0].strip()
+ src_files[current_src] = []
+ elif '.cc' in line:
+ src_path = line.split('.cc')[0].strip() + '.cc'
+ src_files[current_src].append(src_path)
+ return src_files
+
+
+# get all .cc / .c files
+def get_cc_files(repo_path):
+ cc_files = []
+ for root, dirnames, filenames in os.walk(repo_path):
+ root = root[(len(repo_path) + 1):]
+ if "java" in root:
+ # Skip java
+ continue
+ for filename in fnmatch.filter(filenames, '*.cc'):
+ cc_files.append(os.path.join(root, filename))
+ for filename in fnmatch.filter(filenames, '*.c'):
+ cc_files.append(os.path.join(root, filename))
+ return cc_files
+
+
+# Get tests from Makefile
+def get_tests(repo_path):
+ Makefile = repo_path + "/Makefile"
+
+ # Dictionary TEST_NAME => IS_PARALLEL
+ tests = {}
+
+ found_tests = False
+ for line in open(Makefile):
+ line = line.strip()
+ if line.startswith("TESTS ="):
+ found_tests = True
+ elif found_tests:
+ if line.endswith("\\"):
+ # remove the trailing \
+ line = line[:-1]
+ line = line.strip()
+ tests[line] = False
+ else:
+ # we consumed all the tests
+ break
+
+ found_parallel_tests = False
+ for line in open(Makefile):
+ line = line.strip()
+ if line.startswith("PARALLEL_TEST ="):
+ found_parallel_tests = True
+ elif found_parallel_tests:
+ if line.endswith("\\"):
+ # remove the trailing \
+ line = line[:-1]
+ line = line.strip()
+ tests[line] = True
+ else:
+ # we consumed all the parallel tests
+ break
+
+ return tests
+
+
+# Prepare TARGETS file for buck
+def generate_targets(repo_path):
+ print(ColorString.info("Generating TARGETS"))
+ # parsed src.mk file
+ src_mk = parse_src_mk(repo_path)
+ # get all .cc files
+ cc_files = get_cc_files(repo_path)
+ # get tests from Makefile
+ tests = get_tests(repo_path)
+
+ if src_mk is None or cc_files is None or tests is None:
+ return False
+
+ TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path)
+ # rocksdb_lib
+ TARGETS.add_library(
+ "rocksdb_lib",
+ src_mk["LIB_SOURCES"] +
+ src_mk["TOOL_LIB_SOURCES"])
+ # rocksdb_test_lib
+ TARGETS.add_library(
+ "rocksdb_test_lib",
+ src_mk.get("MOCK_LIB_SOURCES", []) +
+ src_mk.get("TEST_LIB_SOURCES", []) +
+ src_mk.get("EXP_LIB_SOURCES", []),
+ [":rocksdb_lib"])
+ # rocksdb_tools_lib
+ TARGETS.add_library(
+ "rocksdb_tools_lib",
+ src_mk.get("BENCH_LIB_SOURCES", []) +
+ ["util/testutil.cc"],
+ [":rocksdb_lib"])
+
+ # test for every test we found in the Makefile
+ for test in sorted(tests):
+ match_src = [src for src in cc_files if ("/%s.c" % test) in src]
+ if len(match_src) == 0:
+ print(ColorString.warning("Cannot find .cc file for %s" % test))
+ continue
+ elif len(match_src) > 1:
+ print(ColorString.warning("Found more than one .cc for %s" % test))
+ print(match_src)
+ continue
+
+ assert(len(match_src) == 1)
+ is_parallel = tests[test]
+ TARGETS.register_test(test, match_src[0], is_parallel)
+
+ if test in _EXPORTED_TEST_LIBS:
+ test_library = "%s_lib" % test
+ TARGETS.add_library(test_library, match_src, [":rocksdb_test_lib"])
+ TARGETS.flush_tests()
+
+ print(ColorString.info("Generated TARGETS Summary:"))
+ print(ColorString.info("- %d libs" % TARGETS.total_lib))
+    print(ColorString.info("- %d binaries" % TARGETS.total_bin))
+ print(ColorString.info("- %d tests" % TARGETS.total_test))
+ return True
+
+
+def get_rocksdb_path():
+ # rocksdb = {script_dir}/..
+ script_dir = os.path.dirname(sys.argv[0])
+ script_dir = os.path.abspath(script_dir)
+ rocksdb_path = os.path.abspath(
+ os.path.join(script_dir, "../"))
+
+ return rocksdb_path
+
+def exit_with_error(msg):
+ print(ColorString.error(msg))
+ sys.exit(1)
+
+
+def main():
+ # Generate TARGETS file for buck
+ ok = generate_targets(get_rocksdb_path())
+ if not ok:
+ exit_with_error("Failed to generate TARGETS files")
+
+if __name__ == "__main__":
+ main()
diff --git a/c-deps/rocksdb/buckifier/rocks_test_runner.sh b/c-deps/rocksdb/buckifier/rocks_test_runner.sh
new file mode 100755
index 0000000000..e1f48a760d
--- /dev/null
+++ b/c-deps/rocksdb/buckifier/rocks_test_runner.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+# Create a tmp directory for the test to use
+TEST_DIR=$(mktemp -d /dev/shm/fbcode_rocksdb_XXXXXXX)
+TEST_TMPDIR="$TEST_DIR" $@ && rm -rf "$TEST_DIR"
diff --git a/c-deps/rocksdb/buckifier/targets_builder.py b/c-deps/rocksdb/buckifier/targets_builder.py
new file mode 100644
index 0000000000..7d700a21f0
--- /dev/null
+++ b/c-deps/rocksdb/buckifier/targets_builder.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import targets_cfg
+
+def pretty_list(lst, indent=8):
+ if lst is None or len(lst) == 0:
+ return ""
+
+ if len(lst) == 1:
+ return "\"%s\"" % lst[0]
+
+ separator = "\",\n%s\"" % (" " * indent)
+ res = separator.join(sorted(lst))
+ res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 4))
+ return res
+
+
+class TARGETSBuilder:
+ def __init__(self, path):
+ self.path = path
+ self.targets_file = open(path, 'w')
+ self.targets_file.write(targets_cfg.rocksdb_target_header)
+ self.total_lib = 0
+ self.total_bin = 0
+ self.total_test = 0
+ self.tests_cfg = ""
+
+ def __del__(self):
+ self.targets_file.close()
+
+ def add_library(self, name, srcs, deps=None, headers=None):
+ if headers is None:
+ headers = "AutoHeaders.RECURSIVE_GLOB"
+ self.targets_file.write(targets_cfg.library_template % (
+ name,
+ pretty_list(srcs),
+ headers,
+ pretty_list(deps)))
+ self.total_lib = self.total_lib + 1
+
+ def add_binary(self, name, srcs, deps=None):
+ self.targets_file.write(targets_cfg.binary_template % (
+ name,
+ pretty_list(srcs),
+ pretty_list(deps)))
+ self.total_bin = self.total_bin + 1
+
+ def register_test(self, test_name, src, is_parallel):
+ exec_mode = "serial"
+ if is_parallel:
+ exec_mode = "parallel"
+ self.tests_cfg += targets_cfg.test_cfg_template % (
+ test_name,
+ str(src),
+ str(exec_mode))
+
+ self.total_test = self.total_test + 1
+
+ def flush_tests(self):
+ self.targets_file.write(targets_cfg.unittests_template % self.tests_cfg)
+ self.tests_cfg = ""
diff --git a/c-deps/rocksdb/buckifier/targets_cfg.py b/c-deps/rocksdb/buckifier/targets_cfg.py
new file mode 100644
index 0000000000..7aee5bdc9c
--- /dev/null
+++ b/c-deps/rocksdb/buckifier/targets_cfg.py
@@ -0,0 +1,142 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+rocksdb_target_header = """REPO_PATH = package_name() + "/"
+
+BUCK_BINS = "buck-out/gen/" + REPO_PATH
+
+TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
+
+rocksdb_compiler_flags = [
+ "-fno-builtin-memcmp",
+ "-DROCKSDB_PLATFORM_POSIX",
+ "-DROCKSDB_LIB_IO_POSIX",
+ "-DROCKSDB_FALLOCATE_PRESENT",
+ "-DROCKSDB_MALLOC_USABLE_SIZE",
+ "-DROCKSDB_RANGESYNC_PRESENT",
+ "-DROCKSDB_SCHED_GETCPU_PRESENT",
+ "-DROCKSDB_SUPPORT_THREAD_LOCAL",
+ "-DOS_LINUX",
+ # Flags to enable libs we include
+ "-DSNAPPY",
+ "-DZLIB",
+ "-DBZIP2",
+ "-DLZ4",
+ "-DZSTD",
+ "-DGFLAGS=gflags",
+ "-DNUMA",
+ "-DTBB",
+ # Needed to compile in fbcode
+ "-Wno-expansion-to-defined",
+]
+
+rocksdb_external_deps = [
+ ("bzip2", None, "bz2"),
+ ("snappy", None, "snappy"),
+ ("zlib", None, "z"),
+ ("gflags", None, "gflags"),
+ ("lz4", None, "lz4"),
+ ("zstd", None),
+ ("tbb", None),
+ ("numa", None, "numa"),
+ ("googletest", None, "gtest"),
+]
+
+rocksdb_preprocessor_flags = [
+ # Directories with files for #include
+ "-I" + REPO_PATH + "include/",
+ "-I" + REPO_PATH,
+]
+
+rocksdb_arch_preprocessor_flags = {
+ "x86_64": ["-DHAVE_SSE42"],
+}
+
+build_mode = read_config("fbcode", "build_mode")
+
+is_opt_mode = build_mode.startswith("opt")
+
+# -DNDEBUG is added by default in opt mode in fbcode. But adding it twice
+# doesn't harm and avoids forgetting to add it.
+if is_opt_mode:
+ rocksdb_compiler_flags.append("-DNDEBUG")
+"""
+
+
+library_template = """
+cpp_library(
+ name = "%s",
+ srcs = [%s],
+ headers = %s,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [%s],
+ external_deps = rocksdb_external_deps,
+)
+"""
+
+binary_template = """
+cpp_binary(
+ name = "%s",
+ srcs = [%s],
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ deps = [%s],
+ external_deps = rocksdb_external_deps,
+)
+"""
+
+test_cfg_template = """ [
+ "%s",
+ "%s",
+ "%s",
+ ],
+"""
+
+unittests_template = """
+# [test_name, test_src, test_type]
+ROCKS_TESTS = [
+%s]
+
+# Generate a test rule for each entry in ROCKS_TESTS
+# Do not build the tests in opt mode, since SyncPoint and other test code
+# will not be included.
+if not is_opt_mode:
+ for test_cfg in ROCKS_TESTS:
+ test_name = test_cfg[0]
+ test_cc = test_cfg[1]
+ ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
+ test_bin = test_name + "_bin"
+
+ cpp_binary (
+ name = test_bin,
+ srcs = [test_cc],
+ deps = [":rocksdb_test_lib"],
+ preprocessor_flags = rocksdb_preprocessor_flags,
+ arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+ compiler_flags = rocksdb_compiler_flags,
+ external_deps = rocksdb_external_deps,
+ )
+
+ custom_unittest(
+ name = test_name,
+ type = ttype,
+ deps = [":" + test_bin],
+ command = [TEST_RUNNER, BUCK_BINS + test_bin]
+ )
+
+custom_unittest(
+ name = "make_rocksdbjavastatic",
+ command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
+ type = "simple",
+)
+
+custom_unittest(
+ name = "make_rocksdb_lite_release",
+ command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
+ type = "simple",
+)
+"""
diff --git a/c-deps/rocksdb/buckifier/util.py b/c-deps/rocksdb/buckifier/util.py
new file mode 100644
index 0000000000..350b7335c3
--- /dev/null
+++ b/c-deps/rocksdb/buckifier/util.py
@@ -0,0 +1,107 @@
+"""
+This module keeps commonly used components.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import subprocess
+import os
+import time
+
+class ColorString:
+ """ Generate colorful strings on terminal """
+ HEADER = '\033[95m'
+ BLUE = '\033[94m'
+ GREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+
+ @staticmethod
+ def _make_color_str(text, color):
+ return "".join([color, text.encode('utf-8'), ColorString.ENDC])
+
+ @staticmethod
+ def ok(text):
+ if ColorString.is_disabled:
+ return text
+ return ColorString._make_color_str(text, ColorString.GREEN)
+
+ @staticmethod
+ def info(text):
+ if ColorString.is_disabled:
+ return text
+ return ColorString._make_color_str(text, ColorString.BLUE)
+
+ @staticmethod
+ def header(text):
+ if ColorString.is_disabled:
+ return text
+ return ColorString._make_color_str(text, ColorString.HEADER)
+
+ @staticmethod
+ def error(text):
+ if ColorString.is_disabled:
+ return text
+ return ColorString._make_color_str(text, ColorString.FAIL)
+
+ @staticmethod
+ def warning(text):
+ if ColorString.is_disabled:
+ return text
+ return ColorString._make_color_str(text, ColorString.WARNING)
+
+ is_disabled = False
+
+
+def run_shell_command(shell_cmd, cmd_dir=None):
+ """ Run a single shell command.
+ @returns a tuple of shell command return code, stdout, stderr """
+
+ if cmd_dir is not None and not os.path.exists(cmd_dir):
+ run_shell_command("mkdir -p %s" % cmd_dir)
+
+ start = time.time()
+ print("\t>>> Running: " + shell_cmd)
+ p = subprocess.Popen(shell_cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=cmd_dir)
+ stdout, stderr = p.communicate()
+ end = time.time()
+
+ # Report time if we spent more than 5 minutes executing a command
+ execution_time = end - start
+ if execution_time > (60 * 5):
+ mins = (execution_time / 60)
+ secs = (execution_time % 60)
+ print("\t>time spent: %d minutes %d seconds" % (mins, secs))
+
+
+ return p.returncode, stdout, stderr
+
+
+def run_shell_commands(shell_cmds, cmd_dir=None, verbose=False):
+ """ Execute a sequence of shell commands, which is equivalent to
+ running `cmd1 && cmd2 && cmd3`
+    @returns boolean indicating whether all commands succeeded.
+ """
+
+ if cmd_dir:
+ print("\t=== Set current working directory => %s" % cmd_dir)
+
+ for shell_cmd in shell_cmds:
+ ret_code, stdout, stderr = run_shell_command(shell_cmd, cmd_dir)
+ if stdout:
+ if verbose or ret_code != 0:
+ print(ColorString.info("stdout: \n"), stdout)
+ if stderr:
+            # content in stderr is not necessarily an error message.
+ if verbose or ret_code != 0:
+ print(ColorString.error("stderr: \n"), stderr)
+ if ret_code != 0:
+ return False
+
+ return True
diff --git a/c-deps/rocksdb/build_tools/RocksDBCommonHelper.php b/c-deps/rocksdb/build_tools/RocksDBCommonHelper.php
new file mode 100644
index 0000000000..9fe770fe95
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/RocksDBCommonHelper.php
@@ -0,0 +1,377 @@
+<?php
+
+function postURL($diffID, $url) {
+  assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+ assert(strlen($url) > 0);
+
+ $cmd_args = array(
+ 'diff_id' => (int)$diffID,
+ 'name' => sprintf(
+ 'click here for sandcastle tests for D%d',
+ (int)$diffID
+ ),
+ 'link' => $url
+ );
+ $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
+ . ' | arc call-conduit differential.updateunitresults';
+
+ shell_exec($cmd);
+}
+
+function buildUpdateTestStatusCmd($diffID, $test, $status) {
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+ assert(strlen($test) > 0);
+ assert(strlen($status) > 0);
+
+ $cmd_args = array(
+ 'diff_id' => (int)$diffID,
+ 'name' => $test,
+ 'result' => $status
+ );
+
+ $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
+ . ' | arc call-conduit differential.updateunitresults';
+
+ return $cmd;
+}
+
+function updateTestStatus($diffID, $test) {
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+ assert(strlen($test) > 0);
+
+ shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting"));
+}
+
+function getSteps($applyDiff, $diffID, $username, $test) {
+ assert(strlen($username) > 0);
+ assert(strlen($test) > 0);
+
+ if ($applyDiff) {
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+
+ $arcrc_content = (PHP_OS == "Darwin" ?
+ exec("cat ~/.arcrc | gzip -f | base64") :
+ exec("cat ~/.arcrc | gzip -f | base64 -w0"));
+ assert(strlen($arcrc_content) > 0);
+
+ // Sandcastle machines don't have arc setup. We copy the user certificate
+ // and authenticate using that in Sandcastle.
+ $setup = array(
+ "name" => "Setup arcrc",
+ "shell" => "echo " . escapeshellarg($arcrc_content) . " | base64 --decode"
+ . " | gzip -d > ~/.arcrc",
+ "user" => "root"
+ );
+
+ // arc demands certain permission on its config.
+ // also fix the sticky bit issue in sandcastle
+ $fix_permission = array(
+ "name" => "Fix environment",
+ "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm",
+ "user" => "root"
+ );
+
+ // Construct the steps in the order of execution.
+ $steps[] = $setup;
+ $steps[] = $fix_permission;
+ }
+
+  // fbcode is a sub-repo. We cannot patch until we add it to the ignore list,
+  // otherwise Git thinks it is an uncommitted change.
+ $fix_git_ignore = array(
+ "name" => "Fix git ignore",
+ "shell" => "echo fbcode >> .git/info/exclude",
+ "user" => "root"
+ );
+
+ // This fixes "FATAL: ThreadSanitizer can not mmap the shadow memory"
+ // Source:
+ // https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual#FAQ
+ $fix_kernel_issue = array(
+ "name" => "Fix kernel issue with tsan",
+ "shell" => "echo 2 >/proc/sys/kernel/randomize_va_space",
+ "user" => "root"
+ );
+
+ $steps[] = $fix_git_ignore;
+ $steps[] = $fix_kernel_issue;
+
+ // This will be the command used to execute particular type of tests.
+ $cmd = "";
+
+ if ($applyDiff) {
+    // Patch the code (keep your fingers crossed).
+ $patch = array(
+ "name" => "Patch " . $diffID,
+ "shell" => "arc --arcrc-file ~/.arcrc "
+ . "patch --nocommit --diff " . escapeshellarg($diffID),
+ "user" => "root"
+ );
+
+ $steps[] = $patch;
+
+ updateTestStatus($diffID, $test);
+ $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; ";
+ }
+
+ // Run the actual command.
+ $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " .
+ escapeshellarg($test) . "; exit_code=$?; ";
+
+ if ($applyDiff) {
+ $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&"
+ . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")"
+ . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail")
+ . "; ";
+ }
+
+ // shell command to sort the tests based on exit code and print
+ // the output of the log files.
+ $cat_sorted_logs = "
+ while read code log_file;
+ do echo \"################ cat \$log_file [exit_code : \$code] ################\";
+ cat \$log_file;
+ done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')";
+
+ // Shell command to cat all log files
+ $cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done";
+
+ // If LOG file exist use it to cat log files sorted by exit code, otherwise
+ // cat everything
+ $logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi";
+
+ $cmd = $cmd . " cat /tmp/precommit-check.log"
+ . "; shopt -s extglob; {$logs_cmd}"
+ . "; shopt -u extglob; [[ \$exit_code -eq 0 ]]";
+ assert(strlen($cmd) > 0);
+
+ $run_test = array(
+ "name" => "Run " . $test,
+ "shell" => $cmd,
+ "user" => "root",
+ "parser" => "python build_tools/error_filter.py " . escapeshellarg($test),
+ );
+
+ $steps[] = $run_test;
+
+ if ($applyDiff) {
+ // Clean up the user arc config we are using.
+ $cleanup = array(
+ "name" => "Arc cleanup",
+ "shell" => "rm -f ~/.arcrc",
+ "user" => "root"
+ );
+
+ $steps[] = $cleanup;
+ }
+
+ assert(count($steps) > 0);
+ return $steps;
+}
+
+function getSandcastleConfig() {
+ $sandcastle_config = array();
+
+ $cwd = getcwd();
+ $cwd_token_file = "{$cwd}/.sandcastle";
+ // This is a case when we're executed from a continuous run. Fetch the values
+ // from the environment.
+ if (getenv(ENV_POST_RECEIVE_HOOK)) {
+ $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE);
+ $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE);
+ } else {
+ // This is a typical `[p]arc diff` case. Fetch the values from the specific
+ // configuration files.
+ for ($i = 0; $i < 50; $i++) {
+ if (file_exists(PRIMARY_TOKEN_FILE) ||
+ file_exists($cwd_token_file)) {
+ break;
+ }
+ // If we failed to fetch the tokens, sleep for 0.2 second and try again
+ usleep(200000);
+ }
+ assert(file_exists(PRIMARY_TOKEN_FILE) ||
+ file_exists($cwd_token_file));
+
+ // Try the primary location first, followed by a secondary.
+ if (file_exists(PRIMARY_TOKEN_FILE)) {
+ $cmd = 'cat ' . PRIMARY_TOKEN_FILE;
+ } else {
+ $cmd = 'cat ' . escapeshellarg($cwd_token_file);
+ }
+
+ assert(strlen($cmd) > 0);
+ $sandcastle_config = explode(':', rtrim(shell_exec($cmd)));
+ }
+
+ // In this case be very explicit about the implications.
+ if (count($sandcastle_config) != 2) {
+ echo "Sandcastle configuration files don't contain valid information " .
+ "or the necessary environment variables aren't defined. Unable " .
+ "to validate the code changes.";
+ exit(1);
+ }
+
+ assert(strlen($sandcastle_config[0]) > 0);
+ assert(strlen($sandcastle_config[1]) > 0);
+ assert(count($sandcastle_config) > 0);
+
+ return $sandcastle_config;
+}
+
+// This function can be called either from `[p]arc diff` command or during
+// the Git post-receive hook.
+ function startTestsInSandcastle($applyDiff, $workflow, $diffID) {
+ // Default options don't terminate on failure, but that's what we want. In
+ // the current case we use assertions intentionally as "terminate on failure
+ // invariants".
+ assert_options(ASSERT_BAIL, true);
+
+  // In case of a diff we'll send notifications to the author. Otherwise they'll go to
+  // the entire team, because failures indicate that build quality has regressed.
+ $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS;
+ assert(strlen($username) > 0);
+
+ if ($applyDiff) {
+ assert($workflow);
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+ }
+
+ // List of tests we want to run in Sandcastle.
+ $tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan",
+ "asan", "lite_test", "valgrind", "release", "release_481",
+ "clang_release", "clang_analyze", "code_cov",
+ "java_build", "no_compression", "unity", "ubsan");
+
+ $send_email_template = array(
+ 'type' => 'email',
+ 'triggers' => array('fail'),
+ 'emails' => array($username . '@fb.com'),
+ );
+
+ // Construct a job definition for each test and add it to the master plan.
+ foreach ($tests as $test) {
+ $stepName = "RocksDB diff " . $diffID . " test " . $test;
+
+ if (!$applyDiff) {
+ $stepName = "RocksDB continuous integration test " . $test;
+ }
+
+ $arg[] = array(
+ "name" => $stepName,
+ "report" => array($send_email_template),
+ "steps" => getSteps($applyDiff, $diffID, $username, $test)
+ );
+ }
+
+ // We cannot submit the parallel execution master plan to Sandcastle and
+  // need to supply the job plan as a determinator. So we construct a small job
+ // that will spit out the master job plan which Sandcastle will parse and
+ // execute. Why compress the job definitions? Otherwise we run over the max
+ // string size.
+ $cmd = "echo " . base64_encode(json_encode($arg))
+ . (PHP_OS == "Darwin" ?
+ " | gzip -f | base64" :
+ " | gzip -f | base64 -w0");
+ assert(strlen($cmd) > 0);
+
+ $arg_encoded = shell_exec($cmd);
+ assert(strlen($arg_encoded) > 0);
+
+  $runName = "Run diff " . $diffID . " for user " . $username;
+
+ if (!$applyDiff) {
+ $runName = "RocksDB continuous integration build and test run";
+ }
+
+ $command = array(
+ "name" => $runName,
+ "steps" => array()
+ );
+
+ $command["steps"][] = array(
+ "name" => "Generate determinator",
+ "shell" => "echo " . $arg_encoded . " | base64 --decode | gzip -d"
+ . " | base64 --decode",
+ "determinator" => true,
+ "user" => "root"
+ );
+
+ // Submit to Sandcastle.
+ $url = 'https://interngraph.intern.facebook.com/sandcastle/create';
+
+ $job = array(
+ 'command' => 'SandcastleUniversalCommand',
+ 'args' => $command,
+ 'capabilities' => array(
+ 'vcs' => 'rocksdb-int-git',
+ 'type' => 'lego',
+ ),
+ 'hash' => 'origin/master',
+ 'user' => $username,
+ 'alias' => 'rocksdb-precommit',
+ 'tags' => array('rocksdb'),
+ 'description' => 'Rocksdb precommit job',
+ );
+
+ // Fetch the configuration necessary to submit a successful HTTPS request.
+ $sandcastle_config = getSandcastleConfig();
+
+ $app = $sandcastle_config[0];
+ $token = $sandcastle_config[1];
+
+ $cmd = 'curl -s -k '
+ . ' -F app=' . escapeshellarg($app)
+ . ' -F token=' . escapeshellarg($token)
+ . ' -F job=' . escapeshellarg(json_encode($job))
+ .' ' . escapeshellarg($url);
+
+ $output = shell_exec($cmd);
+ assert(strlen($output) > 0);
+
+ // Extract Sandcastle URL from the response.
+ preg_match('/url": "(.+)"/', $output, $sandcastle_url);
+
+ assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request.");
+ assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL.");
+
+ if ($applyDiff) {
+ echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n";
+ // Ask Phabricator to display it on the diff UI.
+ postURL($diffID, $sandcastle_url[1]);
+ } else {
+ echo "Continuous integration started Sandcastle tests. You can look at ";
+ echo "the progress at:\n" . $sandcastle_url[1] . "\n";
+ }
+}
+
+// The continuous run script will set the environment variable and, based on that,
+// we'll trigger the execution of tests in Sandcastle. In that case we don't
+// need to apply any diffs and there's no associated workflow either.
+if (getenv(ENV_POST_RECEIVE_HOOK)) {
+ startTestsInSandcastle(
+ false /* $applyDiff */,
+ NULL /* $workflow */,
+ NULL /* $diffID */);
+}
diff --git a/c-deps/rocksdb/build_tools/amalgamate.py b/c-deps/rocksdb/build_tools/amalgamate.py
new file mode 100755
index 0000000000..548b1e8cec
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/amalgamate.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+
+# amalgamate.py creates an amalgamation from a unity build.
+# It can be run with either Python 2 or 3.
+# An amalgamation consists of a header that includes the contents of all public
+# headers and a source file that includes the contents of all source files and
+# private headers.
+#
+# This script works by starting with the unity build file and recursively expanding
+# #include directives. If the #include is found in a public include directory,
+# that header is expanded into the amalgamation header.
+#
+# A particular header is only expanded once, so this script will
+# break if there are multiple inclusions of the same header that are expected to
+# expand differently. Similarly, this type of code causes issues:
+#
+# #ifdef FOO
+# #include "bar.h"
+# // code here
+# #else
+# #include "bar.h" // oops, doesn't get expanded
+# // different code here
+# #endif
+#
+# The solution is to move the include out of the #ifdef.
+
+from __future__ import print_function
+
+import argparse
+from os import path
+import re
+import sys
+
+include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
+included = set()
+excluded = set()
+
+def find_header(name, abs_path, include_paths):
+ samedir = path.join(path.dirname(abs_path), name)
+ if path.exists(samedir):
+ return samedir
+ for include_path in include_paths:
+ include_path = path.join(include_path, name)
+ if path.exists(include_path):
+ return include_path
+ return None
+
+def expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths):
+ if include_path in included:
+ return False
+
+ included.add(include_path)
+ with open(include_path) as f:
+ print('#line 1 "{}"'.format(include_path), file=source_out)
+ process_file(f, include_path, source_out, header_out, include_paths, public_include_paths)
+ return True
+
+def process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths):
+ for (line, text) in enumerate(f):
+ m = include_re.match(text)
+ if m:
+ filename = m.groups()[0]
+ # first check private headers
+ include_path = find_header(filename, abs_path, include_paths)
+ if include_path:
+ if include_path in excluded:
+ source_out.write(text)
+ expanded = False
+ else:
+ expanded = expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths)
+ else:
+ # now try public headers
+ include_path = find_header(filename, abs_path, public_include_paths)
+ if include_path:
+ # found public header
+ expanded = False
+ if include_path in excluded:
+ source_out.write(text)
+ else:
+ expand_include(include_path, f, abs_path, header_out, None, public_include_paths, [])
+ else:
+ sys.exit("unable to find {}, included in {} on line {}".format(filename, abs_path, line))
+
+ if expanded:
+ print('#line {} "{}"'.format(line+1, abs_path), file=source_out)
+ elif text != "#pragma once\n":
+ source_out.write(text)
+
+def main():
+ parser = argparse.ArgumentParser(description="Transform a unity build into an amalgamation")
+ parser.add_argument("source", help="source file")
+ parser.add_argument("-I", action="append", dest="include_paths", help="include paths for private headers")
+ parser.add_argument("-i", action="append", dest="public_include_paths", help="include paths for public headers")
+ parser.add_argument("-x", action="append", dest="excluded", help="excluded header files")
+ parser.add_argument("-o", dest="source_out", help="output C++ file", required=True)
+ parser.add_argument("-H", dest="header_out", help="output C++ header file", required=True)
+ args = parser.parse_args()
+
+ include_paths = list(map(path.abspath, args.include_paths or []))
+ public_include_paths = list(map(path.abspath, args.public_include_paths or []))
+ excluded.update(map(path.abspath, args.excluded or []))
+ filename = args.source
+ abs_path = path.abspath(filename)
+ with open(filename) as f, open(args.source_out, 'w') as source_out, open(args.header_out, 'w') as header_out:
+ print('#line 1 "{}"'.format(filename), file=source_out)
+ print('#include "{}"'.format(header_out.name), file=source_out)
+ process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths)
+
+if __name__ == "__main__":
+ main()
diff --git a/c-deps/rocksdb/build_tools/build_detect_platform b/c-deps/rocksdb/build_tools/build_detect_platform
new file mode 100755
index 0000000000..c7ddb7ccee
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/build_detect_platform
@@ -0,0 +1,532 @@
+#!/bin/sh
+#
+# Detects OS we're compiling on and outputs a file specified by the first
+# argument, which in turn gets read while processing Makefile.
+#
+# The output will set the following variables:
+# CC C Compiler path
+# CXX C++ Compiler path
+# PLATFORM_LDFLAGS Linker flags
+# JAVA_LDFLAGS Linker flags for RocksDBJava
+# JAVA_STATIC_LDFLAGS Linker flags for RocksDBJava static build
+# PLATFORM_SHARED_EXT Extension for shared libraries
+# PLATFORM_SHARED_LDFLAGS Flags for building shared library
+# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
+# PLATFORM_CCFLAGS C compiler flags
+# PLATFORM_CXXFLAGS C++ compiler flags. Will contain:
+# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned
+# shared libraries, empty otherwise.
+#
+# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
+#
+# -DROCKSDB_PLATFORM_POSIX if posix-platform based
+# -DSNAPPY if the Snappy library is present
+# -DLZ4 if the LZ4 library is present
+# -DZSTD if the ZSTD library is present
+# -DNUMA if the NUMA library is present
+# -DTBB if the TBB library is present
+#
+# Using gflags in rocksdb:
+# Our project depends on gflags, which requires users to take some extra steps
+# before they can compile the whole repository:
+# 1. Install gflags. You may download it from here:
+# https://gflags.github.io/gflags/ (Mac users can `brew install gflags`)
+# 2. Once installed, add the include path for gflags to your CPATH env var and
+# the lib path to LIBRARY_PATH. If installed with default settings, the lib
+# will be /usr/local/lib and the include path will be /usr/local/include
+
+OUTPUT=$1
+if test -z "$OUTPUT"; then
+  echo "usage: $0 <output-filename>" >&2
+ exit 1
+fi
+
+# we depend on C++11
+PLATFORM_CXXFLAGS="-std=c++11"
+# we currently depend on POSIX platform
+COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
+
+# Default to fbcode gcc on internal fb machines
+if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
+ FBCODE_BUILD="true"
+ # If we're compiling with TSAN we need pic build
+ PIC_BUILD=$COMPILE_WITH_TSAN
+ if [ -z "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
+ source "$PWD/build_tools/fbcode_config.sh"
+ else
+ # we need this to build with MySQL. Don't use for other purposes.
+ source "$PWD/build_tools/fbcode_config4.8.1.sh"
+ fi
+fi
+
+# Delete existing output, if it exists
+rm -f "$OUTPUT"
+touch "$OUTPUT"
+
+if test -z "$CC"; then
+ CC=cc
+fi
+
+if test -z "$CXX"; then
+ CXX=g++
+fi
+
+# Detect OS
+if test -z "$TARGET_OS"; then
+ TARGET_OS=`uname -s`
+fi
+
+if test -z "$TARGET_ARCHITECTURE"; then
+ TARGET_ARCHITECTURE=`uname -m`
+fi
+
+if test -z "$CLANG_SCAN_BUILD"; then
+ CLANG_SCAN_BUILD=scan-build
+fi
+
+if test -z "$CLANG_ANALYZER"; then
+ CLANG_ANALYZER=$(which clang++ 2> /dev/null)
+fi
+
+COMMON_FLAGS="$COMMON_FLAGS ${CFLAGS}"
+CROSS_COMPILE=
+PLATFORM_CCFLAGS=
+PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS"
+PLATFORM_SHARED_EXT="so"
+PLATFORM_SHARED_LDFLAGS="-Wl,--no-as-needed -shared -Wl,-soname -Wl,"
+PLATFORM_SHARED_CFLAGS="-fPIC"
+PLATFORM_SHARED_VERSIONED=true
+
+# generic port files (working on all platform by #ifdef) go directly in /port
+GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`
+
+# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp
+case "$TARGET_OS" in
+ Darwin)
+ PLATFORM=OS_MACOSX
+ COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
+ PLATFORM_SHARED_EXT=dylib
+ PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
+ # PORT_FILES=port/darwin/darwin_specific.cc
+ ;;
+ IOS)
+ PLATFORM=IOS
+ COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE"
+ PLATFORM_SHARED_EXT=dylib
+ PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
+ CROSS_COMPILE=true
+ PLATFORM_SHARED_VERSIONED=
+ ;;
+ Linux)
+ PLATFORM=OS_LINUX
+ COMMON_FLAGS="$COMMON_FLAGS -DOS_LINUX"
+ if [ -z "$USE_CLANG" ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
+ fi
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
+ # PORT_FILES=port/linux/linux_specific.cc
+ ;;
+ SunOS)
+ PLATFORM=OS_SOLARIS
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_SOLARIS -m64"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt -static-libstdc++ -static-libgcc -m64"
+ # PORT_FILES=port/sunos/sunos_specific.cc
+ ;;
+ AIX)
+ PLATFORM=OS_AIX
+ CC=gcc
+ COMMON_FLAGS="$COMMON_FLAGS -maix64 -pthread -fno-builtin-memcmp -D_REENTRANT -DOS_AIX -D__STDC_FORMAT_MACROS"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread -lpthread -lrt -maix64 -static-libstdc++ -static-libgcc"
+ # PORT_FILES=port/aix/aix_specific.cc
+ ;;
+ FreeBSD)
+ PLATFORM=OS_FREEBSD
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_FREEBSD"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
+ # PORT_FILES=port/freebsd/freebsd_specific.cc
+ ;;
+ NetBSD)
+ PLATFORM=OS_NETBSD
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_NETBSD"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lgcc_s"
+ # PORT_FILES=port/netbsd/netbsd_specific.cc
+ ;;
+ OpenBSD)
+ PLATFORM=OS_OPENBSD
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_OPENBSD"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread"
+ # PORT_FILES=port/openbsd/openbsd_specific.cc
+ ;;
+ DragonFly)
+ PLATFORM=OS_DRAGONFLYBSD
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_DRAGONFLYBSD"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
+ # PORT_FILES=port/dragonfly/dragonfly_specific.cc
+ ;;
+ Cygwin)
+ PLATFORM=CYGWIN
+ PLATFORM_SHARED_CFLAGS=""
+ PLATFORM_CXXFLAGS="-std=gnu++11"
+ COMMON_FLAGS="$COMMON_FLAGS -DCYGWIN"
+ if [ -z "$USE_CLANG" ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
+ fi
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
+ # PORT_FILES=port/linux/linux_specific.cc
+ ;;
+ OS_ANDROID_CROSSCOMPILE)
+ PLATFORM=OS_ANDROID
+ COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_ANDROID -DROCKSDB_PLATFORM_POSIX"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS " # All pthread features are in the Android C library
+ # PORT_FILES=port/android/android.cc
+ CROSS_COMPILE=true
+ ;;
+ *)
+ echo "Unknown platform!" >&2
+ exit 1
+esac
+
+PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS ${CXXFLAGS}"
+JAVA_LDFLAGS="$PLATFORM_LDFLAGS"
+JAVA_STATIC_LDFLAGS="$PLATFORM_LDFLAGS"
+
+if [ "$CROSS_COMPILE" = "true" -o "$FBCODE_BUILD" = "true" ]; then
+ # Cross-compiling; do not try any compilation tests.
+ # Also don't need any compilation tests if compiling on fbcode
+ true
+else
+ if ! test $ROCKSDB_DISABLE_FALLOCATE; then
+ # Test whether fallocate is available
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <fcntl.h>
+ #include <linux/falloc.h>
+ int main() {
+ int fd = open("/dev/null", 0);
+ fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_FALLOCATE_PRESENT"
+ fi
+ fi
+
+ # Test whether Snappy library is installed
+ # http://code.google.com/p/snappy/
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <snappy.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lsnappy"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lsnappy"
+ fi
+
+ # Test whether gflags library is installed
+ # http://gflags.github.io/gflags/
+ # check if the namespace is gflags
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
+ #include <gflags/gflags.h>
+ using namespace gflags;
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=gflags"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
+ else
+ # check if namespace is google
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
+ #include <gflags/gflags.h>
+ using namespace google;
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=google"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
+ fi
+ fi
+
+ # Test whether zlib library is installed
+ $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <zlib.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DZLIB"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lz"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lz"
+ fi
+
+ # Test whether bzip library is installed
+ $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <bzlib.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DBZIP2"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lbz2"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lbz2"
+ fi
+
+ # Test whether lz4 library is installed
+ $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <lz4.h>
+ #include <lz4hc.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DLZ4"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -llz4"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -llz4"
+ fi
+
+ # Test whether zstd library is installed
+ $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <zstd.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DZSTD"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lzstd"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lzstd"
+ fi
+
+ # Test whether numa is available
+ $CXX $CFLAGS -x c++ - -o /dev/null -lnuma 2>/dev/null <<EOF
+ #include <numa.h>
+ #include <numaif.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DNUMA"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lnuma"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lnuma"
+ fi
+
+ # Test whether tbb is available
+ $CXX $CFLAGS $LDFLAGS -x c++ - -o /dev/null -ltbb 2>/dev/null <<EOF
+ #include <tbb/tbb.h>
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DTBB"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltbb"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -ltbb"
+ fi
+
+ # Test whether jemalloc is available
+ if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \
+ 2>/dev/null; then
+ # This will enable some preprocessor identifiers in the Makefile
+ JEMALLOC=1
+ # JEMALLOC can be enabled either using the flag (like here) or by
+ # providing a direct link to the jemalloc library
+ WITH_JEMALLOC_FLAG=1
+ else
+ # jemalloc is not available. Let's try tcmalloc
+ if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
+ -ltcmalloc 2>/dev/null; then
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltcmalloc"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -ltcmalloc"
+ fi
+ fi
+
+ # Test whether malloc_usable_size is available
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <malloc.h>
+ int main() {
+ size_t res = malloc_usable_size(0);
+ return 0;
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_MALLOC_USABLE_SIZE"
+ fi
+
+ # Test whether PTHREAD_MUTEX_ADAPTIVE_NP mutex type is available
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <pthread.h>
+ int main() {
+ int x = PTHREAD_MUTEX_ADAPTIVE_NP;
+ return 0;
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_PTHREAD_ADAPTIVE_MUTEX"
+ fi
+
+ # Test whether backtrace is available
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <execinfo.h>
+ int main() {
+ void* frames[1];
+ backtrace_symbols(frames, backtrace(frames, 1));
+ return 0;
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
+ else
+ # Test whether execinfo library is installed
+ $CXX $CFLAGS -lexecinfo -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <execinfo.h>
+ int main() {
+ void* frames[1];
+ backtrace_symbols(frames, backtrace(frames, 1));
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
+ PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lexecinfo"
+ JAVA_LDFLAGS="$JAVA_LDFLAGS -lexecinfo"
+ fi
+ fi
+
+ # Test if -pg is supported
+ $CXX $CFLAGS -pg -x c++ - -o /dev/null 2>/dev/null <<EOF
+ int main() {
+ return 0;
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ PROFILING_FLAGS=-pg
+ fi
+
+ # Test whether sync_file_range is supported
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <fcntl.h>
+ int main() {
+ int fd = open("/dev/null", 0);
+ sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE);
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_RANGESYNC_PRESENT"
+ fi
+
+ # Test whether sched_getcpu is supported
+ $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #include <sched.h>
+ int main() {
+ int cpuid = sched_getcpu();
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SCHED_GETCPU_PRESENT"
+ fi
+fi
+
+# TODO(tec): Fix -Wshorten-64-to-32 errors on FreeBSD and enable the warning.
+# -Wshorten-64-to-32 breaks compilation on FreeBSD i386
+if ! [ "$TARGET_OS" = FreeBSD -a "$TARGET_ARCHITECTURE" = i386 ]; then
+ # Test whether -Wshorten-64-to-32 is available
+ $CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
+ int main() {}
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
+ fi
+fi
+
+# Test whether SSE 4.2 intrinsics can be compiled
+$CXX $CFLAGS -x c++ - -o /dev/null -msse -msse4.2 2>/dev/null <<EOF
+ #include <cstdint>
+ #include <nmmintrin.h>
+ int main() {
+ volatile uint32_t x = _mm_crc32_u32(0, 0);
+ }
+EOF
+if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DHAVE_SSE42"
+elif test "$USE_SSE"; then
+ echo "warning: USE_SSE specified but compiler could not use SSE intrinsics, disabling"
+fi
+
+# iOS doesn't support thread-local storage, but this check would erroneously
+# succeed because the cross-compiler flags are added by the Makefile, not this
+# script.
+if [ "$PLATFORM" != IOS ]; then
+ $CXX $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+ #if defined(_MSC_VER) && !defined(__thread)
+ #define __thread __declspec(thread)
+ #endif
+ int main() {
+ static __thread int tls;
+ }
+EOF
+ if [ "$?" = 0 ]; then
+ COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SUPPORT_THREAD_LOCAL"
+ fi
+fi
+
+PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
+PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
+
+ROCKSDB_MAJOR=`build_tools/version.sh major`
+ROCKSDB_MINOR=`build_tools/version.sh minor`
+ROCKSDB_PATCH=`build_tools/version.sh patch`
+
+echo "CC=$CC" >> "$OUTPUT"
+echo "CXX=$CXX" >> "$OUTPUT"
+echo "PLATFORM=$PLATFORM" >> "$OUTPUT"
+echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> "$OUTPUT"
+echo "JAVA_LDFLAGS=$JAVA_LDFLAGS" >> "$OUTPUT"
+echo "JAVA_STATIC_LDFLAGS=$JAVA_STATIC_LDFLAGS" >> "$OUTPUT"
+echo "VALGRIND_VER=$VALGRIND_VER" >> "$OUTPUT"
+echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> "$OUTPUT"
+echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> "$OUTPUT"
+echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> "$OUTPUT"
+echo "EXEC_LDFLAGS=$EXEC_LDFLAGS" >> "$OUTPUT"
+echo "JEMALLOC_INCLUDE=$JEMALLOC_INCLUDE" >> "$OUTPUT"
+echo "JEMALLOC_LIB=$JEMALLOC_LIB" >> "$OUTPUT"
+echo "ROCKSDB_MAJOR=$ROCKSDB_MAJOR" >> "$OUTPUT"
+echo "ROCKSDB_MINOR=$ROCKSDB_MINOR" >> "$OUTPUT"
+echo "ROCKSDB_PATCH=$ROCKSDB_PATCH" >> "$OUTPUT"
+echo "CLANG_SCAN_BUILD=$CLANG_SCAN_BUILD" >> "$OUTPUT"
+echo "CLANG_ANALYZER=$CLANG_ANALYZER" >> "$OUTPUT"
+echo "PROFILING_FLAGS=$PROFILING_FLAGS" >> "$OUTPUT"
+# This will enable some related identifiers for the preprocessor
+if test -n "$JEMALLOC"; then
+ echo "JEMALLOC=1" >> "$OUTPUT"
+fi
+# Indicates that jemalloc should be enabled using -ljemalloc flag
+# The alternative is to provide a direct link to the library via JEMALLOC_LIB
+# and JEMALLOC_INCLUDE
+if test -n "$WITH_JEMALLOC_FLAG"; then
+ echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT"
+fi
+echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT"
diff --git a/c-deps/rocksdb/build_tools/cont_integration.sh b/c-deps/rocksdb/build_tools/cont_integration.sh
new file mode 100755
index 0000000000..06f25c596e
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/cont_integration.sh
@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2016, Facebook. All rights reserved.
+#
+# Overall wrapper script for RocksDB continuous builds. The implementation is a
+# trivial pulling scheme. We loop infinitely, check if any new changes have been
+# committed, if yes then trigger a Sandcastle run, and finally go to sleep again
+# for a certain interval.
+#
+
+SRC_GIT_REPO=/data/git/rocksdb-public
+error=0
+
+function log {
+ DATE=`date +%Y-%m-%d:%H:%M:%S`
+ echo $DATE $@
+}
+
+function log_err {
+ log "ERROR: $@ Error code: $error."
+}
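+
+# Illustrative usage of the two helpers above (the timestamp shown is made up):
+#   log "Triggering run"        # prints: 2016-07-01:12:30:05 Triggering run
+#   log_err "git fetch failed." # prints the same line prefixed with "ERROR:"
+#                               # and suffixed with "Error code: $error."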
+
+function update_repo_status {
+ # Update the parent first.
+ pushd $SRC_GIT_REPO
+
+ # This is a fatal error. Something in the environment isn't right and we will
+ # terminate the execution.
+ error=$?
+ if [ ! $error -eq 0 ]; then
+ log_err "Where is $SRC_GIT_REPO?"
+ exit $error
+ fi
+
+ HTTPS_PROXY=fwdproxy:8080 git fetch -f
+
+ error=$?
+ if [ ! $error -eq 0 ]; then
+ log_err "git fetch -f failed."
+ popd
+ return $error
+ fi
+
+ git update-ref refs/heads/master refs/remotes/origin/master
+
+ error=$?
+ if [ ! $error -eq 0 ]; then
+ log_err "git update-ref failed."
+ popd
+ return $error
+ fi
+
+ popd
+
+ # We're back in an instance-specific directory. Get the latest changes.
+ git pull --rebase
+
+ error=$?
+ if [ ! $error -eq 0 ]; then
+ log_err "git pull --rebase failed."
+ return $error
+ fi
+}
+
+#
+# Execution starts here.
+#
+
+# Path to the determinator from the root of the RocksDB repo.
+CONTRUN_DETERMINATOR=./build_tools/RocksDBCommonHelper.php
+
+# Value of the previous commit.
+PREV_COMMIT=
+
+log "Starting to monitor for new RocksDB changes ..."
+log "Running under `pwd` as `whoami`."
+
+# Paranoia. Make sure that we're using the right branch.
+git checkout master
+
+error=$?
+if [ ! $error -eq 0 ]; then
+ log_err "This is not good. Can't checkout master. Bye-bye!"
+ exit 1
+fi
+
+# We'll run forever and let the execution environment terminate us if we
+# exceed whatever timeout is set for the job.
+while true;
+do
+ # Get the latest changes committed.
+ update_repo_status
+
+ error=$?
+ if [ $error -eq 0 ]; then
+ LAST_COMMIT=`git log -1 | head -1 | grep commit | awk '{ print $2; }'`
+
+ log "Last commit is '$LAST_COMMIT', previous commit is '$PREV_COMMIT'."
+
+ if [ "$PREV_COMMIT" == "$LAST_COMMIT" ]; then
+ log "There were no changes since the last time I checked. Going to sleep."
+ else
+ if [ ! -z "$LAST_COMMIT" ]; then
+ log "New code has been committed or previous commit not known. " \
+ "Will trigger the tests."
+
+ PREV_COMMIT=$LAST_COMMIT
+ log "Updated previous commit to '$PREV_COMMIT'."
+
+ #
+ # This is where we'll trigger the Sandcastle run. The values for
+ # HTTPS_APP_VALUE and HTTPS_TOKEN_VALUE will be set in the container we're
+ # running in.
+ #
+ POST_RECEIVE_HOOK=1 php $CONTRUN_DETERMINATOR
+
+ error=$?
+ if [ $error -eq 0 ]; then
+ log "Sandcastle run successfully triggered."
+ else
+ log_err "Failed to trigger Sandcastle run."
+ fi
+ else
+ log_err "Previous commit not updated. Don't know what the last one is."
+ fi
+ fi
+ else
+ log_err "Getting latest changes failed. Will skip running tests for now."
+ fi
+
+ # Always sleep, even if errors happen while trying to determine the latest
+ # commit. This will prevent us from terminating in case of transient errors.
+ log "Will go to sleep for 5 minutes."
+ sleep 5m
+done
diff --git a/c-deps/rocksdb/build_tools/dependencies.sh b/c-deps/rocksdb/build_tools/dependencies.sh
new file mode 100644
index 0000000000..06c7097149
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/dependencies.sh
@@ -0,0 +1,18 @@
+GCC_BASE=/mnt/gvfs/third-party2/gcc/8219ec1bcedf8ad9da05e121e193364de2cc4f61/5.x/centos6-native/c447969
+CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/64d8d58e3d84f8bde7a029763d4f5baf39d0d5b9/stable/centos6-native/6aaf4de
+LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/ba9be983c81de7299b59fe71950c664a84dcb5f8/5.x/gcc-5-glibc-2.23/339d858
+GLIBC_BASE=/mnt/gvfs/third-party2/glibc/f20197cf3d4bd50339c9777aaa0b2ccadad9e2cb/2.23/gcc-5-glibc-2.23/ca1d1c0
+SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/6427ce8c7496e4ab06c2da81543b94c0de8be3d0/1.1.3/gcc-5-glibc-2.23/9bc6787
+ZLIB_BASE=/mnt/gvfs/third-party2/zlib/8f1e8b867d26efef93eac2fabbdb2e1d512665d7/1.2.8/gcc-5-glibc-2.23/9bc6787
+BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/70471c0571559fe0af7db6d7e8860b93a7eadfe1/1.0.6/gcc-5-glibc-2.23/9bc6787
+LZ4_BASE=/mnt/gvfs/third-party2/lz4/453c89d6f0e68cdf1c151c769197fabedad9cac8/r131/gcc-5-glibc-2.23/9bc6787
+ZSTD_BASE=/mnt/gvfs/third-party2/zstd/00a40fa5f8bd2cd0622f2e868552793aef37ccf4/1.3.0/gcc-5-glibc-2.23/03859b5
+GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/47eef08f9acb77de982fbda6047c26d330739538/2.2.0/gcc-5-glibc-2.23/9bc6787
+JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/4414ddc78df8008b35cc4adac23590ad29148584/master/gcc-5-glibc-2.23/d506c82
+NUMA_BASE=/mnt/gvfs/third-party2/numa/9d7ae2693d05d62f9a579cb21e6b717cf257a75d/2.0.11/gcc-5-glibc-2.23/9bc6787
+LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/2b2dd58e3a52ccf2c1d827def59e5f740de0ad15/1.2/gcc-5-glibc-2.23/b443de1
+TBB_BASE=/mnt/gvfs/third-party2/tbb/379addf7ab2468a2b4293b47456cfcd1c9cb318d/4.3/gcc-5-glibc-2.23/9bc6787
+KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/3f68f5fe65a85b7c2d3e66852268fbd1efdb3151/4.0.9-36_fbk5_2933_gd092e3f/gcc-5-glibc-2.23/da39a3e
+BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/b9fab0aec99d9c36408e810b2677e91c12807afd/2.28/centos6-native/da39a3e
+VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/423431d61786b20bcc3bde8972901130cb29e6b3/3.11.0/gcc-5-glibc-2.23/9bc6787
+LUA_BASE=/mnt/gvfs/third-party2/lua/3b0bb3bd9a0f690a069c479fcc0f7424fc7456d2/5.2.3/gcc-5-glibc-2.23/65372bd
diff --git a/c-deps/rocksdb/build_tools/dependencies_4.8.1.sh b/c-deps/rocksdb/build_tools/dependencies_4.8.1.sh
new file mode 100644
index 0000000000..ef0cda2398
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/dependencies_4.8.1.sh
@@ -0,0 +1,18 @@
+GCC_BASE=/mnt/gvfs/third-party2/gcc/cf7d14c625ce30bae1a4661c2319c5a283e4dd22/4.8.1/centos6-native/cc6c9dc
+CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/8598c375b0e94e1448182eb3df034704144a838d/stable/centos6-native/3f16ddd
+LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/d6e0a7da6faba45f5e5b1638f9edd7afc2f34e7d/4.8.1/gcc-4.8.1-glibc-2.17/8aac7fc
+GLIBC_BASE=/mnt/gvfs/third-party2/glibc/d282e6e8f3d20f4e40a516834847bdc038e07973/2.17/gcc-4.8.1-glibc-2.17/99df8fc
+SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/8c38a4c1e52b4c2cc8a9cdc31b9c947ed7dbfcb4/1.1.3/gcc-4.8.1-glibc-2.17/c3f970a
+ZLIB_BASE=/mnt/gvfs/third-party2/zlib/0882df3713c7a84f15abe368dc004581f20b39d7/1.2.8/gcc-4.8.1-glibc-2.17/c3f970a
+BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/740325875f6729f42d28deaa2147b0854f3a347e/1.0.6/gcc-4.8.1-glibc-2.17/c3f970a
+LZ4_BASE=/mnt/gvfs/third-party2/lz4/0e790b441e2d9acd68d51e1d2e028f88c6a79ddf/r131/gcc-4.8.1-glibc-2.17/c3f970a
+ZSTD_BASE=/mnt/gvfs/third-party2/zstd/9455f75ff7f4831dc9fda02a6a0f8c68922fad8f/1.0.0/gcc-4.8.1-glibc-2.17/c3f970a
+GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/f001a51b2854957676d07306ef3abf67186b5c8b/2.1.1/gcc-4.8.1-glibc-2.17/c3f970a
+JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/fc8a13ca1fffa4d0765c716c5a0b49f0c107518f/master/gcc-4.8.1-glibc-2.17/8d31e51
+NUMA_BASE=/mnt/gvfs/third-party2/numa/17c514c4d102a25ca15f4558be564eeed76f4b6a/2.0.8/gcc-4.8.1-glibc-2.17/c3f970a
+LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/ad576de2a1ea560c4d3434304f0fc4e079bede42/trunk/gcc-4.8.1-glibc-2.17/675d945
+TBB_BASE=/mnt/gvfs/third-party2/tbb/9d9a554877d0c5bef330fe818ab7178806dd316a/4.0_update2/gcc-4.8.1-glibc-2.17/c3f970a
+KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/7c111ff27e0c466235163f00f280a9d617c3d2ec/4.0.9-36_fbk5_2933_gd092e3f/gcc-4.8.1-glibc-2.17/da39a3e
+BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/b7fd454c4b10c6a81015d4524ed06cdeab558490/2.26/centos6-native/da39a3e
+VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/d7f4d4d86674a57668e3a96f76f0e17dd0eb8765/3.8.1/gcc-4.8.1-glibc-2.17/c3f970a
+LUA_BASE=/mnt/gvfs/third-party2/lua/61e4abf5813bbc39bc4f548757ccfcadde175a48/5.2.3/centos6-native/730f94e
diff --git a/c-deps/rocksdb/build_tools/dockerbuild.sh b/c-deps/rocksdb/build_tools/dockerbuild.sh
new file mode 100755
index 0000000000..02f6094428
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/dockerbuild.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+docker run -v $PWD:/rocks -w /rocks buildpack-deps make
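+#
+# A rough sketch of what the line above does: the current RocksDB checkout is
+# bind-mounted at /rocks inside the stock buildpack-deps image and `make` runs
+# there, so build artifacts land back in the host working directory. A
+# hypothetical variant that builds only one target would look like:
+#   docker run -v $PWD:/rocks -w /rocks buildpack-deps make static_lib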
diff --git a/c-deps/rocksdb/build_tools/error_filter.py b/c-deps/rocksdb/build_tools/error_filter.py
new file mode 100644
index 0000000000..9f619cf4ba
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/error_filter.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+'''Filter for error messages in test output:
+ - Receives merged stdout/stderr from test on stdin
+ - Finds patterns of known error messages for test name (first argument)
+ - Prints those error messages to stdout
+'''
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import re
+import sys
+
+
+class ErrorParserBase(object):
+ def parse_error(self, line):
+ '''Parses a line of test output. If it contains an error, returns a
+ formatted message describing the error; otherwise, returns None.
+ Subclasses must override this method.
+ '''
+ raise NotImplementedError
+
+
+class GTestErrorParser(ErrorParserBase):
+ '''A parser that remembers the last test that began running so it can print
+ that test's name upon detecting failure.
+ '''
+ _GTEST_NAME_PATTERN = re.compile(r'\[ RUN \] (\S+)$')
+ # format: '<filename or "unknown file">:<line #>: Failure'
+ _GTEST_FAIL_PATTERN = re.compile(r'(unknown file|\S+:\d+): Failure$')
+
+ def __init__(self):
+ self._last_gtest_name = 'Unknown test'
+
+ def parse_error(self, line):
+ gtest_name_match = self._GTEST_NAME_PATTERN.match(line)
+ if gtest_name_match:
+ self._last_gtest_name = gtest_name_match.group(1)
+ return None
+ gtest_fail_match = self._GTEST_FAIL_PATTERN.match(line)
+ if gtest_fail_match:
+ return '%s failed: %s' % (
+ self._last_gtest_name, gtest_fail_match.group(1))
+ return None
+
+
+class MatchErrorParser(ErrorParserBase):
+ '''A simple parser that returns the whole line if it matches the pattern.
+ '''
+ def __init__(self, pattern):
+ self._pattern = re.compile(pattern)
+
+ def parse_error(self, line):
+ if self._pattern.match(line):
+ return line
+ return None
+
+
+class CompilerErrorParser(MatchErrorParser):
+ def __init__(self):
+ # format: '<filename>:<line #>:<column #>: error: <error msg>'
+ super(CompilerErrorParser, self).__init__(r'\S+:\d+:\d+: error:')
+
+
+class ScanBuildErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(ScanBuildErrorParser, self).__init__(
+ r'scan-build: \d+ bugs found.$')
+
+
+class DbCrashErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(DbCrashErrorParser, self).__init__(r'\*\*\*.*\^$|TEST FAILED.')
+
+
+class WriteStressErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(WriteStressErrorParser, self).__init__(
+ r'ERROR: write_stress died with exitcode=\d+')
+
+
+class AsanErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(AsanErrorParser, self).__init__(
+ r'==\d+==ERROR: AddressSanitizer:')
+
+
+class UbsanErrorParser(MatchErrorParser):
+ def __init__(self):
+ # format: '<filename>:<line #>:<column #>: runtime error: <error msg>'
+ super(UbsanErrorParser, self).__init__(r'\S+:\d+:\d+: runtime error:')
+
+
+class ValgrindErrorParser(MatchErrorParser):
+ def __init__(self):
+ # just grab the summary, valgrind doesn't clearly distinguish errors
+ # from other log messages.
+ super(ValgrindErrorParser, self).__init__(r'==\d+== ERROR SUMMARY:')
+
+
+class CompatErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(CompatErrorParser, self).__init__(r'==== .*[Ee]rror.* ====$')
+
+
+class TsanErrorParser(MatchErrorParser):
+ def __init__(self):
+ super(TsanErrorParser, self).__init__(r'WARNING: ThreadSanitizer:')
+
+
+_TEST_NAME_TO_PARSERS = {
+ 'punit': [CompilerErrorParser, GTestErrorParser],
+ 'unit': [CompilerErrorParser, GTestErrorParser],
+ 'release': [CompilerErrorParser, GTestErrorParser],
+ 'unit_481': [CompilerErrorParser, GTestErrorParser],
+ 'release_481': [CompilerErrorParser, GTestErrorParser],
+ 'clang_unit': [CompilerErrorParser, GTestErrorParser],
+ 'clang_release': [CompilerErrorParser, GTestErrorParser],
+ 'clang_analyze': [CompilerErrorParser, ScanBuildErrorParser],
+ 'code_cov': [CompilerErrorParser, GTestErrorParser],
+ 'unity': [CompilerErrorParser, GTestErrorParser],
+ 'lite': [CompilerErrorParser],
+ 'lite_test': [CompilerErrorParser, GTestErrorParser],
+ 'stress_crash': [CompilerErrorParser, DbCrashErrorParser],
+ 'write_stress': [CompilerErrorParser, WriteStressErrorParser],
+ 'asan': [CompilerErrorParser, GTestErrorParser, AsanErrorParser],
+ 'asan_crash': [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
+ 'ubsan': [CompilerErrorParser, GTestErrorParser, UbsanErrorParser],
+ 'ubsan_crash': [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
+ 'valgrind': [CompilerErrorParser, GTestErrorParser, ValgrindErrorParser],
+ 'tsan': [CompilerErrorParser, GTestErrorParser, TsanErrorParser],
+ 'format_compatible': [CompilerErrorParser, CompatErrorParser],
+ 'run_format_compatible': [CompilerErrorParser, CompatErrorParser],
+ 'no_compression': [CompilerErrorParser, GTestErrorParser],
+ 'run_no_compression': [CompilerErrorParser, GTestErrorParser],
+ 'regression': [CompilerErrorParser],
+ 'run_regression': [CompilerErrorParser],
+}
+
+
+def main():
+ if len(sys.argv) != 2:
+ return 'Usage: %s <test name>' % sys.argv[0]
+ test_name = sys.argv[1]
+ if test_name not in _TEST_NAME_TO_PARSERS:
+ return 'Unknown test name: %s' % test_name
+
+ error_parsers = []
+ for parser_cls in _TEST_NAME_TO_PARSERS[test_name]:
+ error_parsers.append(parser_cls())
+
+ for line in sys.stdin:
+ line = line.strip()
+ for error_parser in error_parsers:
+ error_msg = error_parser.parse_error(line)
+ if error_msg is not None:
+ print(error_msg)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
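+
+# Illustrative invocation (the test target name is hypothetical): pipe the
+# merged stdout/stderr of a build-and-test run into this filter together with
+# the test-suite key, e.g.
+#   make db_basic_test 2>&1 | python build_tools/error_filter.py unit
+# Only lines recognized by the parsers registered for 'unit' are printed.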
diff --git a/c-deps/rocksdb/build_tools/fb_compile_mongo.sh b/c-deps/rocksdb/build_tools/fb_compile_mongo.sh
new file mode 100755
index 0000000000..c087f81611
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/fb_compile_mongo.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+# fail early
+set -e
+
+if test -z $ROCKSDB_PATH; then
+ ROCKSDB_PATH=~/rocksdb
+fi
+source $ROCKSDB_PATH/build_tools/fbcode_config4.8.1.sh
+
+EXTRA_LDFLAGS=""
+
+if test -z $ALLOC; then
+ # default
+ ALLOC=tcmalloc
+elif [[ $ALLOC == "jemalloc" ]]; then
+ ALLOC=system
+ EXTRA_LDFLAGS+=" -Wl,--whole-archive $JEMALLOC_LIB -Wl,--no-whole-archive"
+fi
+
+# we need to force mongo to use static library, not shared
+STATIC_LIB_DEP_DIR='build/static_library_dependencies'
+test -d $STATIC_LIB_DEP_DIR || mkdir $STATIC_LIB_DEP_DIR
+test -h $STATIC_LIB_DEP_DIR/`basename $SNAPPY_LIBS` || ln -s $SNAPPY_LIBS $STATIC_LIB_DEP_DIR
+test -h $STATIC_LIB_DEP_DIR/`basename $LZ4_LIBS` || ln -s $LZ4_LIBS $STATIC_LIB_DEP_DIR
+
+EXTRA_LDFLAGS+=" -L $STATIC_LIB_DEP_DIR"
+
+set -x
+
+EXTRA_CMD=""
+if ! test -e version.json; then
+ # this is Mongo 3.0
+ EXTRA_CMD="--rocksdb \
+ --variant-dir=linux2/norm
+ --cxx=${CXX} \
+ --cc=${CC} \
+ --use-system-zlib" # add this line back to normal code path
+ # when https://jira.mongodb.org/browse/SERVER-19123 is resolved
+fi
+
+scons \
+ LINKFLAGS="$EXTRA_LDFLAGS $EXEC_LDFLAGS $PLATFORM_LDFLAGS" \
+ CCFLAGS="$CXXFLAGS -L $STATIC_LIB_DEP_DIR" \
+ LIBS="lz4 gcc stdc++" \
+ LIBPATH="$ROCKSDB_PATH" \
+ CPPPATH="$ROCKSDB_PATH/include" \
+ -j32 \
+ --allocator=$ALLOC \
+ --nostrip \
+ --opt=on \
+ --disable-minimum-compiler-version-enforcement \
+ --use-system-snappy \
+ --disable-warnings-as-errors \
+ $EXTRA_CMD $*
diff --git a/c-deps/rocksdb/build_tools/fbcode_config.sh b/c-deps/rocksdb/build_tools/fbcode_config.sh
new file mode 100644
index 0000000000..b8609a11c6
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/fbcode_config.sh
@@ -0,0 +1,156 @@
+#!/bin/sh
+#
+# Set environment variables so that we can compile rocksdb using
+# fbcode settings. It uses the latest g++ and clang compilers and also
+# uses jemalloc
+# Environment variables that change the behavior of this script:
+# PIC_BUILD -- if true, it will only take PIC versions of libraries from fbcode. Libraries that don't have a PIC variant will not be included
+
+
+BASEDIR=`dirname $BASH_SOURCE`
+source "$BASEDIR/dependencies.sh"
+
+CFLAGS=""
+
+# libgcc
+LIBGCC_INCLUDE="$LIBGCC_BASE/include"
+LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
+
+# glibc
+GLIBC_INCLUDE="$GLIBC_BASE/include"
+GLIBC_LIBS=" -L $GLIBC_BASE/lib"
+
+# snappy
+SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
+if test -z $PIC_BUILD; then
+ SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
+else
+ SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
+fi
+CFLAGS+=" -DSNAPPY"
+
+if test -z $PIC_BUILD; then
+ # location of zlib headers and libraries
+ ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
+ ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
+ CFLAGS+=" -DZLIB"
+
+ # location of bzip headers and libraries
+ BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
+ BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
+ CFLAGS+=" -DBZIP2"
+
+ LZ4_INCLUDE=" -I $LZ4_BASE/include/"
+ LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
+ CFLAGS+=" -DLZ4"
+
+ ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
+ ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
+ CFLAGS+=" -DZSTD"
+fi
+
+# location of gflags headers and libraries
+GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
+if test -z $PIC_BUILD; then
+ GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
+else
+ GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags_pic.a"
+fi
+CFLAGS+=" -DGFLAGS=gflags"
+
+# location of jemalloc
+JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
+JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc.a"
+
+if test -z $PIC_BUILD; then
+ # location of numa
+ NUMA_INCLUDE=" -I $NUMA_BASE/include/"
+ NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
+ CFLAGS+=" -DNUMA"
+
+ # location of libunwind
+ LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
+fi
+
+# location of TBB
+TBB_INCLUDE=" -isystem $TBB_BASE/include/"
+if test -z $PIC_BUILD; then
+ TBB_LIBS="$TBB_BASE/lib/libtbb.a"
+else
+ TBB_LIBS="$TBB_BASE/lib/libtbb_pic.a"
+fi
+CFLAGS+=" -DTBB"
+
+# use Intel SSE support for checksum calculations
+export USE_SSE=1
+
+BINUTILS="$BINUTILS_BASE/bin"
+AR="$BINUTILS/ar"
+
+DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
+
+STDLIBS="-L $GCC_BASE/lib64"
+
+CLANG_BIN="$CLANG_BASE/bin"
+CLANG_LIB="$CLANG_BASE/lib"
+CLANG_SRC="$CLANG_BASE/../../src"
+
+CLANG_ANALYZER="$CLANG_BIN/clang++"
+CLANG_SCAN_BUILD="$CLANG_SRC/llvm/tools/clang/tools/scan-build/bin/scan-build"
+
+if [ -z "$USE_CLANG" ]; then
+ # gcc
+ CC="$GCC_BASE/bin/gcc"
+ CXX="$GCC_BASE/bin/g++"
+
+ CFLAGS+=" -B$BINUTILS/gold"
+ CFLAGS+=" -isystem $GLIBC_INCLUDE"
+ CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+ JEMALLOC=1
+else
+ # clang
+ CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
+ CC="$CLANG_BIN/clang"
+ CXX="$CLANG_BIN/clang++"
+
+ KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
+
+ CFLAGS+=" -B$BINUTILS/gold -nostdinc -nostdlib"
+ CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x "
+ CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x/x86_64-facebook-linux "
+ CFLAGS+=" -isystem $GLIBC_INCLUDE"
+ CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+ CFLAGS+=" -isystem $CLANG_INCLUDE"
+ CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
+ CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
+ CFLAGS+=" -Wno-expansion-to-defined "
+ CXXFLAGS="-nostdinc++"
+fi
+
+CFLAGS+=" $DEPS_INCLUDE"
+CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
+CXXFLAGS+=" $CFLAGS"
+
+EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
+EXEC_LDFLAGS+=" -B$BINUTILS/gold"
+EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-5-glibc-2.23/lib/ld.so"
+EXEC_LDFLAGS+=" $LIBUNWIND"
+EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-5-glibc-2.23/lib"
+# required by libtbb
+EXEC_LDFLAGS+=" -ldl"
+
+PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
+
+EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS"
+
+VALGRIND_VER="$VALGRIND_BASE/bin/"
+
+LUA_PATH="$LUA_BASE"
+
+if test -z $PIC_BUILD; then
+ LUA_LIB=" $LUA_PATH/lib/liblua.a"
+else
+ LUA_LIB=" $LUA_PATH/lib/liblua_pic.a"
+fi
+
+export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
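+
+# A rough usage sketch: this file is intended to be sourced (normally by
+# build_detect_platform when an fbcode checkout is present) before running
+# make, e.g.
+#   source build_tools/fbcode_config.sh && make static_lib
+# The `static_lib` target is illustrative; any RocksDB make target applies.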
diff --git a/c-deps/rocksdb/build_tools/fbcode_config4.8.1.sh b/c-deps/rocksdb/build_tools/fbcode_config4.8.1.sh
new file mode 100644
index 0000000000..f5b8334db2
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/fbcode_config4.8.1.sh
@@ -0,0 +1,115 @@
+#!/bin/sh
+#
+# Set environment variables so that we can compile rocksdb using
+# fbcode settings. It uses the latest g++ compiler and also
+# uses jemalloc
+
+BASEDIR=`dirname $BASH_SOURCE`
+source "$BASEDIR/dependencies_4.8.1.sh"
+
+# location of libgcc
+LIBGCC_INCLUDE="$LIBGCC_BASE/include"
+LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
+
+# location of glibc
+GLIBC_INCLUDE="$GLIBC_BASE/include"
+GLIBC_LIBS=" -L $GLIBC_BASE/lib"
+
+# location of snappy headers and libraries
+SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include"
+SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
+
+# location of zlib headers and libraries
+ZLIB_INCLUDE=" -I $ZLIB_BASE/include"
+ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
+
+# location of bzip headers and libraries
+BZIP2_INCLUDE=" -I $BZIP2_BASE/include/"
+BZIP2_LIBS=" $BZIP2_BASE/lib/libbz2.a"
+
+LZ4_INCLUDE=" -I $LZ4_BASE/include"
+LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
+
+ZSTD_INCLUDE=" -I $ZSTD_BASE/include"
+ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
+
+# location of gflags headers and libraries
+GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
+GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
+
+# location of jemalloc
+JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include"
+JEMALLOC_LIB="$JEMALLOC_BASE/lib/libjemalloc.a"
+
+# location of numa
+NUMA_INCLUDE=" -I $NUMA_BASE/include/"
+NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
+
+# location of libunwind
+LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
+
+# location of tbb
+TBB_INCLUDE=" -isystem $TBB_BASE/include/"
+TBB_LIBS="$TBB_BASE/lib/libtbb.a"
+
+# use Intel SSE support for checksum calculations
+export USE_SSE=1
+
+BINUTILS="$BINUTILS_BASE/bin"
+AR="$BINUTILS/ar"
+
+DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP2_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
+
+STDLIBS="-L $GCC_BASE/lib64"
+
+if [ -z "$USE_CLANG" ]; then
+ # gcc
+ CC="$GCC_BASE/bin/gcc"
+ CXX="$GCC_BASE/bin/g++"
+
+ CFLAGS="-B$BINUTILS/gold -m64 -mtune=generic"
+ CFLAGS+=" -isystem $GLIBC_INCLUDE"
+ CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+ JEMALLOC=1
+else
+ # clang
+ CLANG_BIN="$CLANG_BASE/bin"
+ CLANG_LIB="$CLANG_BASE/lib"
+ CLANG_INCLUDE="$CLANG_LIB/clang/*/include"
+ CC="$CLANG_BIN/clang"
+ CXX="$CLANG_BIN/clang++"
+
+ KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include/"
+
+ CFLAGS="-B$BINUTILS/gold -nostdinc -nostdlib"
+ CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1 "
+ CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1/x86_64-facebook-linux "
+ CFLAGS+=" -isystem $GLIBC_INCLUDE"
+ CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+ CFLAGS+=" -isystem $CLANG_INCLUDE"
+ CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
+ CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
+ CXXFLAGS="-nostdinc++"
+fi
+
+CFLAGS+=" $DEPS_INCLUDE"
+CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
+CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DZSTD -DNUMA -DTBB"
+CXXFLAGS+=" $CFLAGS"
+
+EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
+EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib/ld.so"
+EXEC_LDFLAGS+=" $LIBUNWIND"
+EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib"
+# required by libtbb
+EXEC_LDFLAGS+=" -ldl"
+
+PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
+
+EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS"
+
+VALGRIND_VER="$VALGRIND_BASE/bin/"
+
+LUA_PATH="$LUA_BASE"
+
+export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE LUA_PATH
diff --git a/c-deps/rocksdb/build_tools/format-diff.sh b/c-deps/rocksdb/build_tools/format-diff.sh
new file mode 100755
index 0000000000..81221ed9a4
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/format-diff.sh
@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+# If the clang-format-diff.py command is not specified (via CLANG_FORMAT_DIFF),
+# we assume it is accessible directly without any path.
+if [ -z $CLANG_FORMAT_DIFF ]
+then
+CLANG_FORMAT_DIFF="clang-format-diff.py"
+fi
+
+# Check clang-format-diff.py
+if ! which $CLANG_FORMAT_DIFF &> /dev/null
+then
+ echo "You didn't have clang-format-diff.py and/or clang-format available in your computer!"
+ echo "You can download clang-format-diff.py by running: "
+ echo " curl --location http://goo.gl/iUW1u2 -o ${CLANG_FORMAT_DIFF}"
+ echo "You can download clang-format by running: "
+ echo " brew install clang-format"
+ echo "Then, move both files (i.e. ${CLANG_FORMAT_DIFF} and clang-format) to some directory within PATH=${PATH}"
+ exit 128
+fi
+
+# Check argparse, a library that clang-format-diff.py requires.
+python 2>/dev/null << EOF
+import argparse
+EOF
+
+if [ "$?" != 0 ]
+then
+ echo "To run clang-format-diff.py, we'll need the library "argparse" to be"
+ echo "installed. You can try either of the follow ways to install it:"
+ echo " 1. Manually download argparse: https://pypi.python.org/pypi/argparse"
+ echo " 2. easy_install argparse (if you have easy_install)"
+ echo " 3. pip install argparse (if you have pip)"
+ exit 129
+fi
+
+# TODO(kailiu) The following work is not complete since we still need to figure
+# out how to add the files modified by the pre-commit hook to git's commit index.
+#
+# Check if this script has already been added to pre-commit hook.
+# Will suggest the user add this script to the pre-commit hook if their
+# pre-commit hook is empty.
+# PRE_COMMIT_SCRIPT_PATH="`git rev-parse --show-toplevel`/.git/hooks/pre-commit"
+# if ! ls $PRE_COMMIT_SCRIPT_PATH &> /dev/null
+# then
+# echo "Would you like to add this script to pre-commit hook, which will do "
+# echo -n "the format check for all the affected lines before you check in (y/n):"
+# read add_to_hook
+# if [ "$add_to_hook" == "y" ]
+# then
+# ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
+# fi
+# fi
+set -e
+
+uncommitted_code=`git diff HEAD`
+LAST_MASTER=`git merge-base master HEAD`
+
+# If there are no uncommitted changes, we assume the user is doing a post-commit
+# format check, in which case we check the lines modified since the last commit
+# from master. Otherwise, we check the format of the uncommitted code only.
+if [ -z "$uncommitted_code" ]
+then
+ # Check the format of last commit
+ diffs=$(git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -p 1)
+else
+ # Check the format of uncommitted lines,
+ diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
+fi
+
+if [ -z "$diffs" ]
+then
+ echo "Nothing needs to be reformatted!"
+ exit 0
+fi
+
+# Highlight the insertion/deletion from the clang-format-diff.py's output
+COLOR_END="\033[0m"
+COLOR_RED="\033[0;31m"
+COLOR_GREEN="\033[0;32m"
+
+echo -e "Detect lines that doesn't follow the format rules:\r"
+# Add the color to the diff. lines added will be green; lines removed will be red.
+echo "$diffs" |
+ sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
+ sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"
+
+if [[ "$OPT" == *"-DTRAVIS"* ]]
+then
+ exit 1
+fi
+
+echo -e "Would you like to fix the format automatically (y/n): \c"
+
+# Make sure under any mode, we can read user input.
+exec < /dev/tty
+read to_fix
+
+if [ "$to_fix" != "y" ]
+then
+ exit 1
+fi
+
+# Do in-place format adjustment.
+if [ -z "$uncommitted_code" ]
+then
+ git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -i -p 1
+else
+ git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -i -p 1
+fi
+echo "Files reformatted!"
+
+# Amend to the last commit if the user did the post-commit format check
+if [ -z "$uncommitted_code" ]; then
+ echo -e "Would you like to amend the changes to last commit (`git log HEAD --oneline | head -1`)? (y/n): \c"
+ read to_amend
+
+ if [ "$to_amend" == "y" ]
+ then
+ git commit -a --amend --reuse-message HEAD
+ echo "Amended to last commit"
+ fi
+fi
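+
+# Typical invocations (illustrative): run from the repository root right after
+# editing files to check only the uncommitted lines, or right after a commit
+# to check the lines added since master:
+#   build_tools/format-diff.sh
+#   CLANG_FORMAT_DIFF=/path/to/clang-format-diff.py build_tools/format-diff.sh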
diff --git a/c-deps/rocksdb/build_tools/gnu_parallel b/c-deps/rocksdb/build_tools/gnu_parallel
new file mode 100755
index 0000000000..abbf8f1008
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/gnu_parallel
@@ -0,0 +1,7936 @@
+#!/usr/bin/env perl
+
+# Copyright (C) 2007,2008,2009,2010,2011,2012,2013,2014 Ole Tange and
+# Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>,
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+
+# open3 used in Job::start
+use IPC::Open3;
+# &WNOHANG used in reaper
+use POSIX qw(:sys_wait_h setsid ceil :errno_h);
+# gensym used in Job::start
+use Symbol qw(gensym);
+# tempfile used in Job::start
+use File::Temp qw(tempfile tempdir);
+# mkpath used in openresultsfile
+use File::Path;
+# GetOptions used in get_options_from_array
+use Getopt::Long;
+# Used to ensure code quality
+use strict;
+use File::Basename;
+
+if(not $ENV{HOME}) {
+ # $ENV{HOME} is sometimes not set if called from PHP
+ ::warning("\$HOME not set. Using /tmp\n");
+ $ENV{HOME} = "/tmp";
+}
+
+save_stdin_stdout_stderr();
+save_original_signal_handler();
+parse_options();
+::debug("init", "Open file descriptors: ", join(" ",keys %Global::fd), "\n");
+my $number_of_args;
+if($Global::max_number_of_args) {
+ $number_of_args=$Global::max_number_of_args;
+} elsif ($opt::X or $opt::m or $opt::xargs) {
+ $number_of_args = undef;
+} else {
+ $number_of_args = 1;
+}
+
+my @command;
+@command = @ARGV;
+
+my @fhlist;
+if($opt::pipepart) {
+ @fhlist = map { open_or_exit($_) } "/dev/null";
+} else {
+ @fhlist = map { open_or_exit($_) } @opt::a;
+ if(not @fhlist and not $opt::pipe) {
+ @fhlist = (*STDIN);
+ }
+}
+
+if($opt::skip_first_line) {
+ # Skip the first line for the first file handle
+ my $fh = $fhlist[0];
+ <$fh>;
+}
+if($opt::header and not $opt::pipe) {
+ my $fh = $fhlist[0];
+ # split with colsep or \t
+ # $header force $colsep = \t if undef?
+ my $delimiter = $opt::colsep;
+ $delimiter ||= "\$";
+ my $id = 1;
+ for my $fh (@fhlist) {
+ my $line = <$fh>;
+ chomp($line);
+ ::debug("init", "Delimiter: '$delimiter'");
+ for my $s (split /$delimiter/o, $line) {
+ ::debug("init", "Colname: '$s'");
+ # Replace {colname} with {2}
+ # TODO accept configurable short hands
+ # TODO how to deal with headers in {=...=}
+ for(@command) {
+ s:\{$s(|/|//|\.|/\.)\}:\{$id$1\}:g;
+ }
+ $Global::input_source_header{$id} = $s;
+ $id++;
+ }
+ }
+} else {
+ my $id = 1;
+ for my $fh (@fhlist) {
+ $Global::input_source_header{$id} = $id;
+ $id++;
+ }
+}
+
+if($opt::filter_hosts and (@opt::sshlogin or @opt::sshloginfile)) {
+ # Parallel check all hosts are up. Remove hosts that are down
+ filter_hosts();
+}
+
+if($opt::nonall or $opt::onall) {
+ onall(@command);
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+}
+
+# TODO --transfer foo/./bar --cleanup
+# multiple --transfer and --basefile with different /./
+
+$Global::JobQueue = JobQueue->new(
+ \@command,\@fhlist,$Global::ContextReplace,$number_of_args,\@Global::ret_files);
+
+if($opt::eta or $opt::bar) {
+ # Count the number of jobs before starting any
+ $Global::JobQueue->total_jobs();
+}
+if($opt::pipepart) {
+ @Global::cat_partials = map { pipe_part_files($_) } @opt::a;
+ # Unget the command as many times as there are parts
+ $Global::JobQueue->{'commandlinequeue'}->unget(
+ map { $Global::JobQueue->{'commandlinequeue'}->get() } @Global::cat_partials
+ );
+}
+for my $sshlogin (values %Global::host) {
+ $sshlogin->max_jobs_running();
+}
+
+init_run_jobs();
+my $sem;
+if($Global::semaphore) {
+ $sem = acquire_semaphore();
+}
+$SIG{TERM} = \&start_no_new_jobs;
+
+start_more_jobs();
+if(not $opt::pipepart) {
+ if($opt::pipe) {
+ spreadstdin();
+ }
+}
+::debug("init", "Start draining\n");
+drain_job_queue();
+::debug("init", "Done draining\n");
+reaper();
+::debug("init", "Done reaping\n");
+if($opt::pipe and @opt::a) {
+ for my $job (@Global::tee_jobs) {
+ unlink $job->fh(2,"name");
+ $job->set_fh(2,"name","");
+ $job->print();
+ unlink $job->fh(1,"name");
+ }
+}
+::debug("init", "Cleaning\n");
+cleanup();
+if($Global::semaphore) {
+ $sem->release();
+}
+for(keys %Global::sshmaster) {
+ kill "TERM", $_;
+}
+::debug("init", "Halt\n");
+if($opt::halt_on_error) {
+ wait_and_exit($Global::halt_on_error_exitstatus);
+} else {
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+}
+
+sub __PIPE_MODE__ {}
+
+sub pipe_part_files {
+ # Input:
+ # $file = the file to read
+ # Returns:
+ # @commands that will cat_partial each part
+ my ($file) = @_;
+ my $buf = "";
+ my $header = find_header(\$buf,open_or_exit($file));
+ # find positions
+ my @pos = find_split_positions($file,$opt::blocksize,length $header);
+ # Make @cat_partials
+ my @cat_partials = ();
+ for(my $i=0; $i<$#pos; $i++) {
+ push @cat_partials, cat_partial($file, 0, length($header), $pos[$i], $pos[$i+1]);
+ }
+ # Remote exec should look like:
+ # ssh -oLogLevel=quiet lo 'eval `echo $SHELL | grep "/t\{0,1\}csh" > /dev/null && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\; setenv PARALLEL_PID '$PARALLEL_PID' || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \$SHELL\ \|\ grep\ \"/t\\\{0,1\\\}csh\"\ \>\ /dev/null\ \&\&\ setenv\ FOO\ /tmp/foo\ \|\|\ export\ FOO=/tmp/foo\; \(wc\ -\ \$FOO\)
+ # ssh -tt not allowed. Remote will die due to broken pipe anyway.
+ # TODO test remote with --fifo / --cat
+ return @cat_partials;
+}
+
+sub find_header {
+ # Input:
+ # $buf_ref = reference to read-in buffer
+ # $fh = filehandle to read from
+ # Uses:
+ # $opt::header
+ # $opt::blocksize
+ # Returns:
+ # $header string
+ my ($buf_ref, $fh) = @_;
+ my $header = "";
+ if($opt::header) {
+ if($opt::header eq ":") { $opt::header = "(.*\n)"; }
+ # Number = number of lines
+ $opt::header =~ s/^(\d+)$/"(.*\n)"x$1/e;
+ while(read($fh,substr($$buf_ref,length $$buf_ref,0),$opt::blocksize)) {
+ if($$buf_ref=~s/^($opt::header)//) {
+ $header = $1;
+ last;
+ }
+ }
+ }
+ return $header;
+}
+
+sub find_split_positions {
+ # Input:
+ # $file = the file to read
+ # $block = (minimal) --block-size of each chunk
+ # $headerlen = length of header to be skipped
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # @positions of block start/end
+ my($file, $block, $headerlen) = @_;
+ my $size = -s $file;
+ $block = int $block;
+ # The optimal dd blocksize for mint, redhat, solaris, openbsd = 2^17..2^20
+ # The optimal dd blocksize for freebsd = 2^15..2^17
+ my $dd_block_size = 131072; # 2^17
+ my @pos;
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $fh = ::open_or_exit($file);
+ push(@pos,$headerlen);
+ for(my $pos = $block+$headerlen; $pos < $size; $pos += $block) {
+ my $buf;
+ seek($fh, $pos, 0) || die;
+ while(read($fh,substr($buf,length $buf,0),$dd_block_size)) {
+ if($opt::regexp) {
+ # If match /$recend$recstart/ => Record position
+ if($buf =~ /(.*$recend)$recstart/os) {
+ my $i = length($1);
+ push(@pos,$pos+$i);
+ # Start looking for next record _after_ this match
+ $pos += $i;
+ last;
+ }
+ } else {
+ # If match $recend$recstart => Record position
+ my $i = index($buf,$recendrecstart);
+ if($i != -1) {
+ push(@pos,$pos+$i);
+ # Start looking for next record _after_ this match
+ $pos += $i;
+ last;
+ }
+ }
+ }
+ }
+ push(@pos,$size);
+ close $fh;
+ return @pos;
+}
+
+sub cat_partial {
+ # Input:
+ # $file = the file to read
+ # ($start, $end, [$start2, $end2, ...]) = start byte, end byte
+ # Returns:
+ # Efficient perl command to copy $start..$end, $start2..$end2, ... to stdout
+ my($file, @start_end) = @_;
+ my($start, $i);
+ # Convert start_end to start_len
+ my @start_len = map { if(++$i % 2) { $start = $_; } else { $_-$start } } @start_end;
+ return "<". shell_quote_scalar($file) .
+ q{ perl -e 'while(@ARGV) { sysseek(STDIN,shift,0) || die; $left = shift; while($read = sysread(STDIN,$buf, ($left > 32768 ? 32768 : $left))){ $left -= $read; syswrite(STDOUT,$buf); } }' } .
+ " @start_len";
+}
+
+sub spreadstdin {
+ # read a record
+ # Spawn a job and print the record to it.
+ # Uses:
+ # $opt::blocksize
+ # STDIN
+# $opt::r
+ # $Global::max_lines
+ # $Global::max_number_of_args
+ # $opt::regexp
+ # $Global::start_no_new_jobs
+ # $opt::roundrobin
+ # %Global::running
+
+ my $buf = "";
+ my ($recstart,$recend) = recstartrecend();
+ my $recendrecstart = $recend.$recstart;
+ my $chunk_number = 1;
+ my $one_time_through;
+ my $blocksize = $opt::blocksize;
+ my $in = *STDIN;
+ my $header = find_header(\$buf,$in);
+ while(1) {
+ my $anything_written = 0;
+ if(not read($in,substr($buf,length $buf,0),$blocksize)) {
+ # End-of-file
+ $chunk_number != 1 and last;
+ # Force the while-loop once if everything was read by header reading
+ $one_time_through++ and last;
+ }
+ if($opt::r) {
+ # Remove empty lines
+ $buf =~ s/^\s*\n//gm;
+ if(length $buf == 0) {
+ next;
+ }
+ }
+ if($Global::max_lines and not $Global::max_number_of_args) {
+ # Read n-line records
+ my $n_lines = $buf =~ tr/\n/\n/;
+ my $last_newline_pos = rindex($buf,"\n");
+ while($n_lines % $Global::max_lines) {
+ $n_lines--;
+ $last_newline_pos = rindex($buf,"\n",$last_newline_pos-1);
+ }
+ # Chop at $last_newline_pos as that is where n-line record ends
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$last_newline_pos+1);
+ substr($buf,0,$last_newline_pos+1) = "";
+ } elsif($opt::regexp) {
+ if($Global::max_number_of_args) {
+ # -N => (start..*?end){n}
+ # -L -N => (start..*?end){n*l}
+ my $read_n_lines = $Global::max_number_of_args * ($Global::max_lines || 1);
+ while($buf =~ s/((?:$recstart.*?$recend){$read_n_lines})($recstart.*)$/$2/os) {
+ # Copy to modifiable variable
+ my $b = $1;
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$b,
+ $recstart,$recend,length $1);
+ }
+ } else {
+ # Find the last recend-recstart in $buf
+ if($buf =~ s/(.*$recend)($recstart.*?)$/$2/os) {
+ # Copy to modifiable variable
+ my $b = $1;
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$b,
+ $recstart,$recend,length $1);
+ }
+ }
+ } else {
+ if($Global::max_number_of_args) {
+ # -N => (start..*?end){n}
+ my $i = 0;
+ my $read_n_lines = $Global::max_number_of_args * ($Global::max_lines || 1);
+ while(($i = nindex(\$buf,$recendrecstart,$read_n_lines)) != -1) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ substr($buf,0,$i) = "";
+ }
+ } else {
+ # Find the last recend-recstart in $buf
+ my $i = rindex($buf,$recendrecstart);
+ if($i != -1) {
+ $i += length $recend; # find the actual splitting location
+ $anything_written +=
+ write_record_to_pipe($chunk_number++,\$header,\$buf,
+ $recstart,$recend,$i);
+ substr($buf,0,$i) = "";
+ }
+ }
+ }
+ if(not $anything_written and not eof($in)) {
+ # Nothing was written - maybe the block size < record size?
+ # Increase blocksize exponentially
+ my $old_blocksize = $blocksize;
+ $blocksize = ceil($blocksize * 1.3 + 1);
+ ::warning("A record was longer than $old_blocksize. " .
+ "Increasing to --blocksize $blocksize\n");
+ }
+ }
+ ::debug("init", "Done reading input\n");
+
+ # If there is anything left in the buffer write it
+ substr($buf,0,0) = "";
+ write_record_to_pipe($chunk_number++,\$header,\$buf,$recstart,$recend,length $buf);
+
+ $Global::start_no_new_jobs ||= 1;
+ if($opt::roundrobin) {
+ for my $job (values %Global::running) {
+ close $job->fh(0,"w");
+ }
+ my %incomplete_jobs = %Global::running;
+ my $sleep = 1;
+ while(keys %incomplete_jobs) {
+ my $something_written = 0;
+ for my $pid (keys %incomplete_jobs) {
+ my $job = $incomplete_jobs{$pid};
+ if($job->stdin_buffer_length()) {
+ $something_written += $job->non_block_write();
+ } else {
+ delete $incomplete_jobs{$pid}
+ }
+ }
+ if($something_written) {
+ $sleep = $sleep/2+0.001;
+ }
+ $sleep = ::reap_usleep($sleep);
+ }
+ }
+}
+
+sub recstartrecend {
+ # Uses:
+ # $opt::recstart
+ # $opt::recend
+ # Returns:
+ # $recstart,$recend with default values and regexp conversion
+ my($recstart,$recend);
+ if(defined($opt::recstart) and defined($opt::recend)) {
+ # If both --recstart and --recend is given then both must match
+ $recstart = $opt::recstart;
+ $recend = $opt::recend;
+ } elsif(defined($opt::recstart)) {
+ # If --recstart is given it must match start of record
+ $recstart = $opt::recstart;
+ $recend = "";
+ } elsif(defined($opt::recend)) {
+ # If --recend is given then it must match end of record
+ $recstart = "";
+ $recend = $opt::recend;
+ }
+
+ if($opt::regexp) {
+ # If $recstart/$recend contains '|' this should only apply to the regexp
+ $recstart = "(?:".$recstart.")";
+ $recend = "(?:".$recend.")";
+ } else {
+ # $recstart/$recend = printf strings (\n)
+ $recstart =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ $recend =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
+ }
+ return ($recstart,$recend);
+}
+
+sub nindex {
+ # See if string is in buffer N times
+ # Returns:
+ # the position where the Nth copy is found
+ my ($buf_ref, $str, $n) = @_;
+ my $i = 0;
+ for(1..$n) {
+ $i = index($$buf_ref,$str,$i+1);
+ if($i == -1) { last }
+ }
+ return $i;
+}
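+
+# Example (illustrative): with $buf = "a\nb\nc\n", nindex(\$buf, "\n", 2)
+# returns 3, the offset of the second newline; it returns -1 when an $n-th
+# occurrence cannot be found.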
+
+{
+ my @robin_queue;
+
+ sub round_robin_write {
+ # Input:
+ # $header_ref = ref to $header string
+ # $block_ref = ref to $block to be written
+ # $recstart = record start string
+ # $recend = record end string
+ # $endpos = end position of $block
+ # Uses:
+ # %Global::running
+ my ($header_ref,$block_ref,$recstart,$recend,$endpos) = @_;
+ my $something_written = 0;
+ my $block_passed = 0;
+ my $sleep = 1;
+ while(not $block_passed) {
+ # Continue flushing existing buffers
+ # until one is empty and a new block is passed
+ # Make a queue to spread the blocks evenly
+ if(not @robin_queue) {
+ push @robin_queue, values %Global::running;
+ }
+ while(my $job = shift @robin_queue) {
+ if($job->stdin_buffer_length() > 0) {
+ $something_written += $job->non_block_write();
+ } else {
+ $job->set_stdin_buffer($header_ref,$block_ref,$endpos,$recstart,$recend);
+ $block_passed = 1;
+ $job->set_virgin(0);
+ $something_written += $job->non_block_write();
+ last;
+ }
+ }
+ $sleep = ::reap_usleep($sleep);
+ }
+ return $something_written;
+ }
+}
+
+sub write_record_to_pipe {
+ # Fork then
+ # Write record from pos 0 .. $endpos to pipe
+ # Input:
+ # $chunk_number = sequence number - to see if already run
+ # $header_ref = reference to header string to prepend
+ # $record_ref = reference to record to write
+ # $recstart = start string of record
+ # $recend = end string of record
+ # $endpos = position in $record_ref where record ends
+ # Uses:
+ # $Global::job_already_run
+ # $opt::roundrobin
+ # @Global::virgin_jobs
+ # Returns:
+ # Number of chunks written (0 or 1)
+ my ($chunk_number,$header_ref,$record_ref,$recstart,$recend,$endpos) = @_;
+ if($endpos == 0) { return 0; }
+ if(vec($Global::job_already_run,$chunk_number,1)) { return 1; }
+ if($opt::roundrobin) {
+ return round_robin_write($header_ref,$record_ref,$recstart,$recend,$endpos);
+ }
+ # If no virgin found, backoff
+ my $sleep = 0.0001; # 0.01 ms - better performance on highend
+ while(not @Global::virgin_jobs) {
+ ::debug("pipe", "No virgin jobs");
+ $sleep = ::reap_usleep($sleep);
+ # Jobs may not be started because of loadavg
+ # or too little time between each ssh login.
+ start_more_jobs();
+ }
+ my $job = shift @Global::virgin_jobs;
+ # Job is no longer virgin
+ $job->set_virgin(0);
+ if(fork()) {
+ # Skip
+ } else {
+ # Chop off at $endpos as we do not know how many rec_sep will
+ # be removed.
+ substr($$record_ref,$endpos,length $$record_ref) = "";
+ # Remove rec_sep
+ if($opt::remove_rec_sep) {
+ Job::remove_rec_sep($record_ref,$recstart,$recend);
+ }
+ $job->write($header_ref);
+ $job->write($record_ref);
+ close $job->fh(0,"w");
+ exit(0);
+ }
+ close $job->fh(0,"w");
+ return 1;
+}
+
+sub __SEM_MODE__ {}
+
+sub acquire_semaphore {
+ # Acquires semaphore. If needed: spawns to the background
+ # Uses:
+ # @Global::host
+ # Returns:
+ # The semaphore to be released when jobs is complete
+ $Global::host{':'} = SSHLogin->new(":");
+ my $sem = Semaphore->new($Semaphore::name,$Global::host{':'}->max_jobs_running());
+ $sem->acquire();
+ if($Semaphore::fg) {
+ # skip
+ } else {
+ # If run in the background, the PID will change
+ # therefore release and re-acquire the semaphore
+ $sem->release();
+ if(fork()) {
+ exit(0);
+ } else {
+ # child
+ # Get a semaphore for this pid
+ ::die_bug("Can't start a new session: $!") if setsid() == -1;
+ $sem = Semaphore->new($Semaphore::name,$Global::host{':'}->max_jobs_running());
+ $sem->acquire();
+ }
+ }
+ return $sem;
+}
+
+sub __PARSE_OPTIONS__ {}
+
+sub options_hash {
+ # Returns:
+ # %hash = the GetOptions config
+ return
+ ("debug|D=s" => \$opt::D,
+ "xargs" => \$opt::xargs,
+ "m" => \$opt::m,
+ "X" => \$opt::X,
+ "v" => \@opt::v,
+ "joblog=s" => \$opt::joblog,
+ "results|result|res=s" => \$opt::results,
+ "resume" => \$opt::resume,
+ "resume-failed|resumefailed" => \$opt::resume_failed,
+ "silent" => \$opt::silent,
+ #"silent-error|silenterror" => \$opt::silent_error,
+ "keep-order|keeporder|k" => \$opt::keeporder,
+ "group" => \$opt::group,
+ "g" => \$opt::retired,
+ "ungroup|u" => \$opt::ungroup,
+ "linebuffer|linebuffered|line-buffer|line-buffered" => \$opt::linebuffer,
+ "tmux" => \$opt::tmux,
+ "null|0" => \$opt::0,
+ "quote|q" => \$opt::q,
+ # Replacement strings
+ "parens=s" => \$opt::parens,
+ "rpl=s" => \@opt::rpl,
+ "plus" => \$opt::plus,
+ "I=s" => \$opt::I,
+ "extensionreplace|er=s" => \$opt::U,
+ "U=s" => \$opt::retired,
+ "basenamereplace|bnr=s" => \$opt::basenamereplace,
+ "dirnamereplace|dnr=s" => \$opt::dirnamereplace,
+ "basenameextensionreplace|bner=s" => \$opt::basenameextensionreplace,
+ "seqreplace=s" => \$opt::seqreplace,
+ "slotreplace=s" => \$opt::slotreplace,
+ "jobs|j=s" => \$opt::jobs,
+ "delay=f" => \$opt::delay,
+ "sshdelay=f" => \$opt::sshdelay,
+ "load=s" => \$opt::load,
+ "noswap" => \$opt::noswap,
+ "max-line-length-allowed" => \$opt::max_line_length_allowed,
+ "number-of-cpus" => \$opt::number_of_cpus,
+ "number-of-cores" => \$opt::number_of_cores,
+ "use-cpus-instead-of-cores" => \$opt::use_cpus_instead_of_cores,
+ "shellquote|shell_quote|shell-quote" => \$opt::shellquote,
+ "nice=i" => \$opt::nice,
+ "timeout=s" => \$opt::timeout,
+ "tag" => \$opt::tag,
+ "tagstring|tag-string=s" => \$opt::tagstring,
+ "onall" => \$opt::onall,
+ "nonall" => \$opt::nonall,
+ "filter-hosts|filterhosts|filter-host" => \$opt::filter_hosts,
+ "sshlogin|S=s" => \@opt::sshlogin,
+ "sshloginfile|slf=s" => \@opt::sshloginfile,
+ "controlmaster|M" => \$opt::controlmaster,
+ "return=s" => \@opt::return,
+ "trc=s" => \@opt::trc,
+ "transfer" => \$opt::transfer,
+ "cleanup" => \$opt::cleanup,
+ "basefile|bf=s" => \@opt::basefile,
+ "B=s" => \$opt::retired,
+ "ctrlc|ctrl-c" => \$opt::ctrlc,
+ "noctrlc|no-ctrlc|no-ctrl-c" => \$opt::noctrlc,
+ "workdir|work-dir|wd=s" => \$opt::workdir,
+ "W=s" => \$opt::retired,
+ "tmpdir=s" => \$opt::tmpdir,
+ "tempdir=s" => \$opt::tmpdir,
+ "use-compress-program|compress-program=s" => \$opt::compress_program,
+ "use-decompress-program|decompress-program=s" => \$opt::decompress_program,
+ "compress" => \$opt::compress,
+ "tty" => \$opt::tty,
+ "T" => \$opt::retired,
+ "halt-on-error|halt=s" => \$opt::halt_on_error,
+ "H=i" => \$opt::retired,
+ "retries=i" => \$opt::retries,
+ "dry-run|dryrun" => \$opt::dryrun,
+ "progress" => \$opt::progress,
+ "eta" => \$opt::eta,
+ "bar" => \$opt::bar,
+ "arg-sep|argsep=s" => \$opt::arg_sep,
+ "arg-file-sep|argfilesep=s" => \$opt::arg_file_sep,
+ "trim=s" => \$opt::trim,
+ "env=s" => \@opt::env,
+ "recordenv|record-env" => \$opt::record_env,
+ "plain" => \$opt::plain,
+ "profile|J=s" => \@opt::profile,
+ "pipe|spreadstdin" => \$opt::pipe,
+ "robin|round-robin|roundrobin" => \$opt::roundrobin,
+ "recstart=s" => \$opt::recstart,
+ "recend=s" => \$opt::recend,
+ "regexp|regex" => \$opt::regexp,
+ "remove-rec-sep|removerecsep|rrs" => \$opt::remove_rec_sep,
+ "files|output-as-files|outputasfiles" => \$opt::files,
+ "block|block-size|blocksize=s" => \$opt::blocksize,
+ "tollef" => \$opt::retired,
+ "gnu" => \$opt::gnu,
+ "xapply" => \$opt::xapply,
+ "bibtex" => \$opt::bibtex,
+ "nn|nonotice|no-notice" => \$opt::no_notice,
+ # xargs-compatibility - implemented, man, testsuite
+ "max-procs|P=s" => \$opt::jobs,
+ "delimiter|d=s" => \$opt::d,
+ "max-chars|s=i" => \$opt::max_chars,
+ "arg-file|a=s" => \@opt::a,
+ "no-run-if-empty|r" => \$opt::r,
+ "replace|i:s" => \$opt::i,
+ "E=s" => \$opt::eof,
+ "eof|e:s" => \$opt::eof,
+ "max-args|n=i" => \$opt::max_args,
+ "max-replace-args|N=i" => \$opt::max_replace_args,
+ "colsep|col-sep|C=s" => \$opt::colsep,
+ "help|h" => \$opt::help,
+ "L=f" => \$opt::L,
+ "max-lines|l:f" => \$opt::max_lines,
+ "interactive|p" => \$opt::p,
+ "verbose|t" => \$opt::verbose,
+ "version|V" => \$opt::version,
+ "minversion|min-version=i" => \$opt::minversion,
+ "show-limits|showlimits" => \$opt::show_limits,
+ "exit|x" => \$opt::x,
+ # Semaphore
+ "semaphore" => \$opt::semaphore,
+ "semaphoretimeout=i" => \$opt::semaphoretimeout,
+ "semaphorename|id=s" => \$opt::semaphorename,
+ "fg" => \$opt::fg,
+ "bg" => \$opt::bg,
+ "wait" => \$opt::wait,
+ # Shebang #!/usr/bin/parallel --shebang
+ "shebang|hashbang" => \$opt::shebang,
+ "internal-pipe-means-argfiles" => \$opt::internal_pipe_means_argfiles,
+ "Y" => \$opt::retired,
+ "skip-first-line" => \$opt::skip_first_line,
+ "header=s" => \$opt::header,
+ "cat" => \$opt::cat,
+ "fifo" => \$opt::fifo,
+ "pipepart|pipe-part" => \$opt::pipepart,
+ "hgrp|hostgroup|hostgroups" => \$opt::hostgroups,
+ );
+}
+
+sub get_options_from_array {
+ # Run GetOptions on @array
+ # Input:
+ # $array_ref = ref to @ARGV to parse
+ # @keep_only = Keep only these options
+ # Uses:
+ # @ARGV
+ # Returns:
+ # true if parsing worked
+ # false if parsing failed
+ # @$array_ref is changed
+ my ($array_ref, @keep_only) = @_;
+ if(not @$array_ref) {
+ # Empty array: No need to look more at that
+ return 1;
+ }
+ # A bit of shuffling of @ARGV needed as GetOptionsFromArray is not
+ # supported everywhere
+ my @save_argv;
+ my $this_is_ARGV = (\@::ARGV == $array_ref);
+ if(not $this_is_ARGV) {
+ @save_argv = @::ARGV;
+ @::ARGV = @{$array_ref};
+ }
+ # If @keep_only set: Ignore all values except @keep_only
+ my %options = options_hash();
+ if(@keep_only) {
+ my (%keep,@dummy);
+ @keep{@keep_only} = @keep_only;
+ for my $k (grep { not $keep{$_} } keys %options) {
+ # Store the value of the option in @dummy
+ $options{$k} = \@dummy;
+ }
+ }
+ my $retval = GetOptions(%options);
+ if(not $this_is_ARGV) {
+ @{$array_ref} = @::ARGV;
+ @::ARGV = @save_argv;
+ }
+ return $retval;
+}
+
+sub parse_options {
+ # Returns: N/A
+ # Defaults:
+ $Global::version = 20141122;
+ $Global::progname = 'parallel';
+ $Global::infinity = 2**31;
+ $Global::debug = 0;
+ $Global::verbose = 0;
+ $Global::quoting = 0;
+ # Read-only table with default --rpl values
+ %Global::replace =
+ (
+ '{}' => '',
+ '{#}' => '1 $_=$job->seq()',
+ '{%}' => '1 $_=$job->slot()',
+ '{/}' => 's:.*/::',
+ '{//}' => '$Global::use{"File::Basename"} ||= eval "use File::Basename; 1;"; $_ = dirname($_);',
+ '{/.}' => 's:.*/::; s:\.[^/.]+$::;',
+ '{.}' => 's:\.[^/.]+$::',
+ );
+ %Global::plus =
+ (
+ # {} = {+/}/{/}
+ # = {.}.{+.} = {+/}/{/.}.{+.}
+ # = {..}.{+..} = {+/}/{/..}.{+..}
+ # = {...}.{+...} = {+/}/{/...}.{+...}
+ '{+/}' => 's:/[^/]*$::',
+ '{+.}' => 's:.*\.::',
+ '{+..}' => 's:.*\.([^.]*\.):$1:',
+ '{+...}' => 's:.*\.([^.]*\.[^.]*\.):$1:',
+ '{..}' => 's:\.[^/.]+$::; s:\.[^/.]+$::',
+ '{...}' => 's:\.[^/.]+$::; s:\.[^/.]+$::; s:\.[^/.]+$::',
+ '{/..}' => 's:.*/::; s:\.[^/.]+$::; s:\.[^/.]+$::',
+ '{/...}' => 's:.*/::; s:\.[^/.]+$::; s:\.[^/.]+$::; s:\.[^/.]+$::',
+ );
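+ # For illustration, assuming the input argument is /tmp/dir/file.tar.gz
+ # (a hypothetical path):
+ #   {}   => /tmp/dir/file.tar.gz    {.}   => /tmp/dir/file.tar
+ #   {/}  => file.tar.gz             {//}  => /tmp/dir
+ #   {/.} => file.tar                {#}   => job sequence number
+ # and, when --plus is given:
+ #   {+/} => /tmp/dir                {+.}  => gz
+ #   {..} => /tmp/dir/file           {/..} => file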
+ # Modifiable copy of %Global::replace
+ %Global::rpl = %Global::replace;
+ $Global::parens = "{==}";
+ $/="\n";
+ $Global::ignore_empty = 0;
+ $Global::interactive = 0;
+ $Global::stderr_verbose = 0;
+ $Global::default_simultaneous_sshlogins = 9;
+ $Global::exitstatus = 0;
+ $Global::halt_on_error_exitstatus = 0;
+ $Global::arg_sep = ":::";
+ $Global::arg_file_sep = "::::";
+ $Global::trim = 'n';
+ $Global::max_jobs_running = 0;
+ $Global::job_already_run = '';
+ $ENV{'TMPDIR'} ||= "/tmp";
+
+ @ARGV=read_options();
+
+ if(@opt::v) { $Global::verbose = $#opt::v+1; } # Convert -v -v to v=2
+ $Global::debug = $opt::D;
+ $Global::shell = $ENV{'PARALLEL_SHELL'} || parent_shell($$) || $ENV{'SHELL'} || "/bin/sh";
+ if(defined $opt::X) { $Global::ContextReplace = 1; }
+ if(defined $opt::silent) { $Global::verbose = 0; }
+ if(defined $opt::0) { $/ = "\0"; }
+ if(defined $opt::d) { my $e="sprintf \"$opt::d\""; $/ = eval $e; }
+ if(defined $opt::p) { $Global::interactive = $opt::p; }
+ if(defined $opt::q) { $Global::quoting = 1; }
+ if(defined $opt::r) { $Global::ignore_empty = 1; }
+ if(defined $opt::verbose) { $Global::stderr_verbose = 1; }
+ # Deal with --rpl
+ sub rpl {
+ # Modify %Global::rpl
+ # Replace $old with $new
+ my ($old,$new) = @_;
+ if($old ne $new) {
+ $Global::rpl{$new} = $Global::rpl{$old};
+ delete $Global::rpl{$old};
+ }
+ }
+ if(defined $opt::parens) { $Global::parens = $opt::parens; }
+ my $parenslen = 0.5*length $Global::parens;
+ $Global::parensleft = substr($Global::parens,0,$parenslen);
+ $Global::parensright = substr($Global::parens,$parenslen);
+ if(defined $opt::plus) { %Global::rpl = (%Global::plus,%Global::rpl); }
+ if(defined $opt::I) { rpl('{}',$opt::I); }
+ if(defined $opt::U) { rpl('{.}',$opt::U); }
+ if(defined $opt::i and $opt::i) { rpl('{}',$opt::i); }
+ if(defined $opt::basenamereplace) { rpl('{/}',$opt::basenamereplace); }
+ if(defined $opt::dirnamereplace) { rpl('{//}',$opt::dirnamereplace); }
+ if(defined $opt::seqreplace) { rpl('{#}',$opt::seqreplace); }
+ if(defined $opt::slotreplace) { rpl('{%}',$opt::slotreplace); }
+ if(defined $opt::basenameextensionreplace) {
+ rpl('{/.}',$opt::basenameextensionreplace);
+ }
+ for(@opt::rpl) {
+ # Create $Global::rpl entries for --rpl options
+ # E.g: "{..} s:\.[^.]+$:;s:\.[^.]+$:;"
+ my ($shorthand,$long) = split/ /,$_,2;
+ $Global::rpl{$shorthand} = $long;
+ }
+ if(defined $opt::eof) { $Global::end_of_file_string = $opt::eof; }
+ if(defined $opt::max_args) { $Global::max_number_of_args = $opt::max_args; }
+ if(defined $opt::timeout) { $Global::timeoutq = TimeoutQueue->new($opt::timeout); }
+ if(defined $opt::tmpdir) { $ENV{'TMPDIR'} = $opt::tmpdir; }
+ if(defined $opt::help) { die_usage(); }
+ if(defined $opt::colsep) { $Global::trim = 'lr'; }
+ if(defined $opt::header) { $opt::colsep = defined $opt::colsep ? $opt::colsep : "\t"; }
+ if(defined $opt::trim) { $Global::trim = $opt::trim; }
+ if(defined $opt::arg_sep) { $Global::arg_sep = $opt::arg_sep; }
+ if(defined $opt::arg_file_sep) { $Global::arg_file_sep = $opt::arg_file_sep; }
+ if(defined $opt::number_of_cpus) { print SSHLogin::no_of_cpus(),"\n"; wait_and_exit(0); }
+ if(defined $opt::number_of_cores) {
+ print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::max_line_length_allowed) {
+ print Limits::Command::real_max_length(),"\n"; wait_and_exit(0);
+ }
+ if(defined $opt::version) { version(); wait_and_exit(0); }
+ if(defined $opt::bibtex) { bibtex(); wait_and_exit(0); }
+ if(defined $opt::record_env) { record_env(); wait_and_exit(0); }
+ if(defined $opt::show_limits) { show_limits(); }
+ if(@opt::sshlogin) { @Global::sshlogin = @opt::sshlogin; }
+ if(@opt::sshloginfile) { read_sshloginfiles(@opt::sshloginfile); }
+ if(@opt::return) { push @Global::ret_files, @opt::return; }
+ if(not defined $opt::recstart and
+ not defined $opt::recend) { $opt::recend = "\n"; }
+ if(not defined $opt::blocksize) { $opt::blocksize = "1M"; }
+ $opt::blocksize = multiply_binary_prefix($opt::blocksize);
+ if(defined $opt::controlmaster) { $opt::noctrlc = 1; }
+ if(defined $opt::semaphore) { $Global::semaphore = 1; }
+ if(defined $opt::semaphoretimeout) { $Global::semaphore = 1; }
+ if(defined $opt::semaphorename) { $Global::semaphore = 1; }
+ if(defined $opt::fg) { $Global::semaphore = 1; }
+ if(defined $opt::bg) { $Global::semaphore = 1; }
+ if(defined $opt::wait) { $Global::semaphore = 1; }
+ if(defined $opt::halt_on_error and
+ $opt::halt_on_error=~/%/) { $opt::halt_on_error /= 100; }
+ if(defined $opt::timeout and $opt::timeout !~ /^\d+(\.\d+)?%?$/) {
+ ::error("--timeout must be seconds or percentage\n");
+ wait_and_exit(255);
+ }
+ if(defined $opt::minversion) {
+ print $Global::version,"\n";
+ if($Global::version < $opt::minversion) {
+ wait_and_exit(255);
+ } else {
+ wait_and_exit(0);
+ }
+ }
+ if(not defined $opt::delay) {
+ # Set --delay to --sshdelay if not set
+ $opt::delay = $opt::sshdelay;
+ }
+ if($opt::compress_program) {
+ $opt::compress = 1;
+ $opt::decompress_program ||= $opt::compress_program." -dc";
+ }
+ if($opt::compress) {
+ my ($compress, $decompress) = find_compression_program();
+ $opt::compress_program ||= $compress;
+ $opt::decompress_program ||= $decompress;
+ }
+ if(defined $opt::nonall) {
+ # Append a dummy empty argument
+ push @ARGV, $Global::arg_sep, "";
+ }
+ if(defined $opt::tty) {
+ # Defaults for --tty: -j1 -u
+ # Can be overridden with -jXXX -g
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if(not defined $opt::group) {
+ $opt::ungroup = 0;
+ }
+ }
+ if(@opt::trc) {
+ push @Global::ret_files, @opt::trc;
+ $opt::transfer = 1;
+ $opt::cleanup = 1;
+ }
+ if(defined $opt::max_lines) {
+ if($opt::max_lines eq "-0") {
+ # -l -0 (swallowed -0)
+ $opt::max_lines = 1;
+ $opt::0 = 1;
+ $/ = "\0";
+ } elsif ($opt::max_lines == 0) {
+ # If not given (or if 0 is given) => 1
+ $opt::max_lines = 1;
+ }
+ $Global::max_lines = $opt::max_lines;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+
+ # Read more than one arg at a time (-L, -N)
+ if(defined $opt::L) {
+ $Global::max_lines = $opt::L;
+ if(not $opt::pipe) {
+ # --pipe -L means length of record - not max_number_of_args
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+ }
+ if(defined $opt::max_replace_args) {
+ $Global::max_number_of_args = $opt::max_replace_args;
+ $Global::ContextReplace = 1;
+ }
+ if((defined $opt::L or defined $opt::max_replace_args)
+ and
+ not ($opt::xargs or $opt::m)) {
+ $Global::ContextReplace = 1;
+ }
+ if(defined $opt::tag and not defined $opt::tagstring) {
+ $opt::tagstring = "\257<\257>"; # Default = {}
+ }
+ if(defined $opt::pipepart and
+ (defined $opt::L or defined $opt::max_lines
+ or defined $opt::max_replace_args)) {
+ ::error("--pipepart is incompatible with --max-replace-args, ",
+ "--max-lines, and -L.\n");
+ wait_and_exit(255);
+ }
+ if(grep /^$Global::arg_sep$|^$Global::arg_file_sep$/o, @ARGV) {
+ # Deal with ::: and ::::
+ @ARGV=read_args_from_command_line();
+ }
+
+ # Semaphore defaults
+ # Must be done before computing number of processes and max_line_length
+ # because when running as a semaphore GNU Parallel does not read args
+ $Global::semaphore ||= ($0 =~ m:(^|/)sem$:); # called as 'sem'
+ if($Global::semaphore) {
+ # A semaphore does not take input from stdin or a file
+ @opt::a = ("/dev/null");
+ push(@Global::unget_argv, [Arg->new("")]);
+ $Semaphore::timeout = $opt::semaphoretimeout || 0;
+ if(defined $opt::semaphorename) {
+ $Semaphore::name = $opt::semaphorename;
+ } else {
+ $Semaphore::name = `tty`;
+ chomp $Semaphore::name;
+ }
+ $Semaphore::fg = $opt::fg;
+ $Semaphore::wait = $opt::wait;
+ $Global::default_simultaneous_sshlogins = 1;
+ if(not defined $opt::jobs) {
+ $opt::jobs = 1;
+ }
+ if($Global::interactive and $opt::bg) {
+ ::error("Jobs running in the ".
+ "background cannot be interactive.\n");
+ ::wait_and_exit(255);
+ }
+ }
+ if(defined $opt::eta) {
+ $opt::progress = $opt::eta;
+ }
+ if(defined $opt::bar) {
+ $opt::progress = $opt::bar;
+ }
+ if(defined $opt::retired) {
+ ::error("-g has been retired. Use --group.\n");
+ ::error("-B has been retired. Use --bf.\n");
+ ::error("-T has been retired. Use --tty.\n");
+ ::error("-U has been retired. Use --er.\n");
+ ::error("-W has been retired. Use --wd.\n");
+ ::error("-Y has been retired. Use --shebang.\n");
+ ::error("-H has been retired. Use --halt.\n");
+ ::error("--tollef has been retired. Use -u -q --arg-sep -- and --load for -l.\n");
+ ::wait_and_exit(255);
+ }
+ citation_notice();
+
+ parse_sshlogin();
+ parse_env_var();
+
+ if(remote_hosts() and ($opt::X or $opt::m or $opt::xargs)) {
+ # As we do not know the max line length on the remote machine,
+ # long commands generated by xargs may fail.
+ # If opt_N is set, it is probably safe.
+ ::warning("Using -X or -m with --sshlogin may fail.\n");
+ }
+
+ if(not defined $opt::jobs) {
+ $opt::jobs = "100%";
+ }
+ open_joblog();
+}
+
+sub env_quote {
+ # Input:
+ # $v = value to quote
+ # Returns:
+ # $v = value quoted as environment variable
+ my $v = $_[0];
+ $v =~ s/([\\])/\\$1/g;
+ $v =~ s/([\[\] \#\'\&\<\>\(\)\;\{\}\t\"\$\`\*\174\!\?\~])/\\$1/g;
+ $v =~ s/\n/"\n"/g;
+ return $v;
+}
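+
+# For illustration: env_quote('a b$c') yields 'a\ b\$c', i.e. the value can be
+# embedded in a remotely executed "export VAR=..." or "setenv VAR ..." line
+# without the remote shell expanding or splitting it.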
+
+sub record_env {
+ # Record current %ENV-keys in ~/.parallel/ignored_vars
+ # Returns: N/A
+ my $ignore_filename = $ENV{'HOME'} . "/.parallel/ignored_vars";
+ if(open(my $vars_fh, ">", $ignore_filename)) {
+ print $vars_fh map { $_,"\n" } keys %ENV;
+ } else {
+ ::error("Cannot write to $ignore_filename\n");
+ ::wait_and_exit(255);
+ }
+}
+
+sub parse_env_var {
+ # Parse --env and set $Global::envvar, $Global::envwarn and $Global::envvarlen
+ #
+ # Bash functions must be parsed to export them remotely
+ # Pre-shellshock style bash function:
+ # myfunc=() {...
+ # Post-shellshock style bash function:
+ # BASH_FUNC_myfunc()=() {...
+ #
+ # Uses:
+ # $Global::envvar = eval string that will set variables in both bash and csh
+ # $Global::envwarn = If functions are used: Give warning in csh
+ # $Global::envvarlen = length of $Global::envvar
+ # @opt::env
+ # $Global::shell
+ # %ENV
+ # Returns: N/A
+ $Global::envvar = "";
+ $Global::envwarn = "";
+ my @vars = ('parallel_bash_environment');
+ for my $varstring (@opt::env) {
+ # Split up --env VAR1,VAR2
+ push @vars, split /,/, $varstring;
+ }
+ if(grep { /^_$/ } @vars) {
+ # --env _
+ # Include all vars that are not in a clean environment
+ if(open(my $vars_fh, "<", $ENV{'HOME'} . "/.parallel/ignored_vars")) {
+ my @ignore = <$vars_fh>;
+ chomp @ignore;
+ my %ignore;
+ @ignore{@ignore} = @ignore;
+ close $vars_fh;
+ push @vars, grep { not defined $ignore{$_} } keys %ENV;
+ @vars = grep { not /^_$/ } @vars;
+ } else {
+ ::error("Run '$Global::progname --record-env' in a clean environment first.\n");
+ ::wait_and_exit(255);
+ }
+ }
+ # Duplicate vars as BASH functions to include post-shellshock functions.
+ # So --env myfunc should also look for BASH_FUNC_myfunc()
+ @vars = map { $_, "BASH_FUNC_$_()" } @vars;
+ # Keep only defined variables
+ @vars = grep { defined($ENV{$_}) } @vars;
+ # Pre-shellshock style bash function:
+ # myfunc=() { echo myfunc
+ # }
+ # Post-shellshock style bash function:
+ # BASH_FUNC_myfunc()=() { echo myfunc
+ # }
+ my @bash_functions = grep { substr($ENV{$_},0,4) eq "() {" } @vars;
+ my @non_functions = grep { substr($ENV{$_},0,4) ne "() {" } @vars;
+ if(@bash_functions) {
+ # Functions are not supported for all shells
+ if($Global::shell !~ m:/(bash|rbash|zsh|rzsh|dash|ksh):) {
+ ::warning("Shell functions may not be supported in $Global::shell\n");
+ }
+ }
+
+ # Pre-shellshock names are without ()
+ my @bash_pre_shellshock = grep { not /\(\)/ } @bash_functions;
+ # Post-shellshock names are with ()
+ my @bash_post_shellshock = grep { /\(\)/ } @bash_functions;
+
+ my @qcsh = (map { my $a=$_; "setenv $a " . env_quote($ENV{$a}) }
+ grep { not /^parallel_bash_environment$/ } @non_functions);
+ my @qbash = (map { my $a=$_; "export $a=" . env_quote($ENV{$a}) }
+ @non_functions, @bash_pre_shellshock);
+
+ push @qbash, map { my $a=$_; "eval $a\"\$$a\"" } @bash_pre_shellshock;
+ push @qbash, map { /BASH_FUNC_(.*)\(\)/; "$1 $ENV{$_}" } @bash_post_shellshock;
+
+ #ssh -tt -oLogLevel=quiet lo 'eval `echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \$SHELL\ \|\ grep\ \"/t\\\{0,1\\\}csh\"\ \>\ /dev/null\ \&\&\ setenv\ BASH_FUNC_myfunc\ \\\(\\\)\\\ \\\{\\\ \\\ echo\\\ a\"'
+ #'\"\\\}\ \|\|\ myfunc\(\)\ \{\ \ echo\ a'
+ #'\}\ \;myfunc\ 1;
+
+ # Check if any variables contain \n
+ if(my @v = map { s/BASH_FUNC_(.*)\(\)/$1/; $_ } grep { $ENV{$_}=~/\n/ } @vars) {
+ # \n is bad for csh and will cause it to fail.
+ $Global::envwarn = ::shell_quote_scalar(q{echo $SHELL | egrep "/t?csh" > /dev/null && echo CSH/TCSH DO NOT SUPPORT newlines IN VARIABLES/FUNCTIONS. Unset }."@v".q{ && exec false;}."\n\n") . $Global::envwarn;
+ }
+
+ if(not @qcsh) { push @qcsh, "true"; }
+ if(not @qbash) { push @qbash, "true"; }
+ # Create lines like:
+ # echo $SHELL | grep "/t\\{0,1\\}csh" >/dev/null && setenv V1 val1 && setenv V2 val2 || export V1=val1 && export V2=val2 ; echo "$V1$V2"
+ if(@vars) {
+ $Global::envvar .=
+ join"",
+ (q{echo $SHELL | grep "/t\\{0,1\\}csh" > /dev/null && }
+ . join(" && ", @qcsh)
+ . q{ || }
+ . join(" && ", @qbash)
+ .q{;});
+ if($ENV{'parallel_bash_environment'}) {
+ $Global::envvar .= 'eval "$parallel_bash_environment";'."\n";
+ }
+ }
+ $Global::envvarlen = length $Global::envvar;
+}
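+
+# For illustration ('myfunc' and 'server1' are hypothetical):
+#   myfunc() { echo "got: $1"; }; export -f myfunc
+#   parallel --env myfunc -S server1 myfunc ::: a b
+# transfers the exported bash function to the remote shell using the machinery
+# above; '--env _' instead transfers every variable/function not recorded by a
+# previous 'parallel --record-env' run in a clean environment.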
+
+sub open_joblog {
+ # Open joblog as specified by --joblog
+ # Uses:
+ # $opt::resume
+ # $opt::resume_failed
+ # $opt::joblog
+ # $opt::results
+ # $Global::job_already_run
+ # %Global::fd
+ my $append = 0;
+ if(($opt::resume or $opt::resume_failed)
+ and
+ not ($opt::joblog or $opt::results)) {
+ ::error("--resume and --resume-failed require --joblog or --results.\n");
+ ::wait_and_exit(255);
+ }
+ if($opt::joblog) {
+ if($opt::resume || $opt::resume_failed) {
+ if(open(my $joblog_fh, "<", $opt::joblog)) {
+ # Read the joblog
+ $append = <$joblog_fh>; # If there is a header: Open as append later
+ my $joblog_regexp;
+ if($opt::resume_failed) {
+ # Make a regexp that only matches commands with exit+signal=0
+ # 4 host 1360490623.067 3.445 1023 1222 0 0 command
+ $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
+ } else {
+ # Just match the job number
+ $joblog_regexp='^(\d+)';
+ }
+ while(<$joblog_fh>) {
+ if(/$joblog_regexp/o) {
+ # This is 30% faster than set_job_already_run($1);
+ vec($Global::job_already_run,($1||0),1) = 1;
+ } elsif(not /\d+\s+[^\s]+\s+([0-9.]+\s+){6}/) {
+ ::error("Format of '$opt::joblog' is wrong: $_");
+ ::wait_and_exit(255);
+ }
+ }
+ close $joblog_fh;
+ }
+ }
+ if($append) {
+ # Append to joblog
+ if(not open($Global::joblog, ">>", $opt::joblog)) {
+ ::error("Cannot append to --joblog $opt::joblog.\n");
+ ::wait_and_exit(255);
+ }
+ } else {
+ if($opt::joblog eq "-") {
+ # Use STDOUT as joblog
+ $Global::joblog = $Global::fd{1};
+ } elsif(not open($Global::joblog, ">", $opt::joblog)) {
+ # Overwrite the joblog
+ ::error("Cannot write to --joblog $opt::joblog.\n");
+ ::wait_and_exit(255);
+ }
+ print $Global::joblog
+ join("\t", "Seq", "Host", "Starttime", "JobRuntime",
+ "Send", "Receive", "Exitval", "Signal", "Command"
+ ). "\n";
+ }
+ }
+}
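+
+# For illustration: the joblog written above is a TAB-separated table headed by
+#   Seq Host Starttime JobRuntime Send Receive Exitval Signal Command
+# so a resumable run (with a hypothetical log path) looks like:
+#   parallel --joblog /tmp/my.log do_work {} ::: *.in
+#   parallel --resume --joblog /tmp/my.log do_work {} ::: *.in
+# The second invocation skips every Seq already present in the log; with
+# --resume-failed only lines whose Exitval and Signal are both 0 are skipped,
+# so failed jobs are retried.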
+
+sub find_compression_program {
+ # Find a fast compression program
+ # Returns:
+ # $compress_program = compress program with options
+ # $decompress_program = decompress program with options
+
+ # Search for these. Sorted by speed
+ my @prg = qw(lzop pigz pxz gzip plzip pbzip2 lzma xz lzip bzip2);
+ for my $p (@prg) {
+ if(which($p)) {
+ return ("$p -c -1","$p -dc");
+ }
+ }
+ # Fall back to cat
+ return ("cat","cat");
+}
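+
+# For illustration: with --compress, output buffered on disk is piped through
+# the fastest program found above, so on a machine where lzop is installed
+#   parallel --compress 'generate_output {}' ::: inputs
+# behaves roughly like --compress-program lzop ('generate_output' and 'inputs'
+# are placeholders).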
+
+
+sub read_options {
+ # Read options from command line, profile and $PARALLEL
+ # Uses:
+ # $opt::shebang_wrap
+ # $opt::shebang
+ # @ARGV
+ # $opt::plain
+ # @opt::profile
+ # $ENV{'HOME'}
+ # $ENV{'PARALLEL'}
+ # Returns:
+ # @ARGV_no_opt = @ARGV without --options
+
+ # This must be done first as this may exec myself
+ if(defined $ARGV[0] and ($ARGV[0] =~ /^--shebang/ or
+ $ARGV[0] =~ /^--shebang-?wrap/ or
+ $ARGV[0] =~ /^--hashbang/)) {
+ # Program is called from #! line in script
+ # remove --shebang-wrap if it is set
+ $opt::shebang_wrap = ($ARGV[0] =~ s/^--shebang-?wrap *//);
+ # remove --shebang if it is set
+ $opt::shebang = ($ARGV[0] =~ s/^--shebang *//);
+ # remove --hashbang if it is set
+ $opt::shebang .= ($ARGV[0] =~ s/^--hashbang *//);
+ if($opt::shebang) {
+ my $argfile = shell_quote_scalar(pop @ARGV);
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --skip-first-line -a $argfile @ARGV";
+ }
+ if($opt::shebang_wrap) {
+ my @options;
+ my @parser;
+ if ($^O eq 'freebsd') {
+ # FreeBSD's #! puts different values in @ARGV than Linux does.
+ my @nooptions = @ARGV;
+ get_options_from_array(\@nooptions);
+ while($#ARGV > $#nooptions) {
+ push @options, shift @ARGV;
+ }
+ while(@ARGV and $ARGV[0] ne ":::") {
+ push @parser, shift @ARGV;
+ }
+ if(@ARGV and $ARGV[0] eq ":::") {
+ shift @ARGV;
+ }
+ } else {
+ @options = shift @ARGV;
+ }
+ my $script = shell_quote_scalar(shift @ARGV);
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --internal-pipe-means-argfiles @options @parser $script ::: @ARGV";
+ }
+ }
+
+ Getopt::Long::Configure("bundling","require_order");
+ my @ARGV_copy = @ARGV;
+ # Check if there is a --profile to set @opt::profile
+ get_options_from_array(\@ARGV_copy,"profile|J=s","plain") || die_usage();
+ my @ARGV_profile = ();
+ my @ARGV_env = ();
+ if(not $opt::plain) {
+ # Add options from .parallel/config and other profiles
+ my @config_profiles = (
+ "/etc/parallel/config",
+ $ENV{'HOME'}."/.parallel/config",
+ $ENV{'HOME'}."/.parallelrc");
+ my @profiles = @config_profiles;
+ if(@opt::profile) {
+ # --profile overrides default profiles
+ @profiles = ();
+ for my $profile (@opt::profile) {
+ if(-r $profile) {
+ push @profiles, $profile;
+ } else {
+ push @profiles, $ENV{'HOME'}."/.parallel/".$profile;
+ }
+ }
+ }
+ for my $profile (@profiles) {
+ if(-r $profile) {
+ open (my $in_fh, "<", $profile) || ::die_bug("read-profile: $profile");
+ while(<$in_fh>) {
+ /^\s*\#/ and next;
+ chomp;
+ push @ARGV_profile, shellwords($_);
+ }
+ close $in_fh;
+ } else {
+ if(grep /^$profile$/, @config_profiles) {
+ # config file is not required to exist
+ } else {
+ ::error("$profile not readable.\n");
+ wait_and_exit(255);
+ }
+ }
+ }
+ # Add options from shell variable $PARALLEL
+ if($ENV{'PARALLEL'}) {
+ @ARGV_env = shellwords($ENV{'PARALLEL'});
+ }
+ }
+ Getopt::Long::Configure("bundling","require_order");
+ get_options_from_array(\@ARGV_profile) || die_usage();
+ get_options_from_array(\@ARGV_env) || die_usage();
+ get_options_from_array(\@ARGV) || die_usage();
+
+ # Prepend non-options to @ARGV (such as commands like 'nice')
+ unshift @ARGV, @ARGV_profile, @ARGV_env;
+ return @ARGV;
+}
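+
+# For illustration: after read_options() the effective option order is
+# profile(s), then $PARALLEL, then the command line, with later sources
+# overriding earlier ones for scalar options, e.g. (a sketch):
+#   PARALLEL="--jobs 4" parallel -j8 do_work ::: a b c
+# runs with 8 job slots because the command line wins over $PARALLEL.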
+
+sub read_args_from_command_line {
+ # Arguments given on the command line after:
+ # ::: ($Global::arg_sep)
+ # :::: ($Global::arg_file_sep)
+ # Removes the arguments from @ARGV and:
+ # - puts filenames into -a
+ # - puts arguments into files and add the files to -a
+ # Input:
+ # @::ARGV = command option ::: arg arg arg :::: argfiles
+ # Uses:
+ # $Global::arg_sep
+ # $Global::arg_file_sep
+ # $opt::internal_pipe_means_argfiles
+ # $opt::pipe
+ # @opt::a
+ # Returns:
+ # @argv_no_argsep = @::ARGV without ::: and :::: and following args
+ my @new_argv = ();
+ for(my $arg = shift @ARGV; @ARGV; $arg = shift @ARGV) {
+ if($arg eq $Global::arg_sep
+ or
+ $arg eq $Global::arg_file_sep) {
+ my $group = $arg; # This group of arguments is args or argfiles
+ my @group;
+ while(defined ($arg = shift @ARGV)) {
+ if($arg eq $Global::arg_sep
+ or
+ $arg eq $Global::arg_file_sep) {
+ # exit while loop if finding new separator
+ last;
+ } else {
+ # If not hitting ::: or ::::
+ # Append it to the group
+ push @group, $arg;
+ }
+ }
+
+ if($group eq $Global::arg_file_sep
+ or ($opt::internal_pipe_means_argfiles and $opt::pipe)
+ ) {
+ # Group of file names on the command line.
+ # Append args into -a
+ push @opt::a, @group;
+ } elsif($group eq $Global::arg_sep) {
+ # Group of arguments on the command line.
+ # Put them into a file.
+ # Create argfile
+ my ($outfh,$name) = ::tmpfile(SUFFIX => ".arg");
+ unlink($name);
+ # Put args into argfile
+ print $outfh map { $_,$/ } @group;
+ seek $outfh, 0, 0;
+ # Append filehandle to -a
+ push @opt::a, $outfh;
+ } else {
+ ::die_bug("Unknown command line group: $group");
+ }
+ if(defined($arg)) {
+ # $arg is ::: or ::::
+ redo;
+ } else {
+ # $arg is undef -> @ARGV empty
+ last;
+ }
+ }
+ push @new_argv, $arg;
+ }
+ # Output: @ARGV = command to run with options
+ return @new_argv;
+}
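+
+# For illustration:
+#   parallel echo ::: a b c          # 'a', 'b' and 'c' become arguments
+#   parallel echo :::: args.txt      # each line of args.txt is an argument
+# (args.txt is a hypothetical file). Arguments after ::: are spooled to a
+# temporary argfile, so both forms end up as entries on @opt::a.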
+
+sub cleanup {
+ # Returns: N/A
+ if(@opt::basefile) { cleanup_basefile(); }
+}
+
+sub __QUOTING_ARGUMENTS_FOR_SHELL__ {}
+
+sub shell_quote {
+ # Input:
+ # @strings = strings to be quoted
+ # Output:
+ # @shell_quoted_strings = string quoted with \ as needed by the shell
+ my @strings = (@_);
+ for my $a (@strings) {
+ $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ $a =~ s/[\n]/'\n'/g; # filenames with '\n' are quoted using \'
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+sub shell_quote_empty {
+ # Inputs:
+ # @strings = strings to be quoted
+ # Returns:
+ # @quoted_strings = empty strings quoted as ''.
+ my @strings = shell_quote(@_);
+ for my $a (@strings) {
+ if($a eq "") {
+ $a = "''";
+ }
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+sub shell_quote_scalar {
+ # Quote the string so shell will not expand any special chars
+ # Inputs:
+ # $string = string to be quoted
+ # Returns:
+ # $shell_quoted = string quoted with \ as needed by the shell
+ my $a = $_[0];
+ if(defined $a) {
+ # $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
+ # This is 1% faster than the above
+ $a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377]/\\$&/go;
+ $a =~ s/[\n]/'\n'/go; # filenames with '\n' are quoted using \'
+ }
+ return $a;
+}
+
+sub shell_quote_file {
+ # Quote the string so shell will not expand any special chars and prepend ./ if needed
+ # Input:
+ # $filename = filename to be shell quoted
+ # Returns:
+ # $quoted_filename = filename quoted with \ as needed by the shell and ./ if needed
+ my $a = shell_quote_scalar(shift);
+ if(defined $a) {
+ if($a =~ m:^/: or $a =~ m:^\./:) {
+ # /abs/path or ./rel/path => skip
+ } else {
+ # rel/path => ./rel/path
+ $a = "./".$a;
+ }
+ }
+ return $a;
+}
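+
+# For illustration: shell_quote_scalar("it's a file") returns "it\'s\ a\ file",
+# and shell_quote_file("my file") returns "./my\ file", so a relative filename
+# (e.g. one starting with '-') is not mistaken for an option.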
+
+sub shellwords {
+ # Input:
+ # $string = shell line
+ # Returns:
+ # @shell_words = $string split into words as shell would do
+ $Global::use{"Text::ParseWords"} ||= eval "use Text::ParseWords; 1;";
+ return Text::ParseWords::shellwords(@_);
+}
+
+
+sub __FILEHANDLES__ {}
+
+
+sub save_stdin_stdout_stderr {
+ # Remember the original STDIN, STDOUT and STDERR
+ # and file descriptors opened by the shell (e.g. 3>/tmp/foo)
+ # Uses:
+ # %Global::fd
+ # $Global::original_stderr
+ # $Global::original_stdin
+ # Returns: N/A
+
+ # Find file descriptors that are already opened (by the shell)
+ for my $fdno (1..61) {
+ # /dev/fd/62 and above are used by bash for <(cmd)
+ my $fh;
+ # 2-argument-open is used to be compatible with old perl 5.8.0
+ # bug #43570: Perl 5.8.0 creates 61 files
+ if(open($fh,">&=$fdno")) {
+ $Global::fd{$fdno}=$fh;
+ }
+ }
+ open $Global::original_stderr, ">&", "STDERR" or
+ ::die_bug("Can't dup STDERR: $!");
+ open $Global::original_stdin, "<&", "STDIN" or
+ ::die_bug("Can't dup STDIN: $!");
+}
+
+sub enough_file_handles {
+ # Check that we have enough filehandles available for starting
+ # another job
+ # Uses:
+ # $opt::ungroup
+ # %Global::fd
+ # Returns:
+ # 1 if ungrouped (thus not needing extra filehandles)
+ # 0 if too few filehandles
+ # 1 if enough filehandles
+ if(not $opt::ungroup) {
+ my %fh;
+ my $enough_filehandles = 1;
+ # perl uses 7 filehandles for something?
+ # open3 uses 2 extra filehandles temporarily
+ # We need a filehandle for each redirected file descriptor
+ # (normally just STDOUT and STDERR)
+ for my $i (1..(7+2+keys %Global::fd)) {
+ $enough_filehandles &&= open($fh{$i}, "<", "/dev/null");
+ }
+ for (values %fh) { close $_; }
+ return $enough_filehandles;
+ } else {
+ # Ungrouped does not need extra file handles
+ return 1;
+ }
+}
+
+sub open_or_exit {
+ # Open a file name or exit if the file cannot be opened
+ # Inputs:
+ # $file = filehandle or filename to open
+ # Uses:
+ # $Global::stdin_in_opt_a
+ # $Global::original_stdin
+ # Returns:
+ # $fh = file handle to read-opened file
+ my $file = shift;
+ if($file eq "-") {
+ $Global::stdin_in_opt_a = 1;
+ return ($Global::original_stdin || *STDIN);
+ }
+ if(ref $file eq "GLOB") {
+ # This is an open filehandle
+ return $file;
+ }
+ my $fh = gensym;
+ if(not open($fh, "<", $file)) {
+ ::error("Cannot open input file `$file': No such file or directory.\n");
+ wait_and_exit(255);
+ }
+ return $fh;
+}
+
+sub __RUNNING_THE_JOBS_AND_PRINTING_PROGRESS__ {}
+
+# Variable structure:
+#
+# $Global::running{$pid} = Pointer to Job-object
+ # @Global::virgin_jobs = Pointers to Job-objects that have not yet received input
+# $Global::host{$sshlogin} = Pointer to SSHLogin-object
+# $Global::total_running = total number of running jobs
+# $Global::total_started = total jobs started
+
+sub init_run_jobs {
+ $Global::total_running = 0;
+ $Global::total_started = 0;
+ $Global::tty_taken = 0;
+ $SIG{USR1} = \&list_running_jobs;
+ $SIG{USR2} = \&toggle_progress;
+ if(@opt::basefile) { setup_basefile(); }
+}
+
+{
+ my $last_time;
+ my %last_mtime;
+
+sub start_more_jobs {
+ # Run start_another_job() but only if:
+ # * $Global::start_no_new_jobs is not set
+ # * the JobQueue is not empty
+ # * the load on the server is not too high
+ # * the server is not swapping
+ # * enough time has passed since the last remote login
+ # Uses:
+ # $Global::max_procs_file
+ # $Global::max_procs_file_last_mod
+ # %Global::host
+ # @opt::sshloginfile
+ # $Global::start_no_new_jobs
+ # $opt::filter_hosts
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::load
+ # $opt::noswap
+ # $opt::delay
+ # $Global::newest_starttime
+ # Returns:
+ # $jobs_started = number of jobs started
+ my $jobs_started = 0;
+ my $jobs_started_this_round = 0;
+ if($Global::start_no_new_jobs) {
+ return $jobs_started;
+ }
+ if(time - ($last_time||0) > 1) {
+ # At most do this every second
+ $last_time = time;
+ if($Global::max_procs_file) {
+ # --jobs filename
+ my $mtime = (stat($Global::max_procs_file))[9];
+ if($mtime > $Global::max_procs_file_last_mod) {
+ # file changed: Force re-computing max_jobs_running
+ $Global::max_procs_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_jobs_running(undef);
+ }
+ }
+ }
+ if(@opt::sshloginfile) {
+ # Is --sshloginfile changed?
+ for my $slf (@opt::sshloginfile) {
+ my $actual_file = expand_slf_shorthand($slf);
+ my $mtime = (stat($actual_file))[9];
+ $last_mtime{$actual_file} ||= $mtime;
+ if($mtime - $last_mtime{$actual_file} > 1) {
+ ::debug("run","--sshloginfile $actual_file changed. reload\n");
+ $last_mtime{$actual_file} = $mtime;
+ # Reload $slf
+ # Empty sshlogins
+ @Global::sshlogin = ();
+ for (values %Global::host) {
+ # Don't start new jobs on any host
+ # except the ones added back later
+ $_->set_max_jobs_running(0);
+ }
+ # This will set max_jobs_running on the SSHlogins
+ read_sshloginfile($actual_file);
+ parse_sshlogin();
+ $opt::filter_hosts and filter_hosts();
+ setup_basefile();
+ }
+ }
+ }
+ }
+ do {
+ $jobs_started_this_round = 0;
+ # This will start 1 job on each --sshlogin (if possible)
+ # thus distributing the jobs across the --sshlogins round robin
+
+ for my $sshlogin (values %Global::host) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more jobs in the queue
+ last;
+ }
+ debug("run", "Running jobs before on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), "\n");
+ if ($sshlogin->jobs_running() < $sshlogin->max_jobs_running()) {
+ if($opt::load and $sshlogin->loadavg_too_high()) {
+ # The load is too high or unknown
+ next;
+ }
+ if($opt::noswap and $sshlogin->swapping()) {
+ # The server is swapping
+ next;
+ }
+ if($sshlogin->too_fast_remote_login()) {
+ # It has been too short since
+ # Too little time has passed since the last remote login
+ }
+ if($opt::delay and $opt::delay > ::now() - $Global::newest_starttime) {
+ # Too little time has passed since the last job start
+ next;
+ }
+ debug("run", $sshlogin->string(), " has ", $sshlogin->jobs_running(),
+ " out of ", $sshlogin->max_jobs_running(),
+ " jobs running. Start another.\n");
+ if(start_another_job($sshlogin) == 0) {
+ # No more jobs to start on this $sshlogin
+ debug("run","No jobs started on ", $sshlogin->string(), "\n");
+ next;
+ }
+ $sshlogin->inc_jobs_running();
+ $sshlogin->set_last_login_at(::now());
+ $jobs_started++;
+ $jobs_started_this_round++;
+ }
+ debug("run","Running jobs after on ", $sshlogin->string(), ": ",
+ $sshlogin->jobs_running(), " of ",
+ $sshlogin->max_jobs_running(), "\n");
+ }
+ } while($jobs_started_this_round);
+
+ return $jobs_started;
+}
+}
+
+{
+ my $no_more_file_handles_warned;
+
+sub start_another_job {
+ # If there are enough filehandles
+ # and the JobQueue is not empty
+ # and the job is not already in the joblog:
+ # then grab a job from $Global::JobQueue,
+ # start it on $sshlogin
+ # and mark it as a virgin job
+ # Inputs:
+ # $sshlogin = the SSHLogin to start the job on
+ # Uses:
+ # $Global::JobQueue
+ # $opt::pipe
+ # $opt::results
+ # $opt::resume
+ # @Global::virgin_jobs
+ # Returns:
+ # 1 if another job was started
+ # 0 otherwise
+ my $sshlogin = shift;
+ # Do we have enough file handles to start another job?
+ if(enough_file_handles()) {
+ if($Global::JobQueue->empty() and not $opt::pipe) {
+ # No more commands to run
+ debug("start", "Not starting: JobQueue empty\n");
+ return 0;
+ } else {
+ my $job;
+ # Skip jobs already in job log
+ # Skip jobs already in results
+ do {
+ $job = get_job_with_sshlogin($sshlogin);
+ if(not defined $job) {
+ # No command available for that sshlogin
+ debug("start", "Not starting: no jobs available for ",
+ $sshlogin->string(), "\n");
+ return 0;
+ }
+ } while ($job->is_already_in_joblog()
+ or
+ ($opt::results and $opt::resume and $job->is_already_in_results()));
+ debug("start", "Command to run on '", $job->sshlogin()->string(), "': '",
+ $job->replaced(),"'\n");
+ if($job->start()) {
+ if($opt::pipe) {
+ push(@Global::virgin_jobs,$job);
+ }
+ debug("start", "Started as seq ", $job->seq(),
+ " pid:", $job->pid(), "\n");
+ return 1;
+ } else {
+ # Not enough processes to run the job.
+ # Put it back on the queue.
+ $Global::JobQueue->unget($job);
+ # Count down the number of jobs to run for this SSHLogin.
+ my $max = $sshlogin->max_jobs_running();
+ if($max > 1) { $max--; } else {
+ ::error("No more processes: cannot run a single job. Something is wrong.\n");
+ ::wait_and_exit(255);
+ }
+ $sshlogin->set_max_jobs_running($max);
+ # Sleep up to 300 ms to give other processes time to die
+ ::usleep(rand()*300);
+ ::warning("No more processes: ",
+ "Decreasing number of running jobs to $max. ",
+ "Raising ulimit -u or /etc/security/limits.conf may help.\n");
+ return 0;
+ }
+ }
+ } else {
+ # No more file handles
+ $no_more_file_handles_warned++ or
+ ::warning("No more file handles. ",
+ "Raising ulimit -n or /etc/security/limits.conf may help.\n");
+ return 0;
+ }
+}
+}
+
+sub init_progress {
+ # Uses:
+ # $opt::bar
+ # Returns:
+ # list of computers for progress output
+ $|=1;
+ if($opt::bar) {
+ return("","");
+ }
+ my %progress = progress();
+ return ("\nComputers / CPU cores / Max jobs to run\n",
+ $progress{'workerlist'});
+}
+
+sub drain_job_queue {
+ # Uses:
+ # $opt::progress
+ # $Global::original_stderr
+ # $Global::total_running
+ # $Global::max_jobs_running
+ # %Global::running
+ # $Global::JobQueue
+ # %Global::host
+ # $Global::start_no_new_jobs
+ # Returns: N/A
+ if($opt::progress) {
+ print $Global::original_stderr init_progress();
+ }
+ my $last_header="";
+ my $sleep = 0.2;
+ do {
+ while($Global::total_running > 0) {
+ debug($Global::total_running, "==", scalar
+ keys %Global::running," slots: ", $Global::max_jobs_running);
+ if($opt::pipe) {
+ # When using --pipe sometimes file handles are not closed properly
+ for my $job (values %Global::running) {
+ close $job->fh(0,"w");
+ }
+ }
+ if($opt::progress) {
+ my %progress = progress();
+ if($last_header ne $progress{'header'}) {
+ print $Global::original_stderr "\n", $progress{'header'}, "\n";
+ $last_header = $progress{'header'};
+ }
+ print $Global::original_stderr "\r",$progress{'status'};
+ flush $Global::original_stderr;
+ }
+ if($Global::total_running < $Global::max_jobs_running
+ and not $Global::JobQueue->empty()) {
+ # These jobs may not be started because of loadavg
+ # or too little time between each ssh login.
+ if(start_more_jobs() > 0) {
+ # Exponential back-on: halve the sleep if jobs were started
+ $sleep = $sleep/2+0.001;
+ }
+ }
+ # Sometimes SIGCHLD is not registered, so force reaper
+ $sleep = ::reap_usleep($sleep);
+ }
+ if(not $Global::JobQueue->empty()) {
+ # These jobs may not be started:
+ # * because --filter-hosts has removed all hosts
+ if(not %Global::host) {
+ ::error("There are no hosts left to run on.\n");
+ ::wait_and_exit(255);
+ }
+ # * because of loadavg
+ # * because of too little time between each ssh login.
+ start_more_jobs();
+ $sleep = ::reap_usleep($sleep);
+ if($Global::max_jobs_running == 0) {
+ ::warning("There are no job slots available. Increase --jobs.\n");
+ }
+ }
+ } while ($Global::total_running > 0
+ or
+ not $Global::start_no_new_jobs and not $Global::JobQueue->empty());
+ if($opt::progress) {
+ my %progress = progress();
+ print $Global::original_stderr "\r", $progress{'status'}, "\n";
+ flush $Global::original_stderr;
+ }
+}
+
+sub toggle_progress {
+ # Turn on/off progress view
+ # Uses:
+ # $opt::progress
+ # $Global::original_stderr
+ # Returns: N/A
+ $opt::progress = not $opt::progress;
+ if($opt::progress) {
+ print $Global::original_stderr init_progress();
+ }
+}
+
+sub progress {
+ # Uses:
+ # $opt::bar
+ # $opt::eta
+ # %Global::host
+ # $Global::total_started
+ # Returns:
+ # $workerlist = list of workers
+ # $header = header that will fit on the screen
+ # $status = message that will fit on the screen
+ if($opt::bar) {
+ return ("workerlist" => "", "header" => "", "status" => bar());
+ }
+ my $eta = "";
+ my ($status,$header)=("","");
+ if($opt::eta) {
+ my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
+ compute_eta();
+ $eta = sprintf("ETA: %ds Left: %d AVG: %.2fs ",
+ $this_eta, $left, $avgtime);
+ }
+ my $termcols = terminal_columns();
+ my @workers = sort keys %Global::host;
+ my %sshlogin = map { $_ eq ":" ? ($_=>"local") : ($_=>$_) } @workers;
+ my $workerno = 1;
+ my %workerno = map { ($_=>$workerno++) } @workers;
+ my $workerlist = "";
+ for my $w (@workers) {
+ $workerlist .=
+ $workerno{$w}.":".$sshlogin{$w} ." / ".
+ ($Global::host{$w}->ncpus() || "-")." / ".
+ $Global::host{$w}->max_jobs_running()."\n";
+ }
+ $status = "x"x($termcols+1);
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX%/XX.Xs sshlogin2:XX/XX/XX%/XX.Xs sshlogin3:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs/Average seconds to complete";
+ $status = $eta .
+ join(" ",map
+ {
+ if($Global::total_started) {
+ my $completed = ($Global::host{$_}->jobs_completed()||0);
+ my $running = $Global::host{$_}->jobs_running();
+ my $time = $completed ? (time-$^T)/($completed) : "0";
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ $sshlogin{$_}, $running, $completed,
+ ($running+$completed)*100
+ / $Global::total_started, $time);
+ }
+ } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX%/XX.Xs 2:XX/XX/XX%/XX.Xs 3:XX/XX/XX%/XX.Xs 4:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ {
+ my $completed = ($Global::host{$_}->jobs_completed()||0);
+ my $running = $Global::host{$_}->jobs_running();
+ my $time = $completed ? (time-$^T)/($completed) : "0";
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ $workerno{$_}, $running, $completed,
+ ($running+$completed)*100
+ / $Global::total_started, $time);
+ } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d/%d%%",
+ $sshlogin{$_},
+ $Global::host{$_}->jobs_running(),
+ ($Global::host{$_}->jobs_completed()||0),
+ ($Global::host{$_}->jobs_running()+
+ ($Global::host{$_}->jobs_completed()||0))*100
+ / $Global::total_started) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX% 2:XX/XX/XX% 3:XX/XX/XX% 4:XX/XX/XX% 5:XX/XX/XX% 6:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d/%d%%",
+ $workerno{$_},
+ $Global::host{$_}->jobs_running(),
+ ($Global::host{$_}->jobs_completed()||0),
+ ($Global::host{$_}->jobs_running()+
+ ($Global::host{$_}->jobs_completed()||0))*100
+ / $Global::total_started) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX sshlogin4:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $sshlogin{$_}, $Global::host{$_}->jobs_running(),
+ ($Global::host{$_}->jobs_completed()||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX sshlogin2:XX/XX sshlogin3:XX/XX sshlogin4:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $sshlogin{$_}, $Global::host{$_}->jobs_running(),
+ ($Global::host{$_}->jobs_completed()||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX 2:XX/XX 3:XX/XX 4:XX/XX 5:XX/XX 6:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $workerno{$_}, $Global::host{$_}->jobs_running(),
+ ($Global::host{$_}->jobs_completed()||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX sshlogin2:XX sshlogin3:XX sshlogin4:XX sshlogin5:XX
+ $header = "Computer:jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d",
+ $sshlogin{$_},
+ ($Global::host{$_}->jobs_completed()||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX 2:XX 3:XX 4:XX 5:XX 6:XX
+ $header = "Computer:jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d",
+ $workerno{$_},
+ ($Global::host{$_}->jobs_completed()||0)) }
+ @workers);
+ }
+ return ("workerlist" => $workerlist, "header" => $header, "status" => $status);
+}
+
+{
+ my ($total, $first_completed, $smoothed_avg_time);
+
+ sub compute_eta {
+ # Calculate important numbers for ETA
+ # Returns:
+ # $total = number of jobs in total
+ # $completed = number of jobs completed
+ # $left = number of jobs left
+ # $pctcomplete = percent of jobs completed
+ # $avgtime = average time per completed job
+ # $eta = smoothed eta
+ $total ||= $Global::JobQueue->total_jobs();
+ my $completed = 0;
+ for(values %Global::host) { $completed += $_->jobs_completed() }
+ my $left = $total - $completed;
+ if(not $completed) {
+ return($total, $completed, $left, 0, 0, 0);
+ }
+ my $pctcomplete = $completed / $total;
+ $first_completed ||= time;
+ my $timepassed = (time - $first_completed);
+ my $avgtime = $timepassed / $completed;
+ $smoothed_avg_time ||= $avgtime;
+ # Smooth the eta so it does not jump wildly
+ $smoothed_avg_time = (1 - $pctcomplete) * $smoothed_avg_time +
+ $pctcomplete * $avgtime;
+ my $eta = int($left * $smoothed_avg_time);
+ return($total, $completed, $left, $pctcomplete, $avgtime, $eta);
+ }
+}
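+
+# Worked example: with $total = 100 jobs, 25 completed after 50 seconds,
+# $avgtime = 2 s/job and $pctcomplete = 0.25, the update above gives
+#   $smoothed_avg_time = 0.75 * $smoothed_avg_time + 0.25 * 2
+# and the reported ETA is int(75 * $smoothed_avg_time). Weighting by
+# $pctcomplete makes the estimate lean more on the measured average as the run
+# progresses, which keeps the ETA from jumping wildly early on.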
+
+{
+ my ($rev,$reset);
+
+ sub bar {
+ # Return:
+ # $status = bar with eta, completed jobs, arg and pct
+ $rev ||= "\033[7m";
+ $reset ||= "\033[0m";
+ my($total, $completed, $left, $pctcomplete, $avgtime, $eta) =
+ compute_eta();
+ my $arg = $Global::newest_job ?
+ $Global::newest_job->{'commandline'}->replace_placeholders(["\257<\257>"],0,0) : "";
+ # These chars mess up display in the terminal
+ $arg =~ tr/[\011-\016\033\302-\365]//d;
+ my $bar_text =
+ sprintf("%d%% %d:%d=%ds %s",
+ $pctcomplete*100, $completed, $left, $eta, $arg);
+ my $terminal_width = terminal_columns();
+ my $s = sprintf("%-${terminal_width}s",
+ substr($bar_text." "x$terminal_width,
+ 0,$terminal_width));
+ my $width = int($terminal_width * $pctcomplete);
+ substr($s,$width,0) = $reset;
+ my $zenity = sprintf("%-${terminal_width}s",
+ substr("# $eta sec $arg",
+ 0,$terminal_width));
+ $s = "\r" . $zenity . "\r" . $pctcomplete*100 . # Prefix with zenity header
+ "\r" . $rev . $s . $reset;
+ return $s;
+ }
+}
+
+{
+ my ($columns,$last_column_time);
+
+ sub terminal_columns {
+ # Get the number of columns of the display
+ # Returns:
+ # number of columns of the screen
+ if(not $columns or $last_column_time < time) {
+ $last_column_time = time;
+ $columns = $ENV{'COLUMNS'};
+ if(not $columns) {
+ my $resize = qx{ resize 2>/dev/null };
+ $resize =~ /COLUMNS=(\d+);/ and do { $columns = $1; };
+ }
+ $columns ||= 80;
+ }
+ return $columns;
+ }
+}
+
+sub get_job_with_sshlogin {
+ # Returns:
+ # next job object for $sshlogin if any available
+ my $sshlogin = shift;
+ my $job = undef;
+
+ if ($opt::hostgroups) {
+ my @other_hostgroup_jobs = ();
+
+ while($job = $Global::JobQueue->get()) {
+ if($sshlogin->in_hostgroups($job->hostgroups())) {
+ # Found a job for this hostgroup
+ last;
+ } else {
+ # This job was not in the hostgroups of $sshlogin
+ push @other_hostgroup_jobs, $job;
+ }
+ }
+ $Global::JobQueue->unget(@other_hostgroup_jobs);
+ if(not defined $job) {
+ # No more jobs
+ return undef;
+ }
+ } else {
+ $job = $Global::JobQueue->get();
+ if(not defined $job) {
+ # No more jobs
+ ::debug("start", "No more jobs: JobQueue empty\n");
+ return undef;
+ }
+ }
+
+ my $clean_command = $job->replaced();
+ if($clean_command =~ /^\s*$/) {
+ # Do not run empty lines
+ if(not $Global::JobQueue->empty()) {
+ return get_job_with_sshlogin($sshlogin);
+ } else {
+ return undef;
+ }
+ }
+ $job->set_sshlogin($sshlogin);
+ if($opt::retries and $clean_command and
+ $job->failed_here()) {
+ # This command with these args failed for this sshlogin
+ my ($no_of_failed_sshlogins,$min_failures) = $job->min_failed();
+ # Only look at the hosts in %Global::host that have > 0 job slots
+ if($no_of_failed_sshlogins == grep { $_->max_jobs_running() > 0 } values %Global::host
+ and $job->failed_here() == $min_failures) {
+ # It failed the same or more times on another host:
+ # run it on this host
+ } else {
+ # If it failed fewer times on another host:
+ # Find another job to run
+ my $nextjob;
+ if(not $Global::JobQueue->empty()) {
+ # This can potentially recurse for all args
+ no warnings 'recursion';
+ $nextjob = get_job_with_sshlogin($sshlogin);
+ }
+ # Push the command back on the queue
+ $Global::JobQueue->unget($job);
+ return $nextjob;
+ }
+ }
+ return $job;
+}
+
+sub __REMOTE_SSH__ {}
+
+sub read_sshloginfiles {
+ # Returns: N/A
+ for my $s (@_) {
+ read_sshloginfile(expand_slf_shorthand($s));
+ }
+}
+
+sub expand_slf_shorthand {
+ my $file = shift;
+ if($file eq "-") {
+ # skip: It is stdin
+ } elsif($file eq "..") {
+ $file = $ENV{'HOME'}."/.parallel/sshloginfile";
+ } elsif($file eq ".") {
+ $file = "/etc/parallel/sshloginfile";
+ } elsif(not -r $file) {
+ # Try prepending ~/.parallel
+ if(not -r $ENV{'HOME'}."/.parallel/".$file) {
+ ::error("Cannot open $file.\n");
+ ::wait_and_exit(255);
+ } else {
+ $file = $ENV{'HOME'}."/.parallel/".$file;
+ }
+ }
+ return $file;
+}
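+
+# For illustration, the --slf shorthands accepted above:
+#   parallel --slf ..   cmd ::: args   # ~/.parallel/sshloginfile
+#   parallel --slf .    cmd ::: args   # /etc/parallel/sshloginfile
+#   parallel --slf -    cmd ::: args   # read sshlogins from stdin
+# Any other value is tried as a path first and then under ~/.parallel/
+# ('cmd' and 'args' are placeholders).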
+
+sub read_sshloginfile {
+ # Returns: N/A
+ my $file = shift;
+ my $close = 1;
+ my $in_fh;
+ ::debug("init","--slf ",$file);
+ if($file eq "-") {
+ $in_fh = *STDIN;
+ $close = 0;
+ } else {
+ if(not open($in_fh, "<", $file)) {
+ # Try the filename
+ ::error("Cannot open $file.\n");
+ ::wait_and_exit(255);
+ }
+ }
+ while(<$in_fh>) {
+ chomp;
+ /^\s*#/ and next;
+ /^\s*$/ and next;
+ push @Global::sshlogin, $_;
+ }
+ if($close) {
+ close $in_fh;
+ }
+}
+
+sub parse_sshlogin {
+ # Returns: N/A
+ my @login;
+ if(not @Global::sshlogin) { @Global::sshlogin = (":"); }
+ for my $sshlogin (@Global::sshlogin) {
+ # Split up -S sshlogin,sshlogin
+ for my $s (split /,/, $sshlogin) {
+ if ($s eq ".." or $s eq "-") {
+ # This may add to @Global::sshlogin - possibly bug
+ read_sshloginfile(expand_slf_shorthand($s));
+ } else {
+ push (@login, $s);
+ }
+ }
+ }
+ $Global::minimal_command_line_length = 8_000_000;
+ my @allowed_hostgroups;
+ for my $ncpu_sshlogin_string (::uniq(@login)) {
+ my $sshlogin = SSHLogin->new($ncpu_sshlogin_string);
+ my $sshlogin_string = $sshlogin->string();
+ if($sshlogin_string eq "") {
+ # This is an ssh group: -S @webservers
+ push @allowed_hostgroups, $sshlogin->hostgroups();
+ next;
+ }
+ if($Global::host{$sshlogin_string}) {
+ # This sshlogin has already been added:
+ # It is probably a host that has come back
+ # Set the max_jobs_running back to the original
+ debug("run","Already seen $sshlogin_string\n");
+ if($sshlogin->{'ncpus'}) {
+ # If ncpus set by '#/' of the sshlogin, overwrite it:
+ $Global::host{$sshlogin_string}->set_ncpus($sshlogin->ncpus());
+ }
+ $Global::host{$sshlogin_string}->set_max_jobs_running(undef);
+ next;
+ }
+ if($sshlogin_string eq ":") {
+ $sshlogin->set_maxlength(Limits::Command::max_length());
+ } else {
+ # If all chars need to be quoted, every other character will be \
+ $sshlogin->set_maxlength(int(Limits::Command::max_length()/2));
+ }
+ $Global::minimal_command_line_length =
+ ::min($Global::minimal_command_line_length, $sshlogin->maxlength());
+ $Global::host{$sshlogin_string} = $sshlogin;
+ }
+ if(@allowed_hostgroups) {
+ # Remove hosts that are not in these groups
+ while (my ($string, $sshlogin) = each %Global::host) {
+ if(not $sshlogin->in_hostgroups(@allowed_hostgroups)) {
+ delete $Global::host{$string};
+ }
+ }
+ }
+
+ # debug("start", "sshlogin: ", my_dump(%Global::host),"\n");
+ if($opt::transfer or @opt::return or $opt::cleanup or @opt::basefile) {
+ if(not remote_hosts()) {
+ # There are no remote hosts
+ if(@opt::trc) {
+ ::warning("--trc ignored as there are no remote --sshlogin.\n");
+ } elsif (defined $opt::transfer) {
+ ::warning("--transfer ignored as there are no remote --sshlogin.\n");
+ } elsif (@opt::return) {
+ ::warning("--return ignored as there are no remote --sshlogin.\n");
+ } elsif (defined $opt::cleanup) {
+ ::warning("--cleanup ignored as there are no remote --sshlogin.\n");
+ } elsif (@opt::basefile) {
+ ::warning("--basefile ignored as there are no remote --sshlogin.\n");
+ }
+ }
+ }
+}
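+
+# For illustration, -S accepts comma-separated entries such as (host names are
+# hypothetical):
+#   parallel -S 8/server1,2/user@server2,: cmd ::: args
+# where '8/' forces 8 job slots on server1, ':' means the local machine, and
+# '..' or '-' pulls logins from an sshloginfile as handled above.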
+
+sub remote_hosts {
+ # Return sshlogins that are not ':'
+ # Returns:
+ # list of sshlogins with ':' removed
+ return grep !/^:$/, keys %Global::host;
+}
+
+sub setup_basefile {
+ # Transfer basefiles to each $sshlogin
+ # This needs to be done before the first job is run on $sshlogin
+ # Returns: N/A
+ my $cmd = "";
+ my $rsync_destdir;
+ my $workdir;
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->string() eq ":") { next }
+ for my $file (@opt::basefile) {
+ if($file !~ m:^/: and $opt::workdir eq "...") {
+ ::error("Work dir '...' will not work with relative basefiles\n");
+ ::wait_and_exit(255);
+ }
+ $workdir ||= Job->new("")->workdir();
+ $cmd .= $sshlogin->rsync_transfer_cmd($file,$workdir) . "&";
+ }
+ }
+ $cmd .= "wait;";
+ debug("init", "basesetup: $cmd\n");
+ print `$cmd`;
+}
+
+sub cleanup_basefile {
+ # Remove the basefiles transferred
+ # Returns: N/A
+ my $cmd="";
+ my $workdir = Job->new("")->workdir();
+ for my $sshlogin (values %Global::host) {
+ if($sshlogin->string() eq ":") { next }
+ for my $file (@opt::basefile) {
+ $cmd .= $sshlogin->cleanup_cmd($file,$workdir)."&";
+ }
+ }
+ $cmd .= "wait;";
+ debug("init", "basecleanup: $cmd\n");
+ print `$cmd`;
+}
+
+sub filter_hosts {
+ my(@cores, @cpus, @maxline, @echo);
+ my $envvar = ::shell_quote_scalar($Global::envvar);
+ while (my ($host, $sshlogin) = each %Global::host) {
+ if($host eq ":") { next }
+ # The 'true' is used to get the $host out later
+ my $sshcmd = "true $host;" . $sshlogin->sshcommand()." ".$sshlogin->serverlogin();
+ push(@cores, $host."\t".$sshcmd." ".$envvar." parallel --number-of-cores\n\0");
+ push(@cpus, $host."\t".$sshcmd." ".$envvar." parallel --number-of-cpus\n\0");
+ push(@maxline, $host."\t".$sshcmd." ".$envvar." parallel --max-line-length-allowed\n\0");
+ # 'echo' is used to get the best possible value for an ssh login time
+ push(@echo, $host."\t".$sshcmd." echo\n\0");
+ }
+ my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".ssh");
+ print $fh @cores, @cpus, @maxline, @echo;
+ close $fh;
+ # --timeout 5: Setting up an SSH connection and running a simple
+ # command should never take > 5 sec.
+ # --delay 0.1: If multiple sshlogins use the same proxy the delay
+ # will make it less likely to overload the ssh daemon.
+ # --retries 3: If the ssh daemon is overloaded, try 3 times
+ # -s 16000: Half of the max line on UnixWare
+ my $cmd = "cat $tmpfile | $0 -j0 --timeout 5 -s 16000 --joblog - --plain --delay 0.1 --retries 3 --tag --tagstring {1} -0 --colsep '\t' -k eval {2} 2>/dev/null";
+ ::debug("init", $cmd, "\n");
+ open(my $host_fh, "-|", $cmd) || ::die_bug("parallel host check: $cmd");
+ my (%ncores, %ncpus, %time_to_login, %maxlen, %echo, @down_hosts);
+ my $prepend = "";
+ while(<$host_fh>) {
+ if(/\'$/) {
+ # if last char = ' then append next line
+ # This may be due to quoting of $Global::envvar
+ $prepend .= $_;
+ next;
+ }
+ $_ = $prepend . $_;
+ $prepend = "";
+ chomp;
+ my @col = split /\t/, $_;
+ if(defined $col[6]) {
+ # This is a line from --joblog
+ # seq host time spent sent received exit signal command
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores
+ if($col[0] eq "Seq" and $col[1] eq "Host" and
+ $col[2] eq "Starttime") {
+ # Header => skip
+ next;
+ }
+ # Get server from: eval true server\;
+ $col[8] =~ /eval true..([^;]+).;/ or ::die_bug("col8 does not contain host: $col[8]");
+ my $host = $1;
+ $host =~ tr/\\//d;
+ $Global::host{$host} or next;
+ if($col[6] eq "255" or $col[7] eq "15") {
+ # exit == 255 or signal == 15: ssh failed
+ # Remove sshlogin
+ ::debug("init", "--filtered $host\n");
+ push(@down_hosts, $host);
+ @down_hosts = uniq(@down_hosts);
+ } elsif($col[6] eq "127") {
+ # exit == 127: parallel not installed on the remote host
+ # Set ncpus and ncores = 1
+ ::warning("Could not figure out ",
+ "number of cpus on $host. Using 1.\n");
+ $ncores{$host} = 1;
+ $ncpus{$host} = 1;
+ $maxlen{$host} = Limits::Command::max_length();
+ } elsif($col[0] =~ /^\d+$/ and $Global::host{$host}) {
+ # Remember how long it took to log in
+ # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ echo
+ $time_to_login{$host} = ::min($time_to_login{$host},$col[3]);
+ } else {
+ ::die_bug("host check unmatched long jobline: $_");
+ }
+ } elsif($Global::host{$col[0]}) {
+ # This is output from --number-of-cores, --number-of-cpus,
+ # --max-line-length-allowed
+ # ncores: server 8
+ # ncpus: server 2
+ # maxlen: server 131071
+ if(not $ncores{$col[0]}) {
+ $ncores{$col[0]} = $col[1];
+ } elsif(not $ncpus{$col[0]}) {
+ $ncpus{$col[0]} = $col[1];
+ } elsif(not $maxlen{$col[0]}) {
+ $maxlen{$col[0]} = $col[1];
+ } elsif(not $echo{$col[0]}) {
+ $echo{$col[0]} = $col[1];
+ } elsif(m/perl: warning:|LANGUAGE =|LC_ALL =|LANG =|are supported and installed/) {
+ # Skip these:
+ # perl: warning: Setting locale failed.
+ # perl: warning: Please check that your locale settings:
+ # LANGUAGE = (unset),
+ # LC_ALL = (unset),
+ # LANG = "en_US.UTF-8"
+ # are supported and installed on your system.
+ # perl: warning: Falling back to the standard locale ("C").
+ } else {
+ ::die_bug("host check too many col0: $_");
+ }
+ } else {
+ ::die_bug("host check unmatched short jobline ($col[0]): $_");
+ }
+ }
+ close $host_fh;
+ $Global::debug or unlink $tmpfile;
+ delete @Global::host{@down_hosts};
+ @down_hosts and ::warning("Removed @down_hosts\n");
+ $Global::minimal_command_line_length = 8_000_000;
+ while (my ($sshlogin, $obj) = each %Global::host) {
+ if($sshlogin eq ":") { next }
+ $ncpus{$sshlogin} or ::die_bug("ncpus missing: ".$obj->serverlogin());
+ $ncores{$sshlogin} or ::die_bug("ncores missing: ".$obj->serverlogin());
+ $time_to_login{$sshlogin} or ::die_bug("time_to_login missing: ".$obj->serverlogin());
+ $maxlen{$sshlogin} or ::die_bug("maxlen missing: ".$obj->serverlogin());
+ if($opt::use_cpus_instead_of_cores) {
+ $obj->set_ncpus($ncpus{$sshlogin});
+ } else {
+ $obj->set_ncpus($ncores{$sshlogin});
+ }
+ $obj->set_time_to_login($time_to_login{$sshlogin});
+ $obj->set_maxlength($maxlen{$sshlogin});
+ $Global::minimal_command_line_length =
+ ::min($Global::minimal_command_line_length,
+ int($maxlen{$sshlogin}/2));
+ ::debug("init", "Timing from -S:$sshlogin ncpus:",$ncpus{$sshlogin},
+ " ncores:", $ncores{$sshlogin},
+ " time_to_login:", $time_to_login{$sshlogin},
+ " maxlen:", $maxlen{$sshlogin},
+ " min_max_len:", $Global::minimal_command_line_length,"\n");
+ }
+}
+
+sub onall {
+ sub tmp_joblog {
+ my $joblog = shift;
+ if(not defined $joblog) {
+ return undef;
+ }
+ my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".log");
+ close $fh;
+ return $tmpfile;
+ }
+ my @command = @_;
+ if($Global::quoting) {
+ @command = shell_quote_empty(@command);
+ }
+
+ # Copy all @fhlist into tempfiles
+ my @argfiles = ();
+ for my $fh (@fhlist) {
+ my ($outfh, $name) = ::tmpfile(SUFFIX => ".all", UNLINK => 1);
+ print $outfh (<$fh>);
+ close $outfh;
+ push @argfiles, $name;
+ }
+ if(@opt::basefile) { setup_basefile(); }
+ # for each sshlogin do:
+ # parallel -S $sshlogin $command :::: @argfiles
+ #
+ # Pass some of the options to the sub-parallels, not all of them as
+ # -P should only go to the first, and -S should not be copied at all.
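+ # Illustrative only (hypothetical host and command names): each line
+ # piped to the outer 'parallel -j0' below is itself a parallel command
+ # for one host, e.g.:
+ # parallel -u -j1 --joblog /tmp/parXXXXX.log -S server1 mycmd :::: /tmp/parXXXXX.all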
+ my $options =
+ join(" ",
+ ((defined $opt::jobs) ? "-P $opt::jobs" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::ungroup) ? "-u" : ""),
+ ((defined $opt::group) ? "-g" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ );
+ my $suboptions =
+ join(" ",
+ ((defined $opt::ungroup) ? "-u" : ""),
+ ((defined $opt::linebuffer) ? "--linebuffer" : ""),
+ ((defined $opt::group) ? "-g" : ""),
+ ((defined $opt::files) ? "--files" : ""),
+ ((defined $opt::keeporder) ? "--keeporder" : ""),
+ ((defined $opt::colsep) ? "--colsep ".shell_quote($opt::colsep) : ""),
+ ((@opt::v) ? "-vv" : ""),
+ ((defined $opt::D) ? "-D $opt::D" : ""),
+ ((defined $opt::timeout) ? "--timeout ".$opt::timeout : ""),
+ ((defined $opt::plain) ? "--plain" : ""),
+ ((defined $opt::retries) ? "--retries ".$opt::retries : ""),
+ ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
+ ((defined $opt::arg_sep) ? "--arg-sep ".$opt::arg_sep : ""),
+ ((defined $opt::arg_file_sep) ? "--arg-file-sep ".$opt::arg_file_sep : ""),
+ (@opt::env ? map { "--env ".::shell_quote_scalar($_) } @opt::env : ""),
+ );
+ ::debug("init", "| $0 $options\n");
+ open(my $parallel_fh, "|-", "$0 --no-notice -j0 $options") ||
+ ::die_bug("This does not run GNU Parallel: $0 $options");
+ my @joblogs;
+ for my $host (sort keys %Global::host) {
+ my $sshlogin = $Global::host{$host};
+ my $joblog = tmp_joblog($opt::joblog);
+ if($joblog) {
+ push @joblogs, $joblog;
+ $joblog = "--joblog $joblog";
+ }
+ my $quad = $opt::arg_file_sep || "::::";
+ ::debug("init", "$0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ?
+ "--tagstring ".shell_quote_scalar($sshlogin->string()) : ""),
+ " -S ", shell_quote_scalar($sshlogin->string())," ",
+ join(" ",shell_quote(@command))," $quad @argfiles\n");
+ print $parallel_fh "$0 $suboptions -j1 $joblog ",
+ ((defined $opt::tag) ?
+ "--tagstring ".shell_quote_scalar($sshlogin->string()) : ""),
+ " -S ", shell_quote_scalar($sshlogin->string())," ",
+ join(" ",shell_quote(@command))," $quad @argfiles\n";
+ }
+ close $parallel_fh;
+ $Global::exitstatus = $? >> 8;
+ debug("init", "--onall exitvalue ", $?);
+ if(@opt::basefile) { cleanup_basefile(); }
+ $Global::debug or unlink(@argfiles);
+ my %seen;
+ for my $joblog (@joblogs) {
+ # Append to $joblog
+ open(my $fh, "<", $joblog) || ::die_bug("Cannot open tmp joblog $joblog");
+ # Skip first line (header);
+ <$fh>;
+ print $Global::joblog (<$fh>);
+ close $fh;
+ unlink($joblog);
+ }
+}
+
+sub __SIGNAL_HANDLING__ {}
+
+sub save_original_signal_handler {
+ # Remember the original signal handler
+ # Returns: N/A
+ $SIG{TERM} ||= sub { exit 0; }; # $SIG{TERM} is not set on Mac OS X
+ $SIG{INT} = sub { if($opt::tmux) { qx { tmux kill-session -t p$$ }; }
+ unlink keys %Global::unlink; exit -1 };
+ $SIG{TERM} = sub { if($opt::tmux) { qx { tmux kill-session -t p$$ }; }
+ unlink keys %Global::unlink; exit -1 };
+ %Global::original_sig = %SIG;
+ $SIG{TERM} = sub {}; # Dummy until jobs really start
+}
+
+sub list_running_jobs {
+ # Returns: N/A
+ for my $v (values %Global::running) {
+ print $Global::original_stderr "$Global::progname: ",$v->replaced(),"\n";
+ }
+}
+
+sub start_no_new_jobs {
+ # Returns: N/A
+ $SIG{TERM} = $Global::original_sig{TERM};
+ print $Global::original_stderr
+ ("$Global::progname: SIGTERM received. No new jobs will be started.\n",
+ "$Global::progname: Waiting for these ", scalar(keys %Global::running),
+ " jobs to finish. Send SIGTERM again to stop now.\n");
+ list_running_jobs();
+ $Global::start_no_new_jobs ||= 1;
+}
+
+sub reaper {
+ # A job finished.
+ # Print the output.
+ # Start another job
+ # Returns: N/A
+ my $stiff;
+ my $children_reaped = 0;
+ debug("run", "Reaper ");
+ while (($stiff = waitpid(-1, &WNOHANG)) > 0) {
+ $children_reaped++;
+ if($Global::sshmaster{$stiff}) {
+ # This is one of the ssh -M: ignore
+ next;
+ }
+ my $job = $Global::running{$stiff};
+ # '-a <(seq 10)' will give us a pid not in %Global::running
+ $job or next;
+ $job->set_exitstatus($? >> 8);
+ $job->set_exitsignal($? & 127);
+ debug("run", "died (", $job->exitstatus(), "): ", $job->seq());
+ $job->set_endtime(::now());
+ if($stiff == $Global::tty_taken) {
+ # The process that died had the tty => release it
+ $Global::tty_taken = 0;
+ }
+
+ if(not $job->should_be_retried()) {
+ # The job is done
+ # Free the jobslot
+ push @Global::slots, $job->slot();
+ if($opt::timeout) {
+ # Update average runtime for timeout
+ $Global::timeoutq->update_delta_time($job->runtime());
+ }
+ # Force printing now if the job failed and we are going to exit
+ my $print_now = ($opt::halt_on_error and $opt::halt_on_error == 2
+ and $job->exitstatus());
+ if($opt::keeporder and not $print_now) {
+ print_earlier_jobs($job);
+ } else {
+ $job->print();
+ }
+ if($job->exitstatus()) {
+ process_failed_job($job);
+ }
+
+ }
+ my $sshlogin = $job->sshlogin();
+ $sshlogin->dec_jobs_running();
+ $sshlogin->inc_jobs_completed();
+ $Global::total_running--;
+ delete $Global::running{$stiff};
+ start_more_jobs();
+ }
+ debug("run", "done ");
+ return $children_reaped;
+}
+
+sub process_failed_job {
+ # The job had an exit status <> 0, so it is an error
+ # Returns: N/A
+ my $job = shift;
+ $Global::exitstatus++;
+ $Global::total_failed++;
+ if($opt::halt_on_error) {
+ if($opt::halt_on_error == 1
+ or
+ ($opt::halt_on_error < 1 and $Global::total_failed > 3
+ and
+ $Global::total_failed / $Global::total_started > $opt::halt_on_error)) {
+ # If halt on error == 1 or --halt 10%
+ # we should gracefully exit
+ print $Global::original_stderr
+ ("$Global::progname: Starting no more jobs. ",
+ "Waiting for ", scalar(keys %Global::running),
+ " jobs to finish. This job failed:\n",
+ $job->replaced(),"\n");
+ $Global::start_no_new_jobs ||= 1;
+ $Global::halt_on_error_exitstatus = $job->exitstatus();
+ } elsif($opt::halt_on_error == 2) {
+ # If halt on error == 2 we should exit immediately
+ print $Global::original_stderr
+ ("$Global::progname: This job failed:\n",
+ $job->replaced(),"\n");
+ exit ($job->exitstatus());
+ }
+ }
+}
+
+{
+ my (%print_later,$job_end_sequence);
+
+ sub print_earlier_jobs {
+ # Print jobs completed earlier
+ # Returns: N/A
+ my $job = shift;
+ $print_later{$job->seq()} = $job;
+ $job_end_sequence ||= 1;
+ debug("run", "Looking for: $job_end_sequence ",
+ "Current: ", $job->seq(), "\n");
+ for(my $j = $print_later{$job_end_sequence};
+ $j or vec($Global::job_already_run,$job_end_sequence,1);
+ $job_end_sequence++,
+ $j = $print_later{$job_end_sequence}) {
+ debug("run", "Found job end $job_end_sequence");
+ if($j) {
+ $j->print();
+ delete $print_later{$job_end_sequence};
+ }
+ }
+ }
+}
+
+sub __USAGE__ {}
+
+sub wait_and_exit {
+ # If we do not wait, we sometimes get segfault
+ # Returns: N/A
+ my $error = shift;
+ if($error) {
+ # Kill all without printing
+ for my $job (values %Global::running) {
+ $job->kill("TERM");
+ $job->kill("TERM");
+ }
+ }
+ for (keys %Global::unkilled_children) {
+ kill 9, $_;
+ waitpid($_,0);
+ delete $Global::unkilled_children{$_};
+ }
+ wait();
+ exit($error);
+}
+
+sub die_usage {
+ # Returns: N/A
+ usage();
+ wait_and_exit(255);
+}
+
+sub usage {
+ # Returns: N/A
+ print join
+ ("\n",
+ "Usage:",
+ "",
+ "$Global::progname [options] [command [arguments]] < list_of_arguments",
+ "$Global::progname [options] [command [arguments]] (::: arguments|:::: argfile(s))...",
+ "cat ... | $Global::progname --pipe [options] [command [arguments]]",
+ "",
+ "-j n Run n jobs in parallel",
+ "-k Keep same order",
+ "-X Multiple arguments with context replace",
+ "--colsep regexp Split input on regexp for positional replacements",
+ "{} {.} {/} {/.} {#} {%} {= perl code =} Replacement strings",
+ "{3} {3.} {3/} {3/.} {=3 perl code =} Positional replacement strings",
+ "With --plus: {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =",
+ " {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}",
+ "",
+ "-S sshlogin Example: foo\@server.example.com",
+ "--slf .. Use ~/.parallel/sshloginfile as the list of sshlogins",
+ "--trc {}.bar Shorthand for --transfer --return {}.bar --cleanup",
+ "--onall Run the given command with argument on all sshlogins",
+ "--nonall Run the given command with no arguments on all sshlogins",
+ "",
+ "--pipe Split stdin (standard input) to multiple jobs.",
+ "--recend str Record end separator for --pipe.",
+ "--recstart str Record start separator for --pipe.",
+ "",
+ "See 'man $Global::progname' for details",
+ "",
+ "When using programs that use GNU Parallel to process data for publication please cite:",
+ "",
+ "O. Tange (2011): GNU Parallel - The Command-Line Power Tool,",
+ ";login: The USENIX Magazine, February 2011:42-47.",
+ "",
+ "Or you can get GNU Parallel without this requirement by paying 10000 EUR.",
+ "");
+}
+
+
+sub citation_notice {
+ # if --no-notice or --plain: do nothing
+ # if stderr redirected: do nothing
+ # if ~/.parallel/will-cite: do nothing
+ # else: print citation notice to stderr
+ if($opt::no_notice
+ or
+ $opt::plain
+ or
+ not -t $Global::original_stderr
+ or
+ -e $ENV{'HOME'}."/.parallel/will-cite") {
+ # skip
+ } else {
+ print $Global::original_stderr
+ ("When using programs that use GNU Parallel to process data for publication please cite:\n",
+ "\n",
+ " O. Tange (2011): GNU Parallel - The Command-Line Power Tool,\n",
+ " ;login: The USENIX Magazine, February 2011:42-47.\n",
+ "\n",
+ "This helps funding further development; and it won't cost you a cent.\n",
+ "Or you can get GNU Parallel without this requirement by paying 10000 EUR.\n",
+ "\n",
+ "To silence this citation notice run 'parallel --bibtex' once or use '--no-notice'.\n\n",
+ );
+ flush $Global::original_stderr;
+ }
+}
+
+
+sub warning {
+ my @w = @_;
+ my $fh = $Global::original_stderr || *STDERR;
+ my $prog = $Global::progname || "parallel";
+ print $fh $prog, ": Warning: ", @w;
+}
+
+
+sub error {
+ my @w = @_;
+ my $fh = $Global::original_stderr || *STDERR;
+ my $prog = $Global::progname || "parallel";
+ print $fh $prog, ": Error: ", @w;
+}
+
+
+sub die_bug {
+ my $bugid = shift;
+ print STDERR
+ ("$Global::progname: This should not happen. You have found a bug.\n",
+ "Please contact and include:\n",
+ "* The version number: $Global::version\n",
+ "* The bugid: $bugid\n",
+ "* The command line being run\n",
+ "* The files being read (put the files on a webserver if they are big)\n",
+ "\n",
+ "If you get the error on smaller/fewer files, please include those instead.\n");
+ ::wait_and_exit(255);
+}
+
+sub version {
+ # Returns: N/A
+ if($opt::tollef and not $opt::gnu) {
+ print "WARNING: YOU ARE USING --tollef. IF THINGS ARE ACTING WEIRD USE --gnu.\n";
+ }
+ print join("\n",
+ "GNU $Global::progname $Global::version",
+ "Copyright (C) 2007,2008,2009,2010,2011,2012,2013,2014 Ole Tange and Free Software Foundation, Inc.",
+ "License GPLv3+: GNU GPL version 3 or later ",
+ "This is free software: you are free to change and redistribute it.",
+ "GNU $Global::progname comes with no warranty.",
+ "",
+ "Web site: http://www.gnu.org/software/${Global::progname}\n",
+ "When using programs that use GNU Parallel to process data for publication please cite:\n",
+ "O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ",
+ ";login: The USENIX Magazine, February 2011:42-47.\n",
+ "Or you can get GNU Parallel without this requirement by paying 10000 EUR.\n",
+ );
+}
+
+sub bibtex {
+ # Returns: N/A
+ if($opt::tollef and not $opt::gnu) {
+ print "WARNING: YOU ARE USING --tollef. IF THINGS ARE ACTING WEIRD USE --gnu.\n";
+ }
+ print join("\n",
+ "When using programs that use GNU Parallel to process data for publication please cite:",
+ "",
+ "\@article{Tange2011a,",
+ " title = {GNU Parallel - The Command-Line Power Tool},",
+ " author = {O. Tange},",
+ " address = {Frederiksberg, Denmark},",
+ " journal = {;login: The USENIX Magazine},",
+ " month = {Feb},",
+ " number = {1},",
+ " volume = {36},",
+ " url = {http://www.gnu.org/s/parallel},",
+ " year = {2011},",
+ " pages = {42-47}",
+ "}",
+ "",
+ "(Feel free to use \\nocite{Tange2011a})",
+ "",
+ "This helps funding further development.",
+ "",
+ "Or you can get GNU Parallel without this requirement by paying 10000 EUR.",
+ ""
+ );
+ while(not -e $ENV{'HOME'}."/.parallel/will-cite") {
+ print "\nType: 'will cite' and press enter.\n> ";
+ my $input = <STDIN>;
+ if($input =~ /will cite/i) {
+ mkdir $ENV{'HOME'}."/.parallel";
+ open (my $fh, ">", $ENV{'HOME'}."/.parallel/will-cite")
+ || ::die_bug("Cannot write: ".$ENV{'HOME'}."/.parallel/will-cite");
+ close $fh;
+ print "\nThank you for your support. It is much appreciated. The citation\n",
+ "notice is now silenced.\n";
+ }
+ }
+}
+
+sub show_limits {
+ # Returns: N/A
+ print("Maximal size of command: ",Limits::Command::real_max_length(),"\n",
+ "Maximal used size of command: ",Limits::Command::max_length(),"\n",
+ "\n",
+ "Execution of will continue now, and it will try to read its input\n",
+ "and run commands; if this is not what you wanted to happen, please\n",
+ "press CTRL-D or CTRL-C\n");
+}
+
+sub __GENERIC_COMMON_FUNCTION__ {}
+
+sub uniq {
+ # Remove duplicates and return unique values
+ return keys %{{ map { $_ => 1 } @_ }};
+}
+
+sub min {
+ # Returns:
+ # Minimum value of array
+ my $min;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+ defined $min or do { $min = $_; next; }; # Set $min to the first non-undef value
+ $min = ($min < $_) ? $min : $_;
+ }
+ return $min;
+}
+
+sub max {
+ # Returns:
+ # Maximum value of array
+ my $max;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+ defined $max or do { $max = $_; next; }; # Set $max to the first non-undef value
+ $max = ($max > $_) ? $max : $_;
+ }
+ return $max;
+}
+
+sub sum {
+ # Returns:
+ # Sum of values of array
+ my @args = @_;
+ my $sum = 0;
+ for (@args) {
+ # Skip undefs
+ $_ and do { $sum += $_; }
+ }
+ return $sum;
+}
+
+sub undef_as_zero {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+sub undef_as_empty {
+ my $a = shift;
+ return $a ? $a : "";
+}
+
+{
+ my $hostname;
+ sub hostname {
+ if(not $hostname) {
+ $hostname = `hostname`;
+ chomp($hostname);
+ $hostname ||= "nohostname";
+ }
+ return $hostname;
+ }
+}
+
+sub which {
+ # Input:
+ # @programs = programs to find the path to
+ # Returns:
+ # @full_path = full paths to @programs. Nothing if not found
+ my @which;
+ for my $prg (@_) {
+ push @which, map { $_."/".$prg } grep { -x $_."/".$prg } split(":",$ENV{'PATH'});
+ }
+ return @which;
+}
+
+{
+ my ($regexp,%fakename);
+
+ sub parent_shell {
+ # Input:
+ # $pid = pid to see if (grand)*parent is a shell
+ # Returns:
+ # $shellpath = path to shell - undef if no shell found
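+ # Strategy: build a regexp of known shell names once, then walk up the
+ # process tree (using pid_table()) from $pid until a process whose
+ # command name matches a shell is found; return the path of that shell.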
+ my $pid = shift;
+ if(not $regexp) {
+ # All shells known to mankind
+ #
+ # ash bash csh dash fdsh fish fizsh ksh ksh93 mksh pdksh
+ # posh rbash rush rzsh sash sh static-sh tcsh yash zsh
+ my @shells = qw(ash bash csh dash fdsh fish fizsh ksh
+ ksh93 mksh pdksh posh rbash rush rzsh
+ sash sh static-sh tcsh yash zsh -sh -csh);
+ # Can be formatted as:
+ # [sh] -sh sh busybox sh
+ # /bin/sh /sbin/sh /opt/csw/sh
+ # NOT: foo.sh sshd crash flush pdflush scosh fsflush ssh
+ my $shell = "(?:".join("|",@shells).")";
+ $regexp = '^((\[)('. $shell. ')(\])|(|\S+/|busybox )('. $shell. '))($| )';
+ %fakename = (
+ # csh and tcsh disguise themselves as -sh/-csh
+ "-sh" => ["csh", "tcsh"],
+ "-csh" => ["tcsh", "csh"],
+ );
+ }
+ my ($children_of_ref, $parent_of_ref, $name_of_ref) = pid_table();
+ my $shellpath;
+ my $testpid = $pid;
+ while($testpid) {
+ ::debug("init", "shell? ". $name_of_ref->{$testpid}."\n");
+ if($name_of_ref->{$testpid} =~ /$regexp/o) {
+ ::debug("init", "which ".($3||$6)." => ");
+ $shellpath = (which($3 || $6,@{$fakename{$3 || $6}}))[0];
+ ::debug("init", "shell path $shellpath\n");
+ $shellpath and last;
+ }
+ $testpid = $parent_of_ref->{$testpid};
+ }
+ return $shellpath;
+ }
+}
+
+{
+ my %pid_parentpid_cmd;
+
+ sub pid_table {
+ # Returns:
+ # %children_of = { pid -> children of pid }
+ # %parent_of = { pid -> pid of parent }
+ # %name_of = { pid -> commandname }
+
+ if(not %pid_parentpid_cmd) {
+ # Filter for SysV-style `ps`
+ my $sysv = q( ps -ef | perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
+ q(s/^.{$s}//; print "@F[1,2] $_"' );
+ # BSD-style `ps`
+ my $bsd = q(ps -o pid,ppid,command -ax);
+ %pid_parentpid_cmd =
+ (
+ 'aix' => $sysv,
+ 'cygwin' => $sysv,
+ 'msys' => $sysv,
+ 'dec_osf' => $sysv,
+ 'darwin' => $bsd,
+ 'dragonfly' => $bsd,
+ 'freebsd' => $bsd,
+ 'gnu' => $sysv,
+ 'hpux' => $sysv,
+ 'linux' => $sysv,
+ 'mirbsd' => $bsd,
+ 'netbsd' => $bsd,
+ 'nto' => $sysv,
+ 'openbsd' => $bsd,
+ 'solaris' => $sysv,
+ 'svr5' => $sysv,
+ );
+ }
+ $pid_parentpid_cmd{$^O} or ::die_bug("pid_parentpid_cmd for $^O missing");
+
+ my (@pidtable,%parent_of,%children_of,%name_of);
+ # Table with pid -> children of pid
+ @pidtable = `$pid_parentpid_cmd{$^O}`;
+ my $p=$$;
+ for (@pidtable) {
+ # must match: 24436 21224 busybox ash
+ /(\S+)\s+(\S+)\s+(\S+.*)/ or ::die_bug("pidtable format: $_");
+ $parent_of{$1} = $2;
+ push @{$children_of{$2}}, $1;
+ $name_of{$1} = $3;
+ }
+ return(\%children_of, \%parent_of, \%name_of);
+ }
+}
+
+sub reap_usleep {
+ # Reap dead children.
+ # If no dead children: Sleep specified amount with exponential backoff
+ # Input:
+ # $ms = milliseconds to sleep
+ # Returns:
+ # $ms/2+0.001 if children reaped
+ # $ms*1.1 if no children reaped
+ my $ms = shift;
+ if(reaper()) {
+ # Sleep exponentially shorter (1/2^n) if a job finished
+ return $ms/2+0.001;
+ } else {
+ if($opt::timeout) {
+ $Global::timeoutq->process_timeouts();
+ }
+ usleep($ms);
+ Job::exit_if_disk_full();
+ if($opt::linebuffer) {
+ for my $job (values %Global::running) {
+ $job->print();
+ }
+ }
+ # Sleep exponentially longer (1.1^n) if a job did not finish
+ # though at most 1000 ms.
+ return (($ms < 1000) ? ($ms * 1.1) : ($ms));
+ }
+}
+
+sub usleep {
+ # Sleep this many milliseconds.
+ # Input:
+ # $ms = milliseconds to sleep
+ my $ms = shift;
+ ::debug(int($ms),"ms ");
+ select(undef, undef, undef, $ms/1000);
+}
+
+sub now {
+ # Returns time since epoch as in seconds with 3 decimals
+ # Uses:
+ # @Global::use
+ # Returns:
+ # $time = time now with millisecond accuracy
+ if(not $Global::use{"Time::HiRes"}) {
+ if(eval "use Time::HiRes qw ( time );") {
+ eval "sub TimeHiRestime { return Time::HiRes::time };";
+ } else {
+ eval "sub TimeHiRestime { return time() };";
+ }
+ $Global::use{"Time::HiRes"} = 1;
+ }
+
+ return (int(TimeHiRestime()*1000))/1000;
+}
+
+sub multiply_binary_prefix {
+ # Evaluate numbers with binary prefix
+ # Ki=2^10, Mi=2^20, Gi=2^30, Ti=2^40, Pi=2^50, Ei=2^60, Zi=2^70, Yi=2^80
+ # ki=2^10, mi=2^20, gi=2^30, ti=2^40, pi=2^50, ei=2^60, zi=2^70, yi=2^80
+ # K =2^10, M =2^20, G =2^30, T =2^40, P =2^50, E =2^60, Z =2^70, Y =2^80
+ # k =10^3, m =10^6, g =10^9, t=10^12, p=10^15, e=10^18, z=10^21, y=10^24
+ # 13G = 13*1024*1024*1024 = 13958643712
+ # Input:
+ # $s = string with prefixes
+ # Returns:
+ # $value = int with prefixes multiplied
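+ # Examples: multiply_binary_prefix("2M") = 2*1024*1024 = 2097152
+ # multiply_binary_prefix("2m") = 2*1000*1000 = 2000000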
+ my $s = shift;
+ $s =~ s/ki/*1024/gi;
+ $s =~ s/mi/*1024*1024/gi;
+ $s =~ s/gi/*1024*1024*1024/gi;
+ $s =~ s/ti/*1024*1024*1024*1024/gi;
+ $s =~ s/pi/*1024*1024*1024*1024*1024/gi;
+ $s =~ s/ei/*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/zi/*1024*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/yi/*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+ $s =~ s/xi/*1024*1024*1024*1024*1024*1024*1024*1024*1024/gi;
+
+ $s =~ s/K/*1024/g;
+ $s =~ s/M/*1024*1024/g;
+ $s =~ s/G/*1024*1024*1024/g;
+ $s =~ s/T/*1024*1024*1024*1024/g;
+ $s =~ s/P/*1024*1024*1024*1024*1024/g;
+ $s =~ s/E/*1024*1024*1024*1024*1024*1024/g;
+ $s =~ s/Z/*1024*1024*1024*1024*1024*1024*1024/g;
+ $s =~ s/Y/*1024*1024*1024*1024*1024*1024*1024*1024/g;
+ $s =~ s/X/*1024*1024*1024*1024*1024*1024*1024*1024*1024/g;
+
+ $s =~ s/k/*1000/g;
+ $s =~ s/m/*1000*1000/g;
+ $s =~ s/g/*1000*1000*1000/g;
+ $s =~ s/t/*1000*1000*1000*1000/g;
+ $s =~ s/p/*1000*1000*1000*1000*1000/g;
+ $s =~ s/e/*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/z/*1000*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/y/*1000*1000*1000*1000*1000*1000*1000*1000/g;
+ $s =~ s/x/*1000*1000*1000*1000*1000*1000*1000*1000*1000/g;
+
+ $s = eval $s;
+ ::debug($s);
+ return $s;
+}
+
+sub tmpfile {
+ # Create tempfile as $TMPDIR/parXXXXX
+ # Returns:
+ # $filename = file name created
+ return ::tempfile(DIR=>$ENV{'TMPDIR'}, TEMPLATE => 'parXXXXX', @_);
+}
+
+sub __DEBUGGING__ {}
+
+sub debug {
+ # Uses:
+ # $Global::debug
+ # %Global::fd
+ # Returns: N/A
+ $Global::debug or return;
+ @_ = grep { defined $_ ? $_ : "" } @_;
+ if($Global::debug eq "all" or $Global::debug eq $_[0]) {
+ if($Global::fd{1}) {
+ # Original stdout was saved
+ my $stdout = $Global::fd{1};
+ print $stdout @_[1..$#_];
+ } else {
+ print @_[1..$#_];
+ }
+ }
+}
+
+sub my_memory_usage {
+ # Returns:
+ # memory usage if found
+ # 0 otherwise
+ use strict;
+ use FileHandle;
+
+ my $pid = $$;
+ if(-e "/proc/$pid/stat") {
+ my $fh = FileHandle->new("</proc/$pid/stat");
+ my $data = <$fh>;
+ chomp $data;
+ $fh->close;
+
+ my @procinfo = split(/\s+/,$data);
+
+ return undef_as_zero($procinfo[22]);
+ } else {
+ return 0;
+ }
+}
+
+sub my_size {
+ # Returns:
+ # $size = size of object if Devel::Size is installed
+ # -1 otherwise
+ my @size_this = (@_);
+ eval "use Devel::Size qw(size total_size)";
+ if ($@) {
+ return -1;
+ } else {
+ return total_size(@_);
+ }
+}
+
+sub my_dump {
+ # Returns:
+ # ascii expression of object if Data::Dump(er) is installed
+ # error code otherwise
+ my @dump_this = (@_);
+ eval "use Data::Dump qw(dump);";
+ if ($@) {
+ # Data::Dump not installed
+ eval "use Data::Dumper;";
+ if ($@) {
+ my $err = "Neither Data::Dump nor Data::Dumper is installed\n".
+ "Not dumping output\n";
+ print $Global::original_stderr $err;
+ return $err;
+ } else {
+ return Dumper(@dump_this);
+ }
+ } else {
+ # Create a dummy Data::Dump:dump as Hans Schou sometimes has
+ # it undefined
+ eval "sub Data::Dump:dump {}";
+ eval "use Data::Dump qw(dump);";
+ return (Data::Dump::dump(@dump_this));
+ }
+}
+
+sub my_croak {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ croak(@_);
+}
+
+sub my_carp {
+ eval "use Carp; 1";
+ $Carp::Verbose = 1;
+ carp(@_);
+}
+
+sub __OBJECT_ORIENTED_PARTS__ {}
+
+package SSHLogin;
+
+sub new {
+ my $class = shift;
+ my $sshlogin_string = shift;
+ my $ncpus;
+ my %hostgroups;
+ # SSHLogins can have these formats:
+ # @grp+grp/ncpu//usr/bin/ssh user@server
+ # ncpu//usr/bin/ssh user@server
+ # /usr/bin/ssh user@server
+ # user@server
+ # ncpu/user@server
+ # @grp+grp/user@server
+ if($sshlogin_string =~ s:^\@([^/]+)/?::) {
+ # Look for SSHLogin hostgroups
+ %hostgroups = map { $_ => 1 } split(/\+/, $1);
+ }
+ if ($sshlogin_string =~ s:^(\d+)/::) {
+ # Override default autodetected ncpus unless missing
+ $ncpus = $1;
+ }
+ my $string = $sshlogin_string;
+ # An SSHLogin is always in the hostgroup of its $string-name
+ $hostgroups{$string} = 1;
+ @Global::hostgroups{keys %hostgroups} = values %hostgroups;
+ my @unget = ();
+ my $no_slash_string = $string;
+ $no_slash_string =~ s/[^-a-z0-9:]/_/gi;
+ return bless {
+ 'string' => $string,
+ 'jobs_running' => 0,
+ 'jobs_completed' => 0,
+ 'maxlength' => undef,
+ 'max_jobs_running' => undef,
+ 'orig_max_jobs_running' => undef,
+ 'ncpus' => $ncpus,
+ 'hostgroups' => \%hostgroups,
+ 'sshcommand' => undef,
+ 'serverlogin' => undef,
+ 'control_path_dir' => undef,
+ 'control_path' => undef,
+ 'time_to_login' => undef,
+ 'last_login_at' => undef,
+ 'loadavg_file' => $ENV{'HOME'} . "/.parallel/tmp/loadavg-" .
+ $no_slash_string,
+ 'loadavg' => undef,
+ 'last_loadavg_update' => 0,
+ 'swap_activity_file' => $ENV{'HOME'} . "/.parallel/tmp/swap_activity-" .
+ $no_slash_string,
+ 'swap_activity' => undef,
+ }, ref($class) || $class;
+}
+
+sub DESTROY {
+ my $self = shift;
+ # Remove temporary files if they are created.
+ unlink $self->{'loadavg_file'};
+ unlink $self->{'swap_activity_file'};
+}
+
+sub string {
+ my $self = shift;
+ return $self->{'string'};
+}
+
+sub jobs_running {
+ my $self = shift;
+
+ return ($self->{'jobs_running'} || "0");
+}
+
+sub inc_jobs_running {
+ my $self = shift;
+ $self->{'jobs_running'}++;
+}
+
+sub dec_jobs_running {
+ my $self = shift;
+ $self->{'jobs_running'}--;
+}
+
+sub set_maxlength {
+ my $self = shift;
+ $self->{'maxlength'} = shift;
+}
+
+sub maxlength {
+ my $self = shift;
+ return $self->{'maxlength'};
+}
+
+sub jobs_completed {
+ my $self = shift;
+ return $self->{'jobs_completed'};
+}
+
+sub in_hostgroups {
+ # Input:
+ # @hostgroups = the hostgroups to look for
+ # Returns:
+ # true if intersection of @hostgroups and the hostgroups of this
+ # SSHLogin is non-empty
+ my $self = shift;
+ return grep { defined $self->{'hostgroups'}{$_} } @_;
+}
+
+sub hostgroups {
+ my $self = shift;
+ return keys %{$self->{'hostgroups'}};
+}
+
+sub inc_jobs_completed {
+ my $self = shift;
+ $self->{'jobs_completed'}++;
+}
+
+sub set_max_jobs_running {
+ my $self = shift;
+ if(defined $self->{'max_jobs_running'}) {
+ $Global::max_jobs_running -= $self->{'max_jobs_running'};
+ }
+ $self->{'max_jobs_running'} = shift;
+ if(defined $self->{'max_jobs_running'}) {
+ # max_jobs_running could be reset if -j is a changed file
+ $Global::max_jobs_running += $self->{'max_jobs_running'};
+ }
+ # Initialize orig to the first non-zero value that comes around
+ $self->{'orig_max_jobs_running'} ||= $self->{'max_jobs_running'};
+}
+
+sub swapping {
+ my $self = shift;
+ my $swapping = $self->swap_activity();
+ return (not defined $swapping or $swapping)
+}
+
+sub swap_activity {
+ # If the currently known swap activity is too old:
+ # Recompute a new one in the background
+ # Returns:
+ # last swap activity computed
+ my $self = shift;
+ # Should we update the swap_activity file?
+ my $update_swap_activity_file = 0;
+ if(-r $self->{'swap_activity_file'}) {
+ open(my $swap_fh, "<", $self->{'swap_activity_file'}) || ::die_bug("swap_activity_file-r");
+ my $swap_out = <$swap_fh>;
+ close $swap_fh;
+ if($swap_out =~ /^(\d+)$/) {
+ $self->{'swap_activity'} = $1;
+ ::debug("swap", "New swap_activity: ", $self->{'swap_activity'});
+ }
+ ::debug("swap", "Last update: ", $self->{'last_swap_activity_update'});
+ if(time - $self->{'last_swap_activity_update'} > 10) {
+ # last swap activity update was started 10 seconds ago
+ ::debug("swap", "Older than 10 sec: ", $self->{'swap_activity_file'});
+ $update_swap_activity_file = 1;
+ }
+ } else {
+ ::debug("swap", "No swap_activity file: ", $self->{'swap_activity_file'});
+ $self->{'swap_activity'} = undef;
+ $update_swap_activity_file = 1;
+ }
+ if($update_swap_activity_file) {
+ ::debug("swap", "Updating swap_activity file ", $self->{'swap_activity_file'});
+ $self->{'last_swap_activity_update'} = time;
+ -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
+ -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
+ my $swap_activity;
+ $swap_activity = swapactivityscript();
+ if($self->{'string'} ne ":") {
+ $swap_activity = $self->sshcommand() . " " . $self->serverlogin() . " " .
+ ::shell_quote_scalar($swap_activity);
+ }
+ # Run swap_activity measuring.
+ # As the command can take long to run if run remote
+ # save it to a tmp file before moving it to the correct file
+ my $file = $self->{'swap_activity_file'};
+ my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".swp");
+ ::debug("swap", "\n", $swap_activity, "\n");
+ qx{ ($swap_activity > $tmpfile && mv $tmpfile $file || rm $tmpfile) & };
+ }
+ return $self->{'swap_activity'};
+}
+
+{
+ my $script;
+
+ sub swapactivityscript {
+ # Returns:
+ # shellscript for detecting swap activity
+ #
+ # arguments for vmstat are OS dependent
+ # swap_in and swap_out are in different columns depending on OS
+ #
+ if(not $script) {
+ my %vmstat = (
+ # linux: $7*$8
+ # $ vmstat 1 2
+ # procs -----------memory---------- ---swap-- -----io---- -system-- ----cpu----
+ # r b swpd free buff cache si so bi bo in cs us sy id wa
+ # 5 0 51208 1701096 198012 18857888 0 0 37 153 28 19 56 11 33 1
+ # 3 0 51208 1701288 198012 18857972 0 0 0 0 3638 10412 15 3 82 0
+ 'linux' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # solaris: $6*$7
+ # $ vmstat -S 1 2
+ # kthr memory page disk faults cpu
+ # r b w swap free si so pi po fr de sr s3 s4 -- -- in sy cs us sy id
+ # 0 0 0 4628952 3208408 0 0 3 1 1 0 0 -0 2 0 0 263 613 246 1 2 97
+ # 0 0 0 4552504 3166360 0 0 0 0 0 0 0 0 0 0 0 246 213 240 1 1 98
+ 'solaris' => ['vmstat -S 1 2 | tail -1', '$6*$7'],
+
+ # darwin (macosx): $21*$22
+ # $ vm_stat -c 2 1
+ # Mach Virtual Memory Statistics: (page size of 4096 bytes)
+ # free active specul inactive throttle wired prgable faults copy 0fill reactive purged file-backed anonymous cmprssed cmprssor dcomprs comprs pageins pageout swapins swapouts
+ # 346306 829050 74871 606027 0 240231 90367 544858K 62343596 270837K 14178 415070 570102 939846 356 370 116 922 4019813 4 0 0
+ # 345740 830383 74875 606031 0 239234 90369 2696 359 553 0 0 570110 941179 356 370 0 0 0 0 0 0
+ 'darwin' => ['vm_stat -c 2 1 | tail -n1', '$21*$22'],
+
+ # ultrix: $12*$13
+ # $ vmstat -S 1 2
+ # procs faults cpu memory page disk
+ # r b w in sy cs us sy id avm fre si so pi po fr de sr s0
+ # 1 0 0 4 23 2 3 0 97 7743 217k 0 0 0 0 0 0 0 0
+ # 1 0 0 6 40 8 0 1 99 7743 217k 0 0 3 0 0 0 0 0
+ 'ultrix' => ['vmstat -S 1 2 | tail -1', '$12*$13'],
+
+ # aix: $6*$7
+ # $ vmstat 1 2
+ # System configuration: lcpu=1 mem=2048MB
+ #
+ # kthr memory page faults cpu
+ # ----- ----------- ------------------------ ------------ -----------
+ # r b avm fre re pi po fr sr cy in sy cs us sy id wa
+ # 0 0 333933 241803 0 0 0 0 0 0 10 143 90 0 0 99 0
+ # 0 0 334125 241569 0 0 0 0 0 0 37 5368 184 0 9 86 5
+ 'aix' => ['vmstat 1 2 | tail -n1', '$6*$7'],
+
+ # freebsd: $8*$9
+ # $ vmstat -H 1 2
+ # procs memory page disks faults cpu
+ # r b w avm fre flt re pi po fr sr ad0 ad1 in sy cs us sy id
+ # 1 0 0 596716 19560 32 0 0 0 33 8 0 0 11 220 277 0 0 99
+ # 0 0 0 596716 19560 2 0 0 0 0 0 0 0 11 144 263 0 1 99
+ 'freebsd' => ['vmstat -H 1 2 | tail -n1', '$8*$9'],
+
+ # mirbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 cd0 int sys cs us sy id
+ # 0 0 0 25776 164968 34 0 0 0 0 0 0 0 230 259 38 4 0 96
+ # 0 0 0 25776 164968 24 0 0 0 0 0 0 0 237 275 37 0 0 100
+ 'mirbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # netbsd: $7*$8
+ # $ vmstat 1 2
+ # procs memory page disks faults cpu
+ # r b avm fre flt re pi po fr sr w0 w1 in sy cs us sy id
+ # 0 0 138452 6012 54 0 0 0 1 2 3 0 4 100 23 0 0 100
+ # 0 0 138456 6008 1 0 0 0 0 0 0 0 7 26 19 0 0 100
+ 'netbsd' => ['vmstat 1 2 | tail -n1', '$7*$8'],
+
+ # openbsd: $8*$9
+ # $ vmstat 1 2
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 wd1 int sys cs us sy id
+ # 0 0 0 76596 109944 73 0 0 0 0 0 0 1 5 259 22 0 1 99
+ # 0 0 0 76604 109936 24 0 0 0 0 0 0 0 7 114 20 0 1 99
+ 'openbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # hpux: $8*$9
+ # $ vmstat 1 2
+ # procs memory page faults cpu
+ # r b w avm free re at pi po fr de sr in sy cs us sy id
+ # 1 0 0 247211 216476 4 1 0 0 0 0 0 102 73005 54 6 11 83
+ # 1 0 0 247211 216421 43 9 0 0 0 0 0 144 1675 96 25269512791222387000 25269512791222387000 105
+ 'hpux' => ['vmstat 1 2 | tail -n1', '$8*$9'],
+
+ # dec_osf (tru64): $11*$12
+ # $ vmstat 1 2
+ # Virtual Memory Statistics: (pagesize = 8192)
+ # procs memory pages intr cpu
+ # r w u act free wire fault cow zero react pin pout in sy cs us sy id
+ # 3 181 36 51K 1895 8696 348M 59M 122M 259 79M 0 5 218 302 4 1 94
+ # 3 181 36 51K 1893 8696 3 15 21 0 28 0 4 81 321 1 1 98
+ 'dec_osf' => ['vmstat 1 2 | tail -n1', '$11*$12'],
+
+ # gnu (hurd): $7*$8
+ # $ vmstat -k 1 2
+ # (pagesize: 4, size: 512288, swap size: 894972)
+ # free actv inact wired zeroed react pgins pgouts pfaults cowpfs hrat caobj cache swfree
+ # 371940 30844 89228 20276 298348 0 48192 19016 756105 99808 98% 876 20628 894972
+ # 371940 30844 89228 20276 +0 +0 +0 +0 +42 +2 98% 876 20628 894972
+ 'gnu' => ['vmstat -k 1 2 | tail -n1', '$7*$8'],
+
+ # -nto (qnx has no swap)
+ #-irix
+ #-svr5 (scosysv)
+ );
+ my $perlscript = "";
+ for my $os (keys %vmstat) {
+ #q[ { vmstat 1 2 2> /dev/null || vmstat -c 1 2; } | ].
+ # q[ awk 'NR!=4{next} NF==17||NF==16{print $7*$8} NF==22{print $21*$22} {exit}' ];
+ $vmstat{$os}[1] =~ s/\$/\\\\\\\$/g; # $ => \\\$
+ $perlscript .= 'if($^O eq "'.$os.'") { print `'.$vmstat{$os}[0].' | awk "{print ' .
+ $vmstat{$os}[1] . '}"` }';
+ }
+ $perlscript = "perl -e " . ::shell_quote_scalar($perlscript);
+ $script = $Global::envvar. " " .$perlscript;
+ }
+ return $script;
+ }
+}
+
+sub too_fast_remote_login {
+ my $self = shift;
+ if($self->{'last_login_at'} and $self->{'time_to_login'}) {
+ # sshd normally allows 10 simultaneous logins
+ # A login takes time_to_login
+ # So time_to_login/5 should be safe
+ # If now <= last_login + time_to_login/5: Then it is too soon.
+ my $too_fast = (::now() <= $self->{'last_login_at'}
+ + $self->{'time_to_login'}/5);
+ ::debug("run", "Too fast? $too_fast ");
+ return $too_fast;
+ } else {
+ # No logins so far (or time_to_login not computed): it is not too fast
+ return 0;
+ }
+}
+
+sub last_login_at {
+ my $self = shift;
+ return $self->{'last_login_at'};
+}
+
+sub set_last_login_at {
+ my $self = shift;
+ $self->{'last_login_at'} = shift;
+}
+
+sub loadavg_too_high {
+ my $self = shift;
+ my $loadavg = $self->loadavg();
+ return (not defined $loadavg or
+ $loadavg > $self->max_loadavg());
+}
+
+sub loadavg {
+ # If the currently known loadavg is too old:
+ # Recompute a new one in the background
+ # The load average is computed as the number of processes waiting for disk
+ # or CPU right now. So it is the server load this instant and not averaged over
+ # several minutes. This is needed so GNU Parallel will at most start one job
+ # that will push the load over the limit.
+ #
+ # Returns:
+ # $last_loadavg = last load average computed (undef if none)
+ my $self = shift;
+ # Should we update the loadavg file?
+ my $update_loadavg_file = 0;
+ if(open(my $load_fh, "<", $self->{'loadavg_file'})) {
+ local $/ = undef;
+ my $load_out = <$load_fh>;
+ close $load_fh;
+ my $load =()= ($load_out=~/(^[DR]....[^\[])/gm);
+ if($load > 0) {
+ # load is overestimated by 1
+ $self->{'loadavg'} = $load - 1;
+ ::debug("load", "New loadavg: ", $self->{'loadavg'});
+ } else {
+ ::die_bug("loadavg_invalid_content: $load_out");
+ }
+ ::debug("load", "Last update: ", $self->{'last_loadavg_update'});
+ if(time - $self->{'last_loadavg_update'} > 10) {
+ # last loadavg was started 10 seconds ago
+ ::debug("load", time - $self->{'last_loadavg_update'}, " secs old: ",
+ $self->{'loadavg_file'});
+ $update_loadavg_file = 1;
+ }
+ } else {
+ ::debug("load", "No loadavg file: ", $self->{'loadavg_file'});
+ $self->{'loadavg'} = undef;
+ $update_loadavg_file = 1;
+ }
+ if($update_loadavg_file) {
+ ::debug("load", "Updating loadavg file", $self->{'loadavg_file'}, "\n");
+ $self->{'last_loadavg_update'} = time;
+ -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
+ -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
+ my $cmd = "";
+ if($self->{'string'} ne ":") {
+ $cmd = $self->sshcommand() . " " . $self->serverlogin() . " ";
+ }
+ # TODO Is it called 'ps ax -o state,command' on other platforms?
+ $cmd .= "ps ax -o state,command";
+ # As the command can take long to run if run remote
+ # save it to a tmp file before moving it to the correct file
+ my $file = $self->{'loadavg_file'};
+ my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".loa");
+ qx{ ($cmd > $tmpfile && mv $tmpfile $file || rm $tmpfile) & };
+ }
+ return $self->{'loadavg'};
+}
+
+sub max_loadavg {
+ my $self = shift;
+ # If --load is a file it might be changed
+ if($Global::max_load_file) {
+ my $mtime = (stat($Global::max_load_file))[9];
+ if($mtime > $Global::max_load_file_last_mod) {
+ $Global::max_load_file_last_mod = $mtime;
+ for my $sshlogin (values %Global::host) {
+ $sshlogin->set_max_loadavg(undef);
+ }
+ }
+ }
+ if(not defined $self->{'max_loadavg'}) {
+ $self->{'max_loadavg'} =
+ $self->compute_max_loadavg($opt::load);
+ }
+ ::debug("load", "max_loadavg: ", $self->string(), " ", $self->{'max_loadavg'});
+ return $self->{'max_loadavg'};
+}
+
+sub set_max_loadavg {
+ my $self = shift;
+ $self->{'max_loadavg'} = shift;
+}
+
+sub compute_max_loadavg {
+ # Parse the max loadaverage that the user asked for using --load
+ # Returns:
+ # max loadaverage
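+ # Examples (assuming an sshlogin with 8 CPUs):
+ # --load +2 => 10, --load -2 => 6, --load 75% => 6, --load 3.5 => 3.5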
+ my $self = shift;
+ my $loadspec = shift;
+ my $load;
+ if(defined $loadspec) {
+ if($loadspec =~ /^\+(\d+)$/) {
+ # E.g. --load +2
+ my $j = $1;
+ $load =
+ $self->ncpus() + $j;
+ } elsif ($loadspec =~ /^-(\d+)$/) {
+ # E.g. --load -2
+ my $j = $1;
+ $load =
+ $self->ncpus() - $j;
+ } elsif ($loadspec =~ /^(\d+)\%$/) {
+ my $j = $1;
+ $load =
+ $self->ncpus() * $j / 100;
+ } elsif ($loadspec =~ /^(\d+(\.\d+)?)$/) {
+ $load = $1;
+ } elsif (-f $loadspec) {
+ $Global::max_load_file = $loadspec;
+ $Global::max_load_file_last_mod = (stat($Global::max_load_file))[9];
+ if(open(my $in_fh, "<", $Global::max_load_file)) {
+ my $opt_load_file = join("",<$in_fh>);
+ close $in_fh;
+ $load = $self->compute_max_loadavg($opt_load_file);
+ } else {
+ print $Global::original_stderr "Cannot open $loadspec\n";
+ ::wait_and_exit(255);
+ }
+ } else {
+ print $Global::original_stderr "Parsing of --load failed\n";
+ ::die_usage();
+ }
+ if($load < 0.01) {
+ $load = 0.01;
+ }
+ }
+ return $load;
+}
+
+sub time_to_login {
+ my $self = shift;
+ return $self->{'time_to_login'};
+}
+
+sub set_time_to_login {
+ my $self = shift;
+ $self->{'time_to_login'} = shift;
+}
+
+sub max_jobs_running {
+ my $self = shift;
+ if(not defined $self->{'max_jobs_running'}) {
+ my $nproc = $self->compute_number_of_processes($opt::jobs);
+ $self->set_max_jobs_running($nproc);
+ }
+ return $self->{'max_jobs_running'};
+}
+
+sub orig_max_jobs_running {
+ my $self = shift;
+ return $self->{'orig_max_jobs_running'};
+}
+
+sub compute_number_of_processes {
+ # Number of processes wanted and limited by system resources
+ # Returns:
+ # Number of processes
+ my $self = shift;
+ my $opt_P = shift;
+ my $wanted_processes = $self->user_requested_processes($opt_P);
+ if(not defined $wanted_processes) {
+ $wanted_processes = $Global::default_simultaneous_sshlogins;
+ }
+ ::debug("load", "Wanted procs: $wanted_processes\n");
+ my $system_limit =
+ $self->processes_available_by_system_limit($wanted_processes);
+ ::debug("load", "Limited to procs: $system_limit\n");
+ return $system_limit;
+}
+
+sub processes_available_by_system_limit {
+ # If the wanted number of processes is bigger than the system limits:
+ # Limit them to the system limits
+ # Limits are: File handles, number of input lines, processes,
+ # and taking > 1 second to spawn 10 extra processes
+ # Returns:
+ # Number of processes
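+ # Strategy: repeatedly fork a sleeping child and open spare filehandles
+ # until the wanted number is reached, fork() fails, filehandles run out,
+ # or spawning becomes too slow; then kill the children, close the
+ # filehandles and return the number of slots that could be reserved.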
+ my $self = shift;
+ my $wanted_processes = shift;
+
+ my $system_limit = 0;
+ my @jobs = ();
+ my $job;
+ my @args = ();
+ my $arg;
+ my $more_filehandles = 1;
+ my $max_system_proc_reached = 0;
+ my $slow_spawining_warning_printed = 0;
+ my $time = time;
+ my %fh;
+ my @children;
+
+ # Reserve filehandles
+ # perl uses 7 filehandles for something?
+ # parallel uses 1 for memory_usage
+ # parallel uses 4 for ?
+ for my $i (1..12) {
+ open($fh{"init-$i"}, "<", "/dev/null");
+ }
+
+ for(1..2) {
+ # System process limit
+ my $child;
+ if($child = fork()) {
+ push (@children,$child);
+ $Global::unkilled_children{$child} = 1;
+ } elsif(defined $child) {
+ # The child takes one process slot
+ # It will be killed later
+ $SIG{TERM} = $Global::original_sig{TERM};
+ sleep 10000000;
+ exit(0);
+ } else {
+ $max_system_proc_reached = 1;
+ }
+ }
+ my $count_jobs_already_read = $Global::JobQueue->next_seq();
+ my $wait_time_for_getting_args = 0;
+ my $start_time = time;
+ while(1) {
+ $system_limit >= $wanted_processes and last;
+ not $more_filehandles and last;
+ $max_system_proc_reached and last;
+ my $before_getting_arg = time;
+ if($Global::semaphore or $opt::pipe) {
+ # Skip: No need to get args
+ } elsif(defined $opt::retries and $count_jobs_already_read) {
+ # For retries we may need to run all jobs on this sshlogin
+ # so include the already read jobs for this sshlogin
+ $count_jobs_already_read--;
+ } else {
+ if($opt::X or $opt::m) {
+ # The arguments may have to be re-spread over several jobslots
+ # So pessimistically only read one arg per jobslot
+ # instead of a full commandline
+ if($Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->empty()) {
+ if($Global::JobQueue->empty()) {
+ last;
+ } else {
+ ($job) = $Global::JobQueue->get();
+ push(@jobs, $job);
+ }
+ } else {
+ ($arg) = $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
+ push(@args, $arg);
+ }
+ } else {
+ # If there are no more command lines, then we have a process
+ # per command line, so no need to go further
+ $Global::JobQueue->empty() and last;
+ ($job) = $Global::JobQueue->get();
+ push(@jobs, $job);
+ }
+ }
+ $wait_time_for_getting_args += time - $before_getting_arg;
+ $system_limit++;
+
+ # Every simultaneous process uses 2 filehandles when grouping
+ # Every simultaneous process uses 2 filehandles when compressing
+ $more_filehandles = open($fh{$system_limit*10}, "<", "/dev/null")
+ && open($fh{$system_limit*10+2}, "<", "/dev/null")
+ && open($fh{$system_limit*10+3}, "<", "/dev/null")
+ && open($fh{$system_limit*10+4}, "<", "/dev/null");
+
+ # System process limit
+ my $child;
+ if($child = fork()) {
+ push (@children,$child);
+ $Global::unkilled_children{$child} = 1;
+ } elsif(defined $child) {
+ # The child takes one process slot
+ # It will be killed later
+ $SIG{TERM} = $Global::original_sig{TERM};
+ sleep 10000000;
+ exit(0);
+ } else {
+ $max_system_proc_reached = 1;
+ }
+ my $forktime = time - $time - $wait_time_for_getting_args;
+ ::debug("run", "Time to fork $system_limit procs: $wait_time_for_getting_args ",
+ $forktime,
+ " (processes so far: ", $system_limit,")\n");
+ if($system_limit > 10 and
+ $forktime > 1 and
+ $forktime > $system_limit * 0.01
+ and not $slow_spawining_warning_printed) {
+ # It took more than 0.01 second on average to fork a process.
+ # Give the user a warning. He can press Ctrl-C if this
+ # sucks.
+ print $Global::original_stderr
+ ("parallel: Warning: Starting $system_limit processes took > $forktime sec.\n",
+ "Consider adjusting -j. Press CTRL-C to stop.\n");
+ $slow_spawining_warning_printed = 1;
+ }
+ }
+ # Cleanup: Close the files
+ for (values %fh) { close $_ }
+ # Cleanup: Kill the children
+ for my $pid (@children) {
+ kill 9, $pid;
+ waitpid($pid,0);
+ delete $Global::unkilled_children{$pid};
+ }
+ # Cleanup: Unget the command_lines or the @args
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->unget(@args);
+ $Global::JobQueue->unget(@jobs);
+ if($system_limit < $wanted_processes) {
+ # The system_limit is less than the wanted_processes
+ if($system_limit < 1 and not $Global::JobQueue->empty()) {
+ ::warning("Cannot spawn any jobs. Raising ulimit -u or /etc/security/limits.conf\n",
+ "or /proc/sys/kernel/pid_max may help.\n");
+ ::wait_and_exit(255);
+ }
+ if(not $more_filehandles) {
+ ::warning("Only enough file handles to run ", $system_limit, " jobs in parallel.\n",
+ "Running 'parallel -j0 -N", $system_limit, " --pipe parallel -j0' or ",
+ "raising ulimit -n or /etc/security/limits.conf may help.\n");
+ }
+ if($max_system_proc_reached) {
+ ::warning("Only enough available processes to run ", $system_limit,
+ " jobs in parallel. Raising ulimit -u or /etc/security/limits.conf\n",
+ "or /proc/sys/kernel/pid_max may help.\n");
+ }
+ }
+ if($] == 5.008008 and $system_limit > 1000) {
+ # https://savannah.gnu.org/bugs/?36942
+ $system_limit = 1000;
+ }
+ if($Global::JobQueue->empty()) {
+ $system_limit ||= 1;
+ }
+ if($self->string() ne ":" and
+ $system_limit > $Global::default_simultaneous_sshlogins) {
+ $system_limit =
+ $self->simultaneous_sshlogin_limit($system_limit);
+ }
+ return $system_limit;
+}
+
+sub simultaneous_sshlogin_limit {
+ # Test by logging in wanted number of times simultaneously
+ # Returns:
+ # min($wanted_processes,$working_simultaneous_ssh_logins-1)
+ my $self = shift;
+ my $wanted_processes = shift;
+ if($self->{'time_to_login'}) {
+ return $wanted_processes;
+ }
+
+ # Try twice because it guesses wrong sometimes
+ # Choose the minimal
+ my $ssh_limit =
+ ::min($self->simultaneous_sshlogin($wanted_processes),
+ $self->simultaneous_sshlogin($wanted_processes));
+ if($ssh_limit < $wanted_processes) {
+ my $serverlogin = $self->serverlogin();
+ ::warning("ssh to $serverlogin only allows ",
+ "for $ssh_limit simultaneous logins.\n",
+ "You may raise this by changing ",
+ "/etc/ssh/sshd_config:MaxStartups and MaxSessions on $serverlogin.\n",
+ "Using only ",$ssh_limit-1," connections ",
+ "to avoid race conditions.\n");
+ }
+ # Race condition can cause problem if using all sshs.
+ if($ssh_limit > 1) { $ssh_limit -= 1; }
+ return $ssh_limit;
+}
+
+sub simultaneous_sshlogin {
+ # Using $sshlogin try to see if we can do $wanted_processes
+ # simultaneous logins
+ # (ssh host echo simultaneouslogin & ssh host echo simultaneouslogin & ...)|grep simul|wc -l
+ # Returns:
+ # Number of successful logins
+ my $self = shift;
+ my $wanted_processes = shift;
+ my $sshcmd = $self->sshcommand();
+ my $serverlogin = $self->serverlogin();
+ my $sshdelay = $opt::sshdelay ? "sleep $opt::sshdelay;" : "";
+ my $cmd = "$sshdelay$sshcmd $serverlogin echo simultaneouslogin &1 &"x$wanted_processes;
+ ::debug("init", "Trying $wanted_processes logins at $serverlogin\n");
+ open (my $simul_fh, "-|", "($cmd)|grep simultaneouslogin | wc -l") or
+ ::die_bug("simultaneouslogin");
+ my $ssh_limit = <$simul_fh>;
+ close $simul_fh;
+ chomp $ssh_limit;
+ return $ssh_limit;
+}
+
+sub set_ncpus {
+ my $self = shift;
+ $self->{'ncpus'} = shift;
+}
+
+sub user_requested_processes {
+ # Parse the number of processes that the user asked for using -j
+ # Returns:
+ # the number of processes to run on this sshlogin
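+ # Examples (assuming an sshlogin with 8 cores):
+ # -j +2 => 10, -j -2 => 6, -j 50% => 4, -j 0 => $Global::infinity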
+ my $self = shift;
+ my $opt_P = shift;
+ my $processes;
+ if(defined $opt_P) {
+ if($opt_P =~ /^\+(\d+)$/) {
+ # E.g. -P +2
+ my $j = $1;
+ $processes =
+ $self->ncpus() + $j;
+ } elsif ($opt_P =~ /^-(\d+)$/) {
+ # E.g. -P -2
+ my $j = $1;
+ $processes =
+ $self->ncpus() - $j;
+ } elsif ($opt_P =~ /^(\d+(\.\d+)?)\%$/) {
+ # E.g. -P 10.5%
+ my $j = $1;
+ $processes =
+ $self->ncpus() * $j / 100;
+ } elsif ($opt_P =~ /^(\d+)$/) {
+ $processes = $1;
+ if($processes == 0) {
+ # -P 0 = infinity (or at least close)
+ $processes = $Global::infinity;
+ }
+ } elsif (-f $opt_P) {
+ $Global::max_procs_file = $opt_P;
+ $Global::max_procs_file_last_mod = (stat($Global::max_procs_file))[9];
+ if(open(my $in_fh, "<", $Global::max_procs_file)) {
+ my $opt_P_file = join("",<$in_fh>);
+ close $in_fh;
+ $processes = $self->user_requested_processes($opt_P_file);
+ } else {
+ ::error("Cannot open $opt_P.\n");
+ ::wait_and_exit(255);
+ }
+ } else {
+ ::error("Parsing of --jobs/-j/--max-procs/-P failed.\n");
+ ::die_usage();
+ }
+ $processes = ::ceil($processes);
+ }
+ return $processes;
+}
+
+sub ncpus {
+ my $self = shift;
+ if(not defined $self->{'ncpus'}) {
+ my $sshcmd = $self->sshcommand();
+ my $serverlogin = $self->serverlogin();
+ if($serverlogin eq ":") {
+ if($opt::use_cpus_instead_of_cores) {
+ $self->{'ncpus'} = no_of_cpus();
+ } else {
+ $self->{'ncpus'} = no_of_cores();
+ }
+ } else {
+ my $ncpu;
+ my $sqe = ::shell_quote_scalar($Global::envvar);
+ if($opt::use_cpus_instead_of_cores) {
+ $ncpu = qx(echo|$sshcmd $serverlogin $sqe parallel --number-of-cpus);
+ } else {
+ ::debug("init",qq(echo|$sshcmd $serverlogin $sqe parallel --number-of-cores\n));
+ $ncpu = qx(echo|$sshcmd $serverlogin $sqe parallel --number-of-cores);
+ }
+ chomp $ncpu;
+ if($ncpu =~ /^\s*[0-9]+\s*$/s) {
+ $self->{'ncpus'} = $ncpu;
+ } else {
+ ::warning("Could not figure out ",
+ "number of cpus on $serverlogin ($ncpu). Using 1.\n");
+ $self->{'ncpus'} = 1;
+ }
+ }
+ }
+ return $self->{'ncpus'};
+}
+
+sub no_of_cpus {
+ # Returns:
+ # Number of physical CPUs
+ local $/="\n"; # If delimiter is set, then $/ will be wrong
+ my $no_of_cpus;
+ if ($^O eq 'linux') {
+ $no_of_cpus = no_of_cpus_gnu_linux() || no_of_cores_gnu_linux();
+ } elsif ($^O eq 'freebsd') {
+ $no_of_cpus = no_of_cpus_freebsd();
+ } elsif ($^O eq 'netbsd') {
+ $no_of_cpus = no_of_cpus_netbsd();
+ } elsif ($^O eq 'openbsd') {
+ $no_of_cpus = no_of_cpus_openbsd();
+ } elsif ($^O eq 'gnu') {
+ $no_of_cpus = no_of_cpus_hurd();
+ } elsif ($^O eq 'darwin') {
+ $no_of_cpus = no_of_cpus_darwin();
+ } elsif ($^O eq 'solaris') {
+ $no_of_cpus = no_of_cpus_solaris();
+ } elsif ($^O eq 'aix') {
+ $no_of_cpus = no_of_cpus_aix();
+ } elsif ($^O eq 'hpux') {
+ $no_of_cpus = no_of_cpus_hpux();
+ } elsif ($^O eq 'nto') {
+ $no_of_cpus = no_of_cpus_qnx();
+ } elsif ($^O eq 'svr5') {
+ $no_of_cpus = no_of_cpus_openserver();
+ } elsif ($^O eq 'irix') {
+ $no_of_cpus = no_of_cpus_irix();
+ } elsif ($^O eq 'dec_osf') {
+ $no_of_cpus = no_of_cpus_tru64();
+ } else {
+ $no_of_cpus = (no_of_cpus_gnu_linux()
+ || no_of_cpus_freebsd()
+ || no_of_cpus_netbsd()
+ || no_of_cpus_openbsd()
+ || no_of_cpus_hurd()
+ || no_of_cpus_darwin()
+ || no_of_cpus_solaris()
+ || no_of_cpus_aix()
+ || no_of_cpus_hpux()
+ || no_of_cpus_qnx()
+ || no_of_cpus_openserver()
+ || no_of_cpus_irix()
+ || no_of_cpus_tru64()
+ # Number of cores is better than no guess for #CPUs
+ || nproc()
+ );
+ }
+ if($no_of_cpus) {
+ chomp $no_of_cpus;
+ return $no_of_cpus;
+ } else {
+ ::warning("Cannot figure out number of cpus. Using 1.\n");
+ return 1;
+ }
+}
+
+sub no_of_cores {
+ # Returns:
+ # Number of CPU cores
+ local $/="\n"; # If delimiter is set, then $/ will be wrong
+ my $no_of_cores;
+ if ($^O eq 'linux') {
+ $no_of_cores = no_of_cores_gnu_linux();
+ } elsif ($^O eq 'freebsd') {
+ $no_of_cores = no_of_cores_freebsd();
+ } elsif ($^O eq 'netbsd') {
+ $no_of_cores = no_of_cores_netbsd();
+ } elsif ($^O eq 'openbsd') {
+ $no_of_cores = no_of_cores_openbsd();
+ } elsif ($^O eq 'gnu') {
+ $no_of_cores = no_of_cores_hurd();
+ } elsif ($^O eq 'darwin') {
+ $no_of_cores = no_of_cores_darwin();
+ } elsif ($^O eq 'solaris') {
+ $no_of_cores = no_of_cores_solaris();
+ } elsif ($^O eq 'aix') {
+ $no_of_cores = no_of_cores_aix();
+ } elsif ($^O eq 'hpux') {
+ $no_of_cores = no_of_cores_hpux();
+ } elsif ($^O eq 'nto') {
+ $no_of_cores = no_of_cores_qnx();
+ } elsif ($^O eq 'svr5') {
+ $no_of_cores = no_of_cores_openserver();
+ } elsif ($^O eq 'irix') {
+ $no_of_cores = no_of_cores_irix();
+ } elsif ($^O eq 'dec_osf') {
+ $no_of_cores = no_of_cores_tru64();
+ } else {
+ $no_of_cores = (no_of_cores_gnu_linux()
+ || no_of_cores_freebsd()
+ || no_of_cores_netbsd()
+ || no_of_cores_openbsd()
+ || no_of_cores_hurd()
+ || no_of_cores_darwin()
+ || no_of_cores_solaris()
+ || no_of_cores_aix()
+ || no_of_cores_hpux()
+ || no_of_cores_qnx()
+ || no_of_cores_openserver()
+ || no_of_cores_irix()
+ || no_of_cores_tru64()
+ || nproc()
+ );
+ }
+ if($no_of_cores) {
+ chomp $no_of_cores;
+ return $no_of_cores;
+ } else {
+ ::warning("Cannot figure out number of CPU cores. Using 1.\n");
+ return 1;
+ }
+}
+
+sub nproc {
+ # Returns:
+ # Number of cores using `nproc`
+ my $no_of_cores = `nproc 2>/dev/null`;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_gnu_linux {
+ # Returns:
+ # Number of physical CPUs on GNU/Linux
+ # undef if not GNU/Linux
+ my $no_of_cpus;
+ my $no_of_cores;
+ if(-e "/proc/cpuinfo") {
+ $no_of_cpus = 0;
+ $no_of_cores = 0;
+ my %seen;
+ open(my $in_fh, "<", "/proc/cpuinfo") || return undef;
+ while(<$in_fh>) {
+ if(/^physical id.*[:](.*)/ and not $seen{$1}++) {
+ $no_of_cpus++;
+ }
+ /^processor.*[:]/i and $no_of_cores++;
+ }
+ close $in_fh;
+ }
+ return ($no_of_cpus||$no_of_cores);
+}
+
+sub no_of_cores_gnu_linux {
+ # Returns:
+ # Number of CPU cores on GNU/Linux
+ # undef if not GNU/Linux
+ my $no_of_cores;
+ if(-e "/proc/cpuinfo") {
+ $no_of_cores = 0;
+ open(my $in_fh, "<", "/proc/cpuinfo") || return undef;
+ while(<$in_fh>) {
+ /^processor.*[:]/i and $no_of_cores++;
+ }
+ close $in_fh;
+ }
+ return $no_of_cores;
+}
+
+sub no_of_cpus_freebsd {
+ # Returns:
+ # Number of physical CPUs on FreeBSD
+ # undef if not FreeBSD
+ my $no_of_cpus =
+ (`sysctl -a dev.cpu 2>/dev/null | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }'`
+ or
+ `sysctl hw.ncpu 2>/dev/null | awk '{ print \$2 }'`);
+ chomp $no_of_cpus;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_freebsd {
+ # Returns:
+ # Number of CPU cores on FreeBSD
+ # undef if not FreeBSD
+ my $no_of_cores =
+ (`sysctl hw.ncpu 2>/dev/null | awk '{ print \$2 }'`
+ or
+ `sysctl -a hw 2>/dev/null | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }'`);
+ chomp $no_of_cores;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_netbsd {
+ # Returns:
+ # Number of physical CPUs on NetBSD
+ # undef if not NetBSD
+ my $no_of_cpus = `sysctl -n hw.ncpu 2>/dev/null`;
+ chomp $no_of_cpus;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_netbsd {
+ # Returns:
+ # Number of CPU cores on NetBSD
+ # undef if not NetBSD
+ my $no_of_cores = `sysctl -n hw.ncpu 2>/dev/null`;
+ chomp $no_of_cores;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_openbsd {
+ # Returns:
+ # Number of physical CPUs on OpenBSD
+ # undef if not OpenBSD
+ my $no_of_cpus = `sysctl -n hw.ncpu 2>/dev/null`;
+ chomp $no_of_cpus;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_openbsd {
+ # Returns:
+ # Number of CPU cores on OpenBSD
+ # undef if not OpenBSD
+ my $no_of_cores = `sysctl -n hw.ncpu 2>/dev/null`;
+ chomp $no_of_cores;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_hurd {
+ # Returns:
+ # Number of physical CPUs on HURD
+ # undef if not HURD
+ my $no_of_cpus = `nproc`;
+ chomp $no_of_cpus;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_hurd {
+ # Returns:
+ # Number of physical CPUs on HURD
+ # undef if not HURD
+ my $no_of_cores = `nproc`;
+ chomp $no_of_cores;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_darwin {
+ # Returns:
+ # Number of physical CPUs on Mac Darwin
+ # undef if not Mac Darwin
+ my $no_of_cpus =
+ (`sysctl -n hw.physicalcpu 2>/dev/null`
+ or
+ `sysctl -a hw 2>/dev/null | grep [^a-z]physicalcpu[^a-z] | awk '{ print \$2 }'`);
+ return $no_of_cpus;
+}
+
+sub no_of_cores_darwin {
+ # Returns:
+ # Number of CPU cores on Mac Darwin
+ # undef if not Mac Darwin
+ my $no_of_cores =
+ (`sysctl -n hw.logicalcpu 2>/dev/null`
+ or
+ `sysctl -a hw 2>/dev/null | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }'`);
+ return $no_of_cores;
+}
+
+sub no_of_cpus_solaris {
+ # Returns:
+ # Number of physical CPUs on Solaris
+ # undef if not Solaris
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
+ if($#prtconf >= 0) {
+ return $#prtconf +1;
+ }
+ }
+ return undef;
+}
+
+sub no_of_cores_solaris {
+ # Returns:
+ # Number of CPU cores on Solaris
+ # undef if not Solaris
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
+ if($#prtconf >= 0) {
+ return $#prtconf +1;
+ }
+ }
+ return undef;
+}
+
+sub no_of_cpus_aix {
+ # Returns:
+ # Number of physical CPUs on AIX
+ # undef if not AIX
+ my $no_of_cpus = 0;
+ if(-x "/usr/sbin/lscfg") {
+ open(my $in_fh, "-|", "/usr/sbin/lscfg -vs |grep proc | wc -l|tr -d ' '")
+ || return undef;
+ $no_of_cpus = <$in_fh>;
+ chomp ($no_of_cpus);
+ close $in_fh;
+ }
+ return $no_of_cpus;
+}
+
+sub no_of_cores_aix {
+ # Returns:
+ # Number of CPU cores on AIX
+ # undef if not AIX
+ my $no_of_cores;
+ if(-x "/usr/bin/vmstat") {
+ open(my $in_fh, "-|", "/usr/bin/vmstat 1 1") || return undef;
+ while(<$in_fh>) {
+ /lcpu=([0-9]*) / and $no_of_cores = $1;
+ }
+ close $in_fh;
+ }
+ return $no_of_cores;
+}
+
+sub no_of_cpus_hpux {
+ # Returns:
+ # Number of physical CPUs on HP-UX
+ # undef if not HP-UX
+ my $no_of_cpus =
+ (`/usr/bin/mpsched -s 2>&1 | grep 'Locality Domain Count' | awk '{ print \$4 }'`);
+ return $no_of_cpus;
+}
+
+sub no_of_cores_hpux {
+ # Returns:
+ # Number of CPU cores on HP-UX
+ # undef if not HP-UX
+ my $no_of_cores =
+ (`/usr/bin/mpsched -s 2>&1 | grep 'Processor Count' | awk '{ print \$3 }'`);
+ return $no_of_cores;
+}
+
+sub no_of_cpus_qnx {
+ # Returns:
+ # Number of physical CPUs on QNX
+ # undef if not QNX
+    # BUG: It is not known how to calculate this.
+ my $no_of_cpus = 0;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_qnx {
+ # Returns:
+ # Number of CPU cores on QNX
+ # undef if not QNX
+    # BUG: It is not known how to calculate this.
+ my $no_of_cores = 0;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_openserver {
+ # Returns:
+ # Number of physical CPUs on SCO OpenServer
+ # undef if not SCO OpenServer
+ my $no_of_cpus = 0;
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ return $no_of_cpus;
+}
+
+sub no_of_cores_openserver {
+ # Returns:
+ # Number of CPU cores on SCO OpenServer
+ # undef if not SCO OpenServer
+ my $no_of_cores = 0;
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ return $no_of_cores;
+}
+
+sub no_of_cpus_irix {
+ # Returns:
+ # Number of physical CPUs on IRIX
+ # undef if not IRIX
+ my $no_of_cpus = `hinv | grep HZ | grep Processor | awk '{print \$1}'`;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_irix {
+ # Returns:
+ # Number of CPU cores on IRIX
+ # undef if not IRIX
+ my $no_of_cores = `hinv | grep HZ | grep Processor | awk '{print \$1}'`;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_tru64 {
+ # Returns:
+ # Number of physical CPUs on Tru64
+ # undef if not Tru64
+ my $no_of_cpus = `sizer -pr`;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_tru64 {
+ # Returns:
+ # Number of CPU cores on Tru64
+ # undef if not Tru64
+ my $no_of_cores = `sizer -pr`;
+ return $no_of_cores;
+}
+
+sub sshcommand {
+ my $self = shift;
+ if (not defined $self->{'sshcommand'}) {
+ $self->sshcommand_of_sshlogin();
+ }
+ return $self->{'sshcommand'};
+}
+
+sub serverlogin {
+ my $self = shift;
+ if (not defined $self->{'serverlogin'}) {
+ $self->sshcommand_of_sshlogin();
+ }
+ return $self->{'serverlogin'};
+}
+
+sub sshcommand_of_sshlogin {
+ # 'server' -> ('ssh -S /tmp/parallel-ssh-RANDOM/host-','server')
+ # 'user@server' -> ('ssh','user@server')
+ # 'myssh user@server' -> ('myssh','user@server')
+ # 'myssh -l user server' -> ('myssh -l user','server')
+ # '/usr/bin/myssh -l user server' -> ('/usr/bin/myssh -l user','server')
+ # Returns:
+ # sshcommand - defaults to 'ssh'
+ # login@host
+ my $self = shift;
+ my ($sshcmd, $serverlogin);
+ if($self->{'string'} =~ /(.+) (\S+)$/) {
+ # Own ssh command
+ $sshcmd = $1; $serverlogin = $2;
+ } else {
+ # Normal ssh
+ if($opt::controlmaster) {
+ # Use control_path to make ssh faster
+ my $control_path = $self->control_path_dir()."/ssh-%r@%h:%p";
+ $sshcmd = "ssh -S ".$control_path;
+ $serverlogin = $self->{'string'};
+ if(not $self->{'control_path'}{$control_path}++) {
+ # Master is not running for this control_path
+ # Start it
+ my $pid = fork();
+ if($pid) {
+ $Global::sshmaster{$pid} ||= 1;
+ } else {
+ $SIG{'TERM'} = undef;
+ # Ignore the 'foo' being printed
+ open(STDOUT,">","/dev/null");
+ # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
+ # STDERR >/dev/null to ignore "process_mux_new_session: tcgetattr: Invalid argument"
+ open(STDERR,">","/dev/null");
+ open(STDIN,"<","/dev/null");
+ # Run a sleep that outputs data, so it will discover if the ssh connection closes.
+ my $sleep = ::shell_quote_scalar('$|=1;while(1){sleep 1;print "foo\n"}');
+ my @master = ("ssh", "-tt", "-MTS", $control_path, $serverlogin, "perl", "-e", $sleep);
+ exec(@master);
+ }
+ }
+ } else {
+ $sshcmd = "ssh"; $serverlogin = $self->{'string'};
+ }
+ }
+ $self->{'sshcommand'} = $sshcmd;
+ $self->{'serverlogin'} = $serverlogin;
+}
+
+sub control_path_dir {
+ # Returns:
+ # path to directory
+ my $self = shift;
+ if(not defined $self->{'control_path_dir'}) {
+ -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
+ -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
+ $self->{'control_path_dir'} =
+ File::Temp::tempdir($ENV{'HOME'}
+ . "/.parallel/tmp/control_path_dir-XXXX",
+ CLEANUP => 1);
+ }
+ return $self->{'control_path_dir'};
+}
+
+sub rsync_transfer_cmd {
+ # Command to run to transfer a file
+ # Input:
+ # $file = filename of file to transfer
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = rsync command to run to transfer $file ("" if unreadable)
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ if(not -r $file) {
+ ::warning($file, " is not readable and will not be transferred.\n");
+ return "true";
+ }
+ my $rsync_destdir;
+ if($file =~ m:^/:) {
+ # rsync /foo/bar /
+ $rsync_destdir = "/";
+ } else {
+ $rsync_destdir = ::shell_quote_file($workdir);
+ }
+ $file = ::shell_quote_file($file);
+ my $sshcmd = $self->sshcommand();
+ my $rsync_opt = "-rlDzR -e" . ::shell_quote_scalar($sshcmd);
+ my $serverlogin = $self->serverlogin();
+ # Make dir if it does not exist
+ return "( $sshcmd $serverlogin mkdir -p $rsync_destdir;" .
+ rsync()." $rsync_opt $file $serverlogin:$rsync_destdir )";
+}
+
+sub cleanup_cmd {
+ # Command to run to remove the remote file
+ # Input:
+ # $file = filename to remove
+ # $workdir = destination dir
+ # Returns:
+ # $cmd = ssh command to run to remove $file and empty parent dirs
+ my $self = shift;
+ my $file = shift;
+ my $workdir = shift;
+ my $f = $file;
+ if($f =~ m:/\./:) {
+ # foo/bar/./baz/quux => workdir/baz/quux
+ # /foo/bar/./baz/quux => workdir/baz/quux
+ $f =~ s:.*/\./:$workdir/:;
+ } elsif($f =~ m:^[^/]:) {
+ # foo/bar => workdir/foo/bar
+ $f = $workdir."/".$f;
+ }
+ my @subdirs = split m:/:, ::dirname($f);
+ my @rmdir;
+ my $dir = "";
+ for(@subdirs) {
+ $dir .= $_."/";
+ unshift @rmdir, ::shell_quote_file($dir);
+ }
+ my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : "";
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $rmdir .= "rm -rf " . ::shell_quote_file($workdir).';';
+ }
+
+ $f = ::shell_quote_file($f);
+ my $sshcmd = $self->sshcommand();
+ my $serverlogin = $self->serverlogin();
+ return "$sshcmd $serverlogin ".::shell_quote_scalar("(rm -f $f; $rmdir)");
+}
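+# Illustrative example (hypothetical names): cleanup_cmd("dir/file.txt", ".parallel/tmp/host-1-1")
+# builds roughly:
+#   ssh <serverlogin> '(rm -f .parallel/tmp/host-1-1/dir/file.txt; rmdir <deepest dir> <its parent> ... 2>/dev/null;)'
+# i.e. the file is removed and each now-empty parent directory is rmdir'ed deepest-first.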
+
+{
+ my $rsync;
+
+ sub rsync {
+ # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
+ # If the version >= 3.1.0: downgrade to protocol 30
+ if(not $rsync) {
+ my @out = `rsync --version`;
+ for (@out) {
+ if(/version (\d+.\d+)(.\d+)?/) {
+ if($1 >= 3.1) {
+ # Version 3.1.0 or later: Downgrade to protocol 30
+ $rsync = "rsync --protocol 30";
+ } else {
+ $rsync = "rsync";
+ }
+ }
+ }
+ $rsync or ::die_bug("Cannot figure out version of rsync: @out");
+ }
+ return $rsync;
+ }
+}
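+# Example of the version detection above (illustrative `rsync --version` output):
+#   "rsync  version 3.1.1  protocol version 31" => rsync() returns "rsync --protocol 30"
+#   "rsync  version 2.6.9  protocol version 29" => rsync() returns "rsync"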
+
+
+package JobQueue;
+
+sub new {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift;
+ my $return_files = shift;
+ my $commandlinequeue = CommandLineQueue->new
+ ($commandref, $read_from, $context_replace, $max_number_of_args,
+ $return_files);
+ my @unget = ();
+ return bless {
+ 'unget' => \@unget,
+ 'commandlinequeue' => $commandlinequeue,
+ 'total_jobs' => undef,
+ }, ref($class) || $class;
+}
+
+sub get {
+ my $self = shift;
+
+ if(@{$self->{'unget'}}) {
+ my $job = shift @{$self->{'unget'}};
+ return ($job);
+ } else {
+ my $commandline = $self->{'commandlinequeue'}->get();
+ if(defined $commandline) {
+ my $job = Job->new($commandline);
+ return $job;
+ } else {
+ return undef;
+ }
+ }
+}
+
+sub unget {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}})
+ && $self->{'commandlinequeue'}->empty();
+ ::debug("run", "JobQueue->empty $empty ");
+ return $empty;
+}
+
+sub total_jobs {
+ my $self = shift;
+ if(not defined $self->{'total_jobs'}) {
+ my $job;
+ my @queue;
+ my $start = time;
+ while($job = $self->get()) {
+ if(time - $start > 10) {
+ ::warning("Reading all arguments takes longer than 10 seconds.\n");
+ $opt::eta && ::warning("Consider removing --eta.\n");
+ $opt::bar && ::warning("Consider removing --bar.\n");
+ last;
+ }
+ push @queue, $job;
+ }
+ while($job = $self->get()) {
+ push @queue, $job;
+ }
+
+ $self->unget(@queue);
+ $self->{'total_jobs'} = $#queue+1;
+ }
+ return $self->{'total_jobs'};
+}
+
+sub next_seq {
+ my $self = shift;
+
+ return $self->{'commandlinequeue'}->seq();
+}
+
+sub quote_args {
+ my $self = shift;
+ return $self->{'commandlinequeue'}->quote_args();
+}
+
+
+package Job;
+
+sub new {
+ my $class = shift;
+ my $commandlineref = shift;
+ return bless {
+ 'commandline' => $commandlineref, # CommandLine object
+ 'workdir' => undef, # --workdir
+ 'stdin' => undef, # filehandle for stdin (used for --pipe)
+ # filename for writing stdout to (used for --files)
+ 'remaining' => "", # remaining data not sent to stdin (used for --pipe)
+ 'datawritten' => 0, # amount of data sent via stdin (used for --pipe)
+ 'transfersize' => 0, # size of files using --transfer
+ 'returnsize' => 0, # size of files using --return
+ 'pid' => undef,
+ # hash of { SSHLogins => number of times the command failed there }
+ 'failed' => undef,
+ 'sshlogin' => undef,
+ # The commandline wrapped with rsync and ssh
+ 'sshlogin_wrap' => undef,
+ 'exitstatus' => undef,
+ 'exitsignal' => undef,
+ # Timestamp for timeout if any
+ 'timeout' => undef,
+ 'virgin' => 1,
+ }, ref($class) || $class;
+}
+
+sub replaced {
+ my $self = shift;
+ $self->{'commandline'} or ::die_bug("commandline empty");
+ return $self->{'commandline'}->replaced();
+}
+
+sub seq {
+ my $self = shift;
+ return $self->{'commandline'}->seq();
+}
+
+sub slot {
+ my $self = shift;
+ return $self->{'commandline'}->slot();
+}
+
+{
+ my($cattail);
+
+ sub cattail {
+ # Returns:
+ # $cattail = perl program for: cattail "decompress program" writerpid [file_to_decompress or stdin] [file_to_unlink]
+ if(not $cattail) {
+ $cattail = q{
+ # cat followed by tail.
+ # If $writerpid dead: finish after this round
+ use Fcntl;
+
+ $|=1;
+
+ my ($cmd, $writerpid, $read_file, $unlink_file) = @ARGV;
+ if($read_file) {
+ open(IN,"<",$read_file) || die("cattail: Cannot open $read_file");
+ } else {
+ *IN = *STDIN;
+ }
+
+ my $flags;
+ fcntl(IN, F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= O_NONBLOCK; # Add non-blocking to the flags
+ fcntl(IN, F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+ open(OUT,"|-",$cmd) || die("cattail: Cannot run $cmd");
+
+ while(1) {
+ # clear EOF
+ seek(IN,0,1);
+ my $writer_running = kill 0, $writerpid;
+ $read = sysread(IN,$buf,32768);
+ if($read) {
+ # We can unlink the file now: The writer has written something
+ -e $unlink_file and unlink $unlink_file;
+ # Blocking print
+ while($buf) {
+ my $bytes_written = syswrite(OUT,$buf);
+ # syswrite may be interrupted by SIGHUP
+ substr($buf,0,$bytes_written) = "";
+ }
+ # Something printed: Wait less next time
+ $sleep /= 2;
+ } else {
+ if(eof(IN) and not $writer_running) {
+ # Writer dead: There will never be more to read => exit
+ exit;
+ }
+ # TODO This could probably be done more efficiently using select(2)
+ # Nothing read: Wait longer before next read
+ # Up to 30 milliseconds
+ $sleep = ($sleep < 30) ? ($sleep * 1.001 + 0.01) : ($sleep);
+ usleep($sleep);
+ }
+ }
+
+ sub usleep {
+ # Sleep this many milliseconds.
+ my $secs = shift;
+ select(undef, undef, undef, $secs/1000);
+ }
+ };
+ $cattail =~ s/#.*//mg;
+ $cattail =~ s/\s+/ /g;
+ }
+ return $cattail;
+ }
+}
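+# Summary of the cattail helper generated above: it reads (non-blocking) from the growing
+# temp file, pipes everything through the decompress command, unlinks the given
+# unlink-file once the writer has produced output, and exits when the writer pid is gone
+# and EOF has been reached.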
+
+sub openoutputfiles {
+ # Open files for STDOUT and STDERR
+ # Set file handles in $self->fh
+ my $self = shift;
+ my ($outfhw, $errfhw, $outname, $errname);
+ if($opt::results) {
+ my $args_as_dirname = $self->{'commandline'}->args_as_dirname();
+ # Output in: prefix/name1/val1/name2/val2/stdout
+ my $dir = $opt::results."/".$args_as_dirname;
+ if(eval{ File::Path::mkpath($dir); }) {
+ # OK
+ } else {
+ # mkpath failed: Argument probably too long.
+ # Set $Global::max_file_length, which will keep the individual
+ # dir names shorter than the max length
+ max_file_name_length($opt::results);
+ $args_as_dirname = $self->{'commandline'}->args_as_dirname();
+ # prefix/name1/val1/name2/val2/
+ $dir = $opt::results."/".$args_as_dirname;
+ File::Path::mkpath($dir);
+ }
+ # prefix/name1/val1/name2/val2/stdout
+ $outname = "$dir/stdout";
+ if(not open($outfhw, "+>", $outname)) {
+ ::error("Cannot write to `$outname'.\n");
+ ::wait_and_exit(255);
+ }
+ # prefix/name1/val1/name2/val2/stderr
+ $errname = "$dir/stderr";
+ if(not open($errfhw, "+>", $errname)) {
+ ::error("Cannot write to `$errname'.\n");
+ ::wait_and_exit(255);
+ }
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink","");
+ } elsif(not $opt::ungroup) {
+ # To group we create temporary files for STDOUT and STDERR
+ # To avoid the cleanup unlink the files immediately (but keep them open)
+ if(@Global::tee_jobs) {
+ # files must be removed when the tee is done
+ } elsif($opt::files) {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ # --files => only remove stderr
+ $self->set_fh(1,"unlink","");
+ $self->set_fh(2,"unlink",$errname);
+ } else {
+ ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
+ ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ } else {
+ # --ungroup
+ open($outfhw,">&",$Global::fd{1}) || die;
+ open($errfhw,">&",$Global::fd{2}) || die;
+ # File name must be empty as it will otherwise be printed
+ $outname = "";
+ $errname = "";
+ $self->set_fh(1,"unlink",$outname);
+ $self->set_fh(2,"unlink",$errname);
+ }
+ # Set writing FD
+ $self->set_fh(1,'w',$outfhw);
+ $self->set_fh(2,'w',$errfhw);
+ $self->set_fh(1,'name',$outname);
+ $self->set_fh(2,'name',$errname);
+ if($opt::compress) {
+ # Send stdout to stdin for $opt::compress_program(1)
+ # Send stderr to stdin for $opt::compress_program(2)
+        # To get the cattail pid: $pid = $self->fh($fdno,'rpid');
+ my $cattail = cattail();
+ for my $fdno (1,2) {
+ my $wpid = open(my $fdw,"|-","$opt::compress_program >>".
+ $self->fh($fdno,'name')) || die $?;
+ $self->set_fh($fdno,'w',$fdw);
+ $self->set_fh($fdno,'wpid',$wpid);
+ my $rpid = open(my $fdr, "-|", "perl", "-e", $cattail,
+ $opt::decompress_program, $wpid,
+ $self->fh($fdno,'name'),$self->fh($fdno,'unlink')) || die $?;
+ $self->set_fh($fdno,'r',$fdr);
+ $self->set_fh($fdno,'rpid',$rpid);
+ }
+ } elsif(not $opt::ungroup) {
+ # Set reading FD if using --group (--ungroup does not need)
+ for my $fdno (1,2) {
+ # Re-open the file for reading
+            # so fdw can be closed separately
+            # and fdr can be seeked separately (for --line-buffer)
+ open(my $fdr,"<", $self->fh($fdno,'name')) ||
+ ::die_bug("fdr: Cannot open ".$self->fh($fdno,'name'));
+ $self->set_fh($fdno,'r',$fdr);
+ # Unlink if required
+ $Global::debug or unlink $self->fh($fdno,"unlink");
+ }
+ }
+ if($opt::linebuffer) {
+ # Set non-blocking when using --linebuffer
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ for my $fdno (1,2) {
+ my $fdr = $self->fh($fdno,'r');
+ my $flags;
+ fcntl($fdr, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags |= &O_NONBLOCK; # Add non-blocking to the flags
+ fcntl($fdr, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+ }
+ }
+}
+
+sub max_file_name_length {
+ # Figure out the max length of a subdir
+ # TODO and the max total length
+ # Ext4 = 255,130816
+ my $testdir = shift;
+
+ my $upper = 8_000_000;
+ my $len = 8;
+ my $dir="x"x$len;
+ do {
+ rmdir($testdir."/".$dir);
+ $len *= 16;
+ $dir="x"x$len;
+ } while (mkdir $testdir."/".$dir);
+ # Then search for the actual max length between $len/16 and $len
+ my $min = $len/16;
+ my $max = $len;
+ while($max-$min > 5) {
+ # If we are within 5 chars of the exact value:
+ # it is not worth the extra time to find the exact value
+ my $test = int(($min+$max)/2);
+ $dir="x"x$test;
+ if(mkdir $testdir."/".$dir) {
+ rmdir($testdir."/".$dir);
+ $min = $test;
+ } else {
+ $max = $test;
+ }
+ }
+ $Global::max_file_length = $min;
+ return $min;
+}
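+# Illustrative walk-through (assuming a filesystem with a 255-char file name limit):
+#   mkdir succeeds for "x"x128 but fails for "x"x2048, so the search starts with
+#   $min=128, $max=2048; the bisection then shrinks the interval until $max-$min <= 5,
+#   leaving $Global::max_file_length within a few chars of the real limit (~255).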
+
+sub set_fh {
+ # Set file handle
+ my ($self, $fd_no, $key, $fh) = @_;
+ $self->{'fd'}{$fd_no,$key} = $fh;
+}
+
+sub fh {
+ # Get file handle
+ my ($self, $fd_no, $key) = @_;
+ return $self->{'fd'}{$fd_no,$key};
+}
+
+sub write {
+ my $self = shift;
+ my $remaining_ref = shift;
+ my $stdin_fh = $self->fh(0,"w");
+ syswrite($stdin_fh,$$remaining_ref);
+}
+
+sub set_stdin_buffer {
+ # Copy stdin buffer from $block_ref up to $endpos
+ # Prepend with $header_ref
+ # Remove $recstart and $recend if needed
+ # Input:
+ # $header_ref = ref to $header to prepend
+ # $block_ref = ref to $block to pass on
+ # $endpos = length of $block to pass on
+ # $recstart = --recstart regexp
+ # $recend = --recend regexp
+ # Returns:
+ # N/A
+ my $self = shift;
+ my ($header_ref,$block_ref,$endpos,$recstart,$recend) = @_;
+ $self->{'stdin_buffer'} = ($self->virgin() ? $$header_ref : "").substr($$block_ref,0,$endpos);
+ if($opt::remove_rec_sep) {
+ remove_rec_sep(\$self->{'stdin_buffer'},$recstart,$recend);
+ }
+ $self->{'stdin_buffer_length'} = length $self->{'stdin_buffer'};
+ $self->{'stdin_buffer_pos'} = 0;
+}
+
+sub stdin_buffer_length {
+ my $self = shift;
+ return $self->{'stdin_buffer_length'};
+}
+
+sub remove_rec_sep {
+ my ($block_ref,$recstart,$recend) = @_;
+ # Remove record separator
+ $$block_ref =~ s/$recend$recstart//gos;
+ $$block_ref =~ s/^$recstart//os;
+ $$block_ref =~ s/$recend$//os;
+}
+
+sub non_block_write {
+ my $self = shift;
+ my $something_written = 0;
+ use POSIX qw(:errno_h);
+# use Fcntl;
+# my $flags = '';
+ for my $buf (substr($self->{'stdin_buffer'},$self->{'stdin_buffer_pos'})) {
+ my $in = $self->fh(0,"w");
+# fcntl($in, F_GETFL, $flags)
+# or die "Couldn't get flags for HANDLE : $!\n";
+# $flags |= O_NONBLOCK;
+# fcntl($in, F_SETFL, $flags)
+# or die "Couldn't set flags for HANDLE: $!\n";
+ my $rv = syswrite($in, $buf);
+ if (!defined($rv) && $! == EAGAIN) {
+ # would block
+ $something_written = 0;
+ } elsif ($self->{'stdin_buffer_pos'}+$rv != $self->{'stdin_buffer_length'}) {
+ # incomplete write
+ # Remove the written part
+ $self->{'stdin_buffer_pos'} += $rv;
+ $something_written = $rv;
+ } else {
+ # successfully wrote everything
+ my $a="";
+ $self->set_stdin_buffer(\$a,\$a,"","");
+ $something_written = $rv;
+ }
+ }
+
+ ::debug("pipe", "Non-block: ", $something_written);
+ return $something_written;
+}
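+# Behaviour of non_block_write() (descriptive): 0 means the write would block (EAGAIN)
+# and should be retried later; a positive value is the number of bytes accepted, with
+# 'stdin_buffer_pos' advanced on a partial write and the buffer reset once everything
+# has been written.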
+
+
+sub virgin {
+ my $self = shift;
+ return $self->{'virgin'};
+}
+
+sub set_virgin {
+ my $self = shift;
+ $self->{'virgin'} = shift;
+}
+
+sub pid {
+ my $self = shift;
+ return $self->{'pid'};
+}
+
+sub set_pid {
+ my $self = shift;
+ $self->{'pid'} = shift;
+}
+
+sub starttime {
+ # Returns:
+ # UNIX-timestamp this job started
+ my $self = shift;
+ return sprintf("%.3f",$self->{'starttime'});
+}
+
+sub set_starttime {
+ my $self = shift;
+ my $starttime = shift || ::now();
+ $self->{'starttime'} = $starttime;
+}
+
+sub runtime {
+ # Returns:
+ # Run time in seconds
+ my $self = shift;
+ return sprintf("%.3f",int(($self->endtime() - $self->starttime())*1000)/1000);
+}
+
+sub endtime {
+ # Returns:
+ # UNIX-timestamp this job ended
+ # 0 if not ended yet
+ my $self = shift;
+ return ($self->{'endtime'} || 0);
+}
+
+sub set_endtime {
+ my $self = shift;
+ my $endtime = shift;
+ $self->{'endtime'} = $endtime;
+}
+
+sub timedout {
+ # Is the job timedout?
+ # Input:
+ # $delta_time = time that the job may run
+ # Returns:
+ # True or false
+ my $self = shift;
+ my $delta_time = shift;
+ return time > $self->{'starttime'} + $delta_time;
+}
+
+sub kill {
+ # Kill the job.
+ # Send the signals to (grand)*children and pid.
+ # If no signals: TERM TERM KILL
+ # Wait 200 ms after each TERM.
+ # Input:
+ # @signals = signals to send
+ my $self = shift;
+ my @signals = @_;
+ my @family_pids = $self->family_pids();
+    # Record this job as failed
+ $self->set_exitstatus(-1);
+ # Send two TERMs to give time to clean up
+ ::debug("run", "Kill seq ", $self->seq(), "\n");
+    # Fall back to TERM TERM KILL if no signals were given
+    # (note: '@signals || (...)' would yield the element count, not the signal list)
+    my @send_signals = @signals ? @signals : ("TERM", "TERM", "KILL");
+ for my $signal (@send_signals) {
+ my $alive = 0;
+ for my $pid (@family_pids) {
+ if(kill 0, $pid) {
+ # The job still running
+ kill $signal, $pid;
+ $alive = 1;
+ }
+ }
+ # If a signal was given as input, do not do the sleep below
+ @signals and next;
+
+ if($signal eq "TERM" and $alive) {
+ # Wait up to 200 ms between TERMs - but only if any pids are alive
+ my $sleep = 1;
+ for (my $sleepsum = 0; kill 0, $family_pids[0] and $sleepsum < 200;
+ $sleepsum += $sleep) {
+ $sleep = ::reap_usleep($sleep);
+ }
+ }
+ }
+}
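+# Example (default behaviour): $job->kill() sends TERM to the whole process family,
+# waits up to 200 ms for it to die, sends TERM again, waits again, and finally sends KILL.
+# If specific signals are passed in, only those are sent and the waits are skipped.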
+
+sub family_pids {
+ # Find the pids with this->pid as (grand)*parent
+ # Returns:
+ # @pids = pids of (grand)*children
+ my $self = shift;
+ my $pid = $self->pid();
+ my @pids;
+
+ my ($children_of_ref, $parent_of_ref, $name_of_ref) = ::pid_table();
+
+ my @more = ($pid);
+ # While more (grand)*children
+ while(@more) {
+ my @m;
+ push @pids, @more;
+ for my $parent (@more) {
+ if($children_of_ref->{$parent}) {
+ # add the children of this parent
+ push @m, @{$children_of_ref->{$parent}};
+ }
+ }
+ @more = @m;
+ }
+ return (@pids);
+}
+
+sub failed {
+ # return number of times failed for this $sshlogin
+ # Input:
+ # $sshlogin
+ # Returns:
+ # Number of times failed for $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ return $self->{'failed'}{$sshlogin};
+}
+
+sub failed_here {
+ # return number of times failed for the current $sshlogin
+ # Returns:
+ # Number of times failed for this sshlogin
+ my $self = shift;
+ return $self->{'failed'}{$self->sshlogin()};
+}
+
+sub add_failed {
+ # increase the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'failed'}{$sshlogin}++;
+}
+
+sub add_failed_here {
+ # increase the number of times failed for the current $sshlogin
+ my $self = shift;
+ $self->{'failed'}{$self->sshlogin()}++;
+}
+
+sub reset_failed {
+    # Reset the number of times failed for this $sshlogin
+ my $self = shift;
+ my $sshlogin = shift;
+ delete $self->{'failed'}{$sshlogin};
+}
+
+sub reset_failed_here {
+    # Reset the number of times failed for the current $sshlogin
+ my $self = shift;
+ delete $self->{'failed'}{$self->sshlogin()};
+}
+
+sub min_failed {
+ # Returns:
+ # the number of sshlogins this command has failed on
+ # the minimal number of times this command has failed
+ my $self = shift;
+ my $min_failures =
+ ::min(map { $self->{'failed'}{$_} } keys %{$self->{'failed'}});
+ my $number_of_sshlogins_failed_on = scalar keys %{$self->{'failed'}};
+ return ($number_of_sshlogins_failed_on,$min_failures);
+}
+
+sub total_failed {
+ # Returns:
+ # $total_failures = the number of times this command has failed
+ my $self = shift;
+ my $total_failures = 0;
+ for (values %{$self->{'failed'}}) {
+ $total_failures += $_;
+ }
+ return $total_failures;
+}
+
+sub wrapped {
+ # Wrap command with:
+ # * --shellquote
+ # * --nice
+ # * --cat
+ # * --fifo
+ # * --sshlogin
+ # * --pipepart (@Global::cat_partials)
+ # * --pipe
+ # * --tmux
+ # The ordering of the wrapping is important:
+ # * --nice/--cat/--fifo should be done on the remote machine
+ # * --pipepart/--pipe should be done on the local machine inside --tmux
+ # Uses:
+ # $Global::envvar
+ # $opt::shellquote
+ # $opt::nice
+ # $Global::shell
+ # $opt::cat
+ # $opt::fifo
+ # @Global::cat_partials
+ # $opt::pipe
+ # $opt::tmux
+ # Returns:
+ # $self->{'wrapped'} = the command wrapped with the above
+ my $self = shift;
+ if(not defined $self->{'wrapped'}) {
+ my $command = $Global::envvar.$self->replaced();
+ if($opt::shellquote) {
+ # Prepend echo
+ # and quote twice
+ $command = "echo " .
+ ::shell_quote_scalar(::shell_quote_scalar($command));
+ }
+ if($opt::nice) {
+ # Prepend \nice -n19 $SHELL -c
+ # and quote.
+ # The '\' before nice is needed to avoid tcsh's built-in
+ $command = '\nice'. " -n". $opt::nice. " ".
+ $Global::shell. " -c ".
+ ::shell_quote_scalar($command);
+ }
+ if($opt::cat) {
+ # Prepend 'cat > {};'
+ # Append '_EXIT=$?;(rm {};exit $_EXIT)'
+ $command =
+ $self->{'commandline'}->replace_placeholders(["cat > \257<\257>; "], 0, 0).
+ $command.
+ $self->{'commandline'}->replace_placeholders(
+ ["; _EXIT=\$?; rm \257<\257>; exit \$_EXIT"], 0, 0);
+ } elsif($opt::fifo) {
+ # Prepend 'mkfifo {}; ('
+ # Append ') & _PID=$!; cat > {}; wait $_PID; _EXIT=$?;(rm {};exit $_EXIT)'
+ $command =
+ $self->{'commandline'}->replace_placeholders(["mkfifo \257<\257>; ("], 0, 0).
+ $command.
+ $self->{'commandline'}->replace_placeholders([") & _PID=\$!; cat > \257<\257>; ",
+ "wait \$_PID; _EXIT=\$?; ",
+ "rm \257<\257>; exit \$_EXIT"],
+ 0,0);
+ }
+        # Wrap with ssh + transferring of files
+ $command = $self->sshlogin_wrap($command);
+ if(@Global::cat_partials) {
+ # Prepend:
+ # < /tmp/foo perl -e 'while(@ARGV) { sysseek(STDIN,shift,0) || die; $left = shift; while($read = sysread(STDIN,$buf, ($left > 32768 ? 32768 : $left))){ $left -= $read; syswrite(STDOUT,$buf); } }' 0 0 0 11 |
+ $command = (shift @Global::cat_partials). "|". "(". $command. ")";
+ } elsif($opt::pipe) {
+ # Prepend EOF-detector to avoid starting $command if EOF.
+ # The $tmpfile might exist if run on a remote system - we accept that risk
+ my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".chr");
+ # Unlink to avoid leaving files if --dry-run or --sshlogin
+ unlink $tmpfile;
+ $command =
+ # Exit value:
+ # empty input = true
+ # some input = exit val from command
+ qq{ sh -c 'dd bs=1 count=1 of=$tmpfile 2>/dev/null'; }.
+ qq{ test \! -s "$tmpfile" && rm -f "$tmpfile" && exec true; }.
+ qq{ (cat $tmpfile; rm $tmpfile; cat - ) | }.
+ "($command);";
+ }
+ if($opt::tmux) {
+ # Wrap command with 'tmux'
+ $command = $self->tmux_wrap($command);
+ }
+ $self->{'wrapped'} = $command;
+ }
+ return $self->{'wrapped'};
+}
+
+sub set_sshlogin {
+ my $self = shift;
+ my $sshlogin = shift;
+ $self->{'sshlogin'} = $sshlogin;
+ delete $self->{'sshlogin_wrap'}; # If sshlogin is changed the wrap is wrong
+ delete $self->{'wrapped'};
+}
+
+sub sshlogin {
+ my $self = shift;
+ return $self->{'sshlogin'};
+}
+
+sub sshlogin_wrap {
+ # Wrap the command with the commands needed to run remotely
+ # Returns:
+ # $self->{'sshlogin_wrap'} = command wrapped with ssh+transfer commands
+ my $self = shift;
+ my $command = shift;
+ if(not defined $self->{'sshlogin_wrap'}) {
+ my $sshlogin = $self->sshlogin();
+ my $sshcmd = $sshlogin->sshcommand();
+ my $serverlogin = $sshlogin->serverlogin();
+ my ($pre,$post,$cleanup)=("","","");
+
+ if($serverlogin eq ":") {
+            # No transfer needed
+ $self->{'sshlogin_wrap'} = $command;
+ } else {
+ # --transfer
+ $pre .= $self->sshtransfer();
+ # --return
+ $post .= $self->sshreturn();
+ # --cleanup
+ $post .= $self->sshcleanup();
+ if($post) {
+ # We need to save the exit status of the job
+ $post = '_EXIT_status=$?; ' . $post . ' exit $_EXIT_status;';
+ }
+ # If the remote login shell is (t)csh then use 'setenv'
+ # otherwise use 'export'
+ # We cannot use parse_env_var(), as PARALLEL_SEQ changes
+ # for each command
+ my $parallel_env =
+ ($Global::envwarn
+ . q{ 'eval `echo $SHELL | grep "/t\\{0,1\\}csh" > /dev/null }
+ . q{ && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\; }
+ . q{ setenv PARALLEL_PID '$PARALLEL_PID' }
+ . q{ || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; }
+ . q{ PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' });
+ my $remote_pre = "";
+ my $ssh_options = "";
+ if(($opt::pipe or $opt::pipepart) and $opt::ctrlc
+ or
+ not ($opt::pipe or $opt::pipepart) and not $opt::noctrlc) {
+ # TODO Determine if this is needed
+ # Propagating CTRL-C to kill remote jobs requires
+ # remote jobs to be run with a terminal.
+ $ssh_options = "-tt -oLogLevel=quiet";
+# $ssh_options = "";
+ # tty - check if we have a tty.
+ # stty:
+ # -onlcr - make output 8-bit clean
+ # isig - pass CTRL-C as signal
+ # -echo - do not echo input
+ $remote_pre .= ::shell_quote_scalar('tty >/dev/null && stty isig -onlcr -echo;');
+ }
+ if($opt::workdir) {
+ my $wd = ::shell_quote_file($self->workdir());
+ $remote_pre .= ::shell_quote_scalar("mkdir -p ") . $wd .
+ ::shell_quote_scalar("; cd ") . $wd .
+ # exit 255 (instead of exec false) would be the correct thing,
+ # but that fails on tcsh
+ ::shell_quote_scalar(qq{ || exec false;});
+ }
+ # This script is to solve the problem of
+ # * not mixing STDERR and STDOUT
+ # * terminating with ctrl-c
+ # It works on Linux but not Solaris
+ # Finishes on Solaris, but wrong exit code:
+ # $SIG{CHLD} = sub {exit ($?&127 ? 128+($?&127) : 1+$?>>8)};
+ # Hangs on Solaris, but correct exit code on Linux:
+ # $SIG{CHLD} = sub { $done = 1 };
+ # $p->poll;
+ my $signal_script = "perl -e '".
+ q{
+ use IO::Poll;
+ $SIG{CHLD} = sub { $done = 1 };
+ $p = IO::Poll->new;
+ $p->mask(STDOUT, POLLHUP);
+ $pid=fork; unless($pid) {setpgrp; exec $ENV{SHELL}, "-c", @ARGV; die "exec: $!\n"}
+ $p->poll;
+ kill SIGHUP, -${pid} unless $done;
+ wait; exit ($?&127 ? 128+($?&127) : 1+$?>>8)
+ } . "' ";
+ $signal_script =~ s/\s+/ /g;
+
+ $self->{'sshlogin_wrap'} =
+ ($pre
+ . "$sshcmd $ssh_options $serverlogin $parallel_env "
+ . $remote_pre
+# . ::shell_quote_scalar($signal_script . ::shell_quote_scalar($command))
+ . ::shell_quote_scalar($command)
+ . ";"
+ . $post);
+ }
+ }
+ return $self->{'sshlogin_wrap'};
+}
+
+sub transfer {
+ # Files to transfer
+ # Returns:
+ # @transfer - File names of files to transfer
+ my $self = shift;
+ my @transfer = ();
+ $self->{'transfersize'} = 0;
+ if($opt::transfer) {
+ for my $record (@{$self->{'commandline'}{'arg_list'}}) {
+ # Merge arguments from records into args
+ for my $arg (@$record) {
+ CORE::push @transfer, $arg->orig();
+ # filesize
+ if(-e $arg->orig()) {
+ $self->{'transfersize'} += (stat($arg->orig()))[7];
+ }
+ }
+ }
+ }
+ return @transfer;
+}
+
+sub transfersize {
+ my $self = shift;
+ return $self->{'transfersize'};
+}
+
+sub sshtransfer {
+ # Returns for each transfer file:
+ # rsync $file remote:$workdir
+ my $self = shift;
+ my @pre;
+ my $sshlogin = $self->sshlogin();
+ my $workdir = $self->workdir();
+ for my $file ($self->transfer()) {
+ push @pre, $sshlogin->rsync_transfer_cmd($file,$workdir).";";
+ }
+ return join("",@pre);
+}
+
+sub return {
+ # Files to return
+ # Non-quoted and with {...} substituted
+ # Returns:
+ # @non_quoted_filenames
+ my $self = shift;
+ return $self->{'commandline'}->
+ replace_placeholders($self->{'commandline'}{'return_files'},0,0);
+}
+
+sub returnsize {
+ # This is called after the job has finished
+ # Returns:
+ # $number_of_bytes transferred in return
+ my $self = shift;
+ for my $file ($self->return()) {
+ if(-e $file) {
+ $self->{'returnsize'} += (stat($file))[7];
+ }
+ }
+ return $self->{'returnsize'};
+}
+
+sub sshreturn {
+ # Returns for each return-file:
+ # rsync remote:$workdir/$file .
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $sshcmd = $sshlogin->sshcommand();
+ my $serverlogin = $sshlogin->serverlogin();
+ my $rsync_opt = "-rlDzR -e".::shell_quote_scalar($sshcmd);
+ my $pre = "";
+ for my $file ($self->return()) {
+ $file =~ s:^\./::g; # Remove ./ if any
+ my $relpath = ($file !~ m:^/:); # Is the path relative?
+ my $cd = "";
+ my $wd = "";
+ if($relpath) {
+ # rsync -avR /foo/./bar/baz.c remote:/tmp/
+ # == (on old systems)
+ # rsync -avR --rsync-path="cd /foo; rsync" remote:bar/baz.c /tmp/
+ $wd = ::shell_quote_file($self->workdir()."/");
+ }
+ # Only load File::Basename if actually needed
+ $Global::use{"File::Basename"} ||= eval "use File::Basename; 1;";
+ # dir/./file means relative to dir, so remove dir on remote
+ $file =~ m:(.*)/\./:;
+ my $basedir = $1 ? ::shell_quote_file($1."/") : "";
+ my $nobasedir = $file;
+ $nobasedir =~ s:.*/\./::;
+ $cd = ::shell_quote_file(::dirname($nobasedir));
+ my $rsync_cd = '--rsync-path='.::shell_quote_scalar("cd $wd$cd; rsync");
+ my $basename = ::shell_quote_scalar(::shell_quote_file(basename($file)));
+ # --return
+ # mkdir -p /home/tange/dir/subdir/;
+ # rsync (--protocol 30) -rlDzR --rsync-path="cd /home/tange/dir/subdir/; rsync"
+ # server:file.gz /home/tange/dir/subdir/
+ $pre .= "mkdir -p $basedir$cd; ".$sshlogin->rsync()." $rsync_cd $rsync_opt $serverlogin:".
+ $basename . " ".$basedir.$cd.";";
+ }
+ return $pre;
+}
+
+sub sshcleanup {
+ # Return the sshcommand needed to remove the file
+ # Returns:
+ # ssh command needed to remove files from sshlogin
+ my $self = shift;
+ my $sshlogin = $self->sshlogin();
+ my $sshcmd = $sshlogin->sshcommand();
+ my $serverlogin = $sshlogin->serverlogin();
+ my $workdir = $self->workdir();
+ my $cleancmd = "";
+
+ for my $file ($self->cleanup()) {
+ my @subworkdirs = parentdirs_of($file);
+ $cleancmd .= $sshlogin->cleanup_cmd($file,$workdir).";";
+ }
+ if(defined $opt::workdir and $opt::workdir eq "...") {
+ $cleancmd .= "$sshcmd $serverlogin rm -rf " . ::shell_quote_scalar($workdir).';';
+ }
+ return $cleancmd;
+}
+
+sub cleanup {
+ # Returns:
+ # Files to remove at cleanup
+ my $self = shift;
+ if($opt::cleanup) {
+ my @transfer = $self->transfer();
+ my @return = $self->return();
+ return (@transfer,@return);
+ } else {
+ return ();
+ }
+}
+
+sub workdir {
+ # Returns:
+ # the workdir on a remote machine
+ my $self = shift;
+ if(not defined $self->{'workdir'}) {
+ my $workdir;
+ if(defined $opt::workdir) {
+ if($opt::workdir eq ".") {
+ # . means current dir
+ my $home = $ENV{'HOME'};
+ eval 'use Cwd';
+ my $cwd = cwd();
+ $workdir = $cwd;
+ if($home) {
+ # If homedir exists: remove the homedir from
+ # workdir if cwd starts with homedir
+ # E.g. /home/foo/my/dir => my/dir
+ # E.g. /tmp/my/dir => /tmp/my/dir
+ my ($home_dev, $home_ino) = (stat($home))[0,1];
+ my $parent = "";
+ my @dir_parts = split(m:/:,$cwd);
+ my $part;
+ while(defined ($part = shift @dir_parts)) {
+ $part eq "" and next;
+ $parent .= "/".$part;
+ my ($parent_dev, $parent_ino) = (stat($parent))[0,1];
+ if($parent_dev == $home_dev and $parent_ino == $home_ino) {
+ # dev and ino is the same: We found the homedir.
+ $workdir = join("/",@dir_parts);
+ last;
+ }
+ }
+ }
+ if($workdir eq "") {
+ $workdir = ".";
+ }
+ } elsif($opt::workdir eq "...") {
+ $workdir = ".parallel/tmp/" . ::hostname() . "-" . $$
+ . "-" . $self->seq();
+ } else {
+ $workdir = $opt::workdir;
+                # Rsync treats /./ specially. We don't want that
+ $workdir =~ s:/\./:/:g; # Remove /./
+ $workdir =~ s:/+$::; # Remove ending / if any
+ $workdir =~ s:^\./::g; # Remove starting ./ if any
+ }
+ } else {
+ $workdir = ".";
+ }
+ $self->{'workdir'} = ::shell_quote_scalar($workdir);
+ }
+ return $self->{'workdir'};
+}
+
+sub parentdirs_of {
+ # Return:
+ # all parentdirs except . of this dir or file - sorted desc by length
+ my $d = shift;
+ my @parents = ();
+ while($d =~ s:/[^/]+$::) {
+ if($d ne ".") {
+ push @parents, $d;
+ }
+ }
+ return @parents;
+}
+
+sub start {
+ # Setup STDOUT and STDERR for a job and start it.
+ # Returns:
+ # job-object or undef if job not to run
+ my $job = shift;
+    # Get the shell command to be executed (possibly with ssh in front).
+ my $command = $job->wrapped();
+
+ if($Global::interactive or $Global::stderr_verbose) {
+ if($Global::interactive) {
+ print $Global::original_stderr "$command ?...";
+ open(my $tty_fh, "<", "/dev/tty") || ::die_bug("interactive-tty");
+ my $answer = <$tty_fh>;
+ close $tty_fh;
+ my $run_yes = ($answer =~ /^\s*y/i);
+ if (not $run_yes) {
+ $command = "true"; # Run the command 'true'
+ }
+ } else {
+ print $Global::original_stderr "$command\n";
+ }
+ }
+
+ my $pid;
+ $job->openoutputfiles();
+ my($stdout_fh,$stderr_fh) = ($job->fh(1,"w"),$job->fh(2,"w"));
+ local (*IN,*OUT,*ERR);
+ open OUT, '>&', $stdout_fh or ::die_bug("Can't redirect STDOUT: $!");
+ open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDOUT: $!");
+
+ if(($opt::dryrun or $Global::verbose) and $opt::ungroup) {
+ if($Global::verbose <= 1) {
+ print $stdout_fh $job->replaced(),"\n";
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ print $stdout_fh $command,"\n";
+ }
+ }
+ if($opt::dryrun) {
+ $command = "true";
+ }
+ $ENV{'PARALLEL_SEQ'} = $job->seq();
+ $ENV{'PARALLEL_PID'} = $$;
+ ::debug("run", $Global::total_running, " processes . Starting (",
+ $job->seq(), "): $command\n");
+ if($opt::pipe) {
+ my ($stdin_fh);
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
+ ::die_bug("open3-pipe");
+ 1;
+ };
+ $job->set_fh(0,"w",$stdin_fh);
+ } elsif(@opt::a and not $Global::stdin_in_opt_a and $job->seq() == 1
+ and $job->sshlogin()->string() eq ":") {
+ # Give STDIN to the first job if using -a (but only if running
+ # locally - otherwise CTRL-C does not work for other jobs Bug#36585)
+ *IN = *STDIN;
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3("<&IN", ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
+ ::die_bug("open3-a");
+ 1;
+ };
+ # Re-open to avoid complaining
+ open(STDIN, "<&", $Global::original_stdin)
+ or ::die_bug("dup-\$Global::original_stdin: $!");
+ } elsif ($opt::tty and not $Global::tty_taken and -c "/dev/tty" and
+ open(my $devtty_fh, "<", "/dev/tty")) {
+ # Give /dev/tty to the command if no one else is using it
+ *IN = $devtty_fh;
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3("<&IN", ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
+ ::die_bug("open3-/dev/tty");
+ $Global::tty_taken = $pid;
+ close $devtty_fh;
+ 1;
+ };
+ } else {
+ # The eval is needed to catch exception from open3
+ eval {
+ $pid = ::open3(::gensym, ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
+ ::die_bug("open3-gensym");
+ 1;
+ };
+ }
+ if($pid) {
+ # A job was started
+ $Global::total_running++;
+ $Global::total_started++;
+ $job->set_pid($pid);
+ $job->set_starttime();
+ $Global::running{$job->pid()} = $job;
+ if($opt::timeout) {
+ $Global::timeoutq->insert($job);
+ }
+ $Global::newest_job = $job;
+ $Global::newest_starttime = ::now();
+ return $job;
+ } else {
+ # No more processes
+ ::debug("run", "Cannot spawn more jobs.\n");
+ return undef;
+ }
+}
+
+sub tmux_wrap {
+ # Wrap command with tmux for session pPID
+ # Input:
+ # $actual_command = the actual command being run (incl ssh wrap)
+ my $self = shift;
+ my $actual_command = shift;
+ # Temporary file name. Used for fifo to communicate exit val
+ my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".tmx");
+ $Global::unlink{$tmpfile}=1;
+ close $fh;
+ unlink $tmpfile;
+ my $visual_command = $self->replaced();
+ my $title = $visual_command;
+ # ; causes problems
+ # ascii 194-245 annoys tmux
+ $title =~ tr/[\011-\016;\302-\365]//d;
+
+ my $tmux;
+ if($Global::total_running == 0) {
+ $tmux = "tmux new-session -s p$$ -d -n ".
+ ::shell_quote_scalar($title);
+ print $Global::original_stderr "See output with: tmux attach -t p$$\n";
+ } else {
+ $tmux = "tmux new-window -t p$$ -n ".::shell_quote_scalar($title);
+ }
+ return "mkfifo $tmpfile; $tmux ".
+ # Run in tmux
+ ::shell_quote_scalar(
+ "(".$actual_command.');(echo $?$status;echo 255) >'.$tmpfile."&".
+ "echo ".::shell_quote_scalar($visual_command).";".
+ "echo \007Job finished at: `date`;sleep 10").
+ # Run outside tmux
+ # Read the first line from the fifo and use that as status code
+ "; exit `perl -ne 'unlink \$ARGV; 1..1 and print' $tmpfile` ";
+}
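+# Illustrative result of tmux_wrap(): the first job runs
+#   tmux new-session -s p$$ -d -n <title> '<wrapped command>'
+# (later jobs use "tmux new-window -t p$$ ..."); the wrapped command writes its exit
+# value to the fifo $tmpfile, and the trailing perl one-liner reads that value back so
+# the outer shell exits with the job's status.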
+
+sub is_already_in_results {
+ # Do we already have results for this job?
+ # Returns:
+ # $job_already_run = bool whether there is output for this or not
+ my $job = $_[0];
+ my $args_as_dirname = $job->{'commandline'}->args_as_dirname();
+ # prefix/name1/val1/name2/val2/
+ my $dir = $opt::results."/".$args_as_dirname;
+ ::debug("run", "Test $dir/stdout", -e "$dir/stdout", "\n");
+ return -e "$dir/stdout";
+}
+
+sub is_already_in_joblog {
+ my $job = shift;
+ return vec($Global::job_already_run,$job->seq(),1);
+}
+
+sub set_job_in_joblog {
+ my $job = shift;
+ vec($Global::job_already_run,$job->seq(),1) = 1;
+}
+
+sub should_be_retried {
+ # Should this job be retried?
+ # Returns
+ # 0 - do not retry
+ # 1 - job queued for retry
+ my $self = shift;
+ if (not $opt::retries) {
+ return 0;
+ }
+ if(not $self->exitstatus()) {
+ # Completed with success. If there is a recorded failure: forget it
+ $self->reset_failed_here();
+ return 0
+ } else {
+ # The job failed. Should it be retried?
+ $self->add_failed_here();
+ if($self->total_failed() == $opt::retries) {
+ # This has been retried enough
+ return 0;
+ } else {
+ # This command should be retried
+ $self->set_endtime(undef);
+ $Global::JobQueue->unget($self);
+ ::debug("run", "Retry ", $self->seq(), "\n");
+ return 1;
+ }
+ }
+}
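+# Example: with --retries 3 a failing job is put back on the queue after its 1st and
+# 2nd failure; after the 3rd failure total_failed() equals 3 and the job is given up.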
+
+sub print {
+ # Print the output of the jobs
+ # Returns: N/A
+
+ my $self = shift;
+ ::debug("print", ">>joboutput ", $self->replaced(), "\n");
+ if($opt::dryrun) {
+ # Nothing was printed to this job:
+ # cleanup tmp files if --files was set
+ unlink $self->fh(1,"name");
+ }
+ if($opt::pipe and $self->virgin()) {
+ # Skip --joblog, --dryrun, --verbose
+ } else {
+ if($Global::joblog and defined $self->{'exitstatus'}) {
+ # Add to joblog when finished
+ $self->print_joblog();
+ }
+
+ # Printing is only relevant for grouped/--line-buffer output.
+ $opt::ungroup and return;
+ # Check for disk full
+ exit_if_disk_full();
+
+ if(($opt::dryrun or $Global::verbose)
+ and
+ not $self->{'verbose_printed'}) {
+ $self->{'verbose_printed'}++;
+ if($Global::verbose <= 1) {
+ print STDOUT $self->replaced(),"\n";
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+ print STDOUT $self->wrapped(),"\n";
+ }
+ # If STDOUT and STDERR are merged,
+ # we want the command to be printed first
+ # so flush to avoid STDOUT being buffered
+ flush STDOUT;
+ }
+ }
+ for my $fdno (sort { $a <=> $b } keys %Global::fd) {
+ # Sort by file descriptor numerically: 1,2,3,..,9,10,11
+ $fdno == 0 and next;
+ my $out_fd = $Global::fd{$fdno};
+ my $in_fh = $self->fh($fdno,"r");
+ if(not $in_fh) {
+ if(not $Job::file_descriptor_warning_printed{$fdno}++) {
+ # ::warning("File descriptor $fdno not defined\n");
+ }
+ next;
+ }
+ ::debug("print", "File descriptor $fdno (", $self->fh($fdno,"name"), "):");
+ if($opt::files) {
+ # If --compress: $in_fh must be closed first.
+ close $self->fh($fdno,"w");
+ close $in_fh;
+ if($opt::pipe and $self->virgin()) {
+ # Nothing was printed to this job:
+ # cleanup unused tmp files if --files was set
+ for my $fdno (1,2) {
+ unlink $self->fh($fdno,"name");
+ unlink $self->fh($fdno,"unlink");
+ }
+ } elsif($fdno == 1 and $self->fh($fdno,"name")) {
+ print $out_fd $self->fh($fdno,"name"),"\n";
+ }
+ } elsif($opt::linebuffer) {
+ # Line buffered print out
+ $self->linebuffer_print($fdno,$in_fh,$out_fd);
+ } else {
+ my $buf;
+ close $self->fh($fdno,"w");
+ seek $in_fh, 0, 0;
+ # $in_fh is now ready for reading at position 0
+ if($opt::tag or defined $opt::tagstring) {
+ my $tag = $self->tag();
+ if($fdno == 2) {
+ # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
+ # This is a crappy way of ignoring it.
+ while(<$in_fh>) {
+ if(/^(client_process_control: )?tcgetattr: Invalid argument\n/) {
+ # Skip
+ } else {
+ print $out_fd $tag,$_;
+ }
+ # At most run the loop once
+ last;
+ }
+ }
+ while(<$in_fh>) {
+ print $out_fd $tag,$_;
+ }
+ } else {
+ my $buf;
+ if($fdno == 2) {
+ # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
+ # This is a crappy way of ignoring it.
+ sysread($in_fh,$buf,1_000);
+ $buf =~ s/^(client_process_control: )?tcgetattr: Invalid argument\n//;
+ print $out_fd $buf;
+ }
+ while(sysread($in_fh,$buf,32768)) {
+ print $out_fd $buf;
+ }
+ }
+ close $in_fh;
+ }
+ flush $out_fd;
+ }
+    ::debug("print", "<<joboutput ", $self->replaced(), "\n");
+}
+
+sub linebuffer_print {
+    # Print the output of a job in line buffered mode (--line-buffer)
+    # Input:
+    #   $fdno = file descriptor number
+    #   $in_fh = file handle to read from
+    #   $out_fd = file handle to write to
+    my $self = shift;
+    my ($fdno,$in_fh,$out_fd) = @_;
+    my $partial = \$self->{'partial_line',$fdno};
+
+ if(defined $self->{'exitstatus'}) {
+ # If the job is dead: close printing fh. Needed for --compress
+ close $self->fh($fdno,"w");
+ if($opt::compress) {
+ # Blocked reading in final round
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ for my $fdno (1,2) {
+ my $fdr = $self->fh($fdno,'r');
+ my $flags;
+ fcntl($fdr, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
+ $flags &= ~&O_NONBLOCK; # Remove non-blocking to the flags
+ fcntl($fdr, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
+ }
+ }
+ }
+ # This seek will clear EOF
+ seek $in_fh, tell($in_fh), 0;
+ # The read is non-blocking: The $in_fh is set to non-blocking.
+ # 32768 --tag = 5.1s
+ # 327680 --tag = 4.4s
+ # 1024000 --tag = 4.4s
+ # 3276800 --tag = 4.3s
+ # 32768000 --tag = 4.7s
+ # 10240000 --tag = 4.3s
+ while(read($in_fh,substr($$partial,length $$partial),3276800)) {
+ # Append to $$partial
+ # Find the last \n
+ my $i = rindex($$partial,"\n");
+ if($i != -1) {
+ # One or more complete lines were found
+ if($fdno == 2 and not $self->{'printed_first_line',$fdno}++) {
+ # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
+ # This is a crappy way of ignoring it.
+ $$partial =~ s/^(client_process_control: )?tcgetattr: Invalid argument\n//;
+ # Length of partial line has changed: Find the last \n again
+ $i = rindex($$partial,"\n");
+ }
+ if($opt::tag or defined $opt::tagstring) {
+ # Replace ^ with $tag within the full line
+ my $tag = $self->tag();
+ substr($$partial,0,$i+1) =~ s/^/$tag/gm;
+ # Length of partial line has changed: Find the last \n again
+ $i = rindex($$partial,"\n");
+ }
+ # Print up to and including the last \n
+ print $out_fd substr($$partial,0,$i+1);
+ # Remove the printed part
+ substr($$partial,0,$i+1)="";
+ }
+ }
+ if(defined $self->{'exitstatus'}) {
+ # If the job is dead: print the remaining partial line
+ # read remaining
+ if($$partial and ($opt::tag or defined $opt::tagstring)) {
+ my $tag = $self->tag();
+ $$partial =~ s/^/$tag/gm;
+ }
+ print $out_fd $$partial;
+ # Release the memory
+ $$partial = undef;
+ if($self->fh($fdno,"rpid") and CORE::kill 0, $self->fh($fdno,"rpid")) {
+ # decompress still running
+ } else {
+ # decompress done: close fh
+ close $in_fh;
+ }
+ }
+}
+
+sub print_joblog {
+ my $self = shift;
+ my $cmd;
+ if($Global::verbose <= 1) {
+ $cmd = $self->replaced();
+ } else {
+ # Verbose level > 1: Print the rsync and stuff
+        $cmd = $self->wrapped();
+ }
+ print $Global::joblog
+ join("\t", $self->seq(), $self->sshlogin()->string(),
+ $self->starttime(), sprintf("%10.3f",$self->runtime()),
+ $self->transfersize(), $self->returnsize(),
+ $self->exitstatus(), $self->exitsignal(), $cmd
+ ). "\n";
+ flush $Global::joblog;
+ $self->set_job_in_joblog();
+}
+
+sub tag {
+ my $self = shift;
+ if(not defined $self->{'tag'}) {
+ $self->{'tag'} = $self->{'commandline'}->
+ replace_placeholders([$opt::tagstring],0,0)."\t";
+ }
+ return $self->{'tag'};
+}
+
+sub hostgroups {
+ my $self = shift;
+ if(not defined $self->{'hostgroups'}) {
+ $self->{'hostgroups'} = $self->{'commandline'}->{'arg_list'}[0][0]->{'hostgroups'};
+ }
+ return @{$self->{'hostgroups'}};
+}
+
+sub exitstatus {
+ my $self = shift;
+ return $self->{'exitstatus'};
+}
+
+sub set_exitstatus {
+ my $self = shift;
+ my $exitstatus = shift;
+ if($exitstatus) {
+ # Overwrite status if non-zero
+ $self->{'exitstatus'} = $exitstatus;
+ } else {
+ # Set status but do not overwrite
+ # Status may have been set by --timeout
+ $self->{'exitstatus'} ||= $exitstatus;
+ }
+}
+
+sub exitsignal {
+ my $self = shift;
+ return $self->{'exitsignal'};
+}
+
+sub set_exitsignal {
+ my $self = shift;
+ my $exitsignal = shift;
+ $self->{'exitsignal'} = $exitsignal;
+}
+
+{
+ my ($disk_full_fh, $b8193, $name);
+ sub exit_if_disk_full {
+ # Checks if $TMPDIR is full by writing 8kb to a tmpfile
+ # If the disk is full: Exit immediately.
+ # Returns:
+ # N/A
+ if(not $disk_full_fh) {
+ ($disk_full_fh, $name) = ::tmpfile(SUFFIX => ".df");
+ unlink $name;
+ $b8193 = "x"x8193;
+ }
+ # Linux does not discover if a disk is full if writing <= 8192
+ # Tested on:
+ # bfs btrfs cramfs ext2 ext3 ext4 ext4dev jffs2 jfs minix msdos
+ # ntfs reiserfs tmpfs ubifs vfat xfs
+ # TODO this should be tested on different OS similar to this:
+ #
+ # doit() {
+ # sudo mount /dev/ram0 /mnt/loop; sudo chmod 1777 /mnt/loop
+ # seq 100000 | parallel --tmpdir /mnt/loop/ true &
+ # seq 6900000 > /mnt/loop/i && echo seq OK
+ # seq 6980868 > /mnt/loop/i
+ # seq 10000 > /mnt/loop/ii
+ # sleep 3
+ # sudo umount /mnt/loop/ || sudo umount -l /mnt/loop/
+ # echo >&2
+ # }
+ print $disk_full_fh $b8193;
+ if(not $disk_full_fh
+ or
+ tell $disk_full_fh == 0) {
+ ::error("Output is incomplete. Cannot append to buffer file in $ENV{'TMPDIR'}. Is the disk full?\n");
+ ::error("Change \$TMPDIR with --tmpdir or use --compress.\n");
+ ::wait_and_exit(255);
+ }
+ truncate $disk_full_fh, 0;
+ seek($disk_full_fh, 0, 0) || die;
+ }
+}
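+# Why 8193 bytes: on the filesystems listed above Linux only reports a full disk for
+# writes larger than 8192 bytes, so the probe writes 8193 bytes and treats a file
+# position of 0 after the write as "disk full".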
+
+
+package CommandLine;
+
+sub new {
+ my $class = shift;
+ my $seq = shift;
+ my $commandref = shift;
+ $commandref || die;
+ my $arg_queue = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift; # for -N and normal (-n1)
+ my $return_files = shift;
+ my $replacecount_ref = shift;
+ my $len_ref = shift;
+ my %replacecount = %$replacecount_ref;
+ my %len = %$len_ref;
+ for (keys %$replacecount_ref) {
+ # Total length of this replacement string {} replaced with all args
+ $len{$_} = 0;
+ }
+ return bless {
+ 'command' => $commandref,
+ 'seq' => $seq,
+ 'len' => \%len,
+ 'arg_list' => [],
+ 'arg_queue' => $arg_queue,
+ 'max_number_of_args' => $max_number_of_args,
+ 'replacecount' => \%replacecount,
+ 'context_replace' => $context_replace,
+ 'return_files' => $return_files,
+ 'replaced' => undef,
+ }, ref($class) || $class;
+}
+
+sub seq {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+{
+ my $max_slot_number;
+
+ sub slot {
+ # Find the number of a free job slot and return it
+ # Uses:
+ # @Global::slots
+ # Returns:
+ # $jobslot = number of jobslot
+ my $self = shift;
+ if(not $self->{'slot'}) {
+ if(not @Global::slots) {
+ # $Global::max_slot_number will typically be $Global::max_jobs_running
+ push @Global::slots, ++$max_slot_number;
+ }
+ $self->{'slot'} = shift @Global::slots;
+ }
+ return $self->{'slot'};
+ }
+}
+
+sub populate {
+ # Add arguments from arg_queue until the number of arguments or
+ # max line length is reached
+ # Uses:
+ # $Global::minimal_command_line_length
+ # $opt::cat
+ # $opt::fifo
+ # $Global::JobQueue
+ # $opt::m
+ # $opt::X
+ # $CommandLine::already_spread
+ # $Global::max_jobs_running
+ # Returns: N/A
+ my $self = shift;
+ my $next_arg;
+ my $max_len = $Global::minimal_command_line_length || Limits::Command::max_length();
+
+ if($opt::cat or $opt::fifo) {
+ # Generate a tempfile name that will be used as {}
+ my($outfh,$name) = ::tmpfile(SUFFIX => ".pip");
+ close $outfh;
+ # Unlink is needed if: ssh otheruser@localhost
+ unlink $name;
+ $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->unget([Arg->new($name)]);
+ }
+
+ while (not $self->{'arg_queue'}->empty()) {
+ $next_arg = $self->{'arg_queue'}->get();
+ if(not defined $next_arg) {
+ next;
+ }
+ $self->push($next_arg);
+ if($self->len() >= $max_len) {
+ # Command length is now > max_length
+ # If there are arguments: remove the last
+ # If there are no arguments: Error
+ # TODO stuff about -x opt_x
+ if($self->number_of_args() > 1) {
+ # There is something to work on
+ $self->{'arg_queue'}->unget($self->pop());
+ last;
+ } else {
+ my $args = join(" ", map { $_->orig() } @$next_arg);
+ ::error("Command line too long (",
+ $self->len(), " >= ",
+ $max_len,
+ ") at number ",
+ $self->{'arg_queue'}->arg_number(),
+ ": ".
+ (substr($args,0,50))."...\n");
+ $self->{'arg_queue'}->unget($self->pop());
+ ::wait_and_exit(255);
+ }
+ }
+
+ if(defined $self->{'max_number_of_args'}) {
+ if($self->number_of_args() >= $self->{'max_number_of_args'}) {
+ last;
+ }
+ }
+ }
+ if(($opt::m or $opt::X) and not $CommandLine::already_spread
+ and $self->{'arg_queue'}->empty() and $Global::max_jobs_running) {
+ # -m or -X and EOF => Spread the arguments over all jobslots
+ # (unless they are already spread)
+ $CommandLine::already_spread ||= 1;
+ if($self->number_of_args() > 1) {
+ $self->{'max_number_of_args'} =
+ ::ceil($self->number_of_args()/$Global::max_jobs_running);
+ $Global::JobQueue->{'commandlinequeue'}->{'max_number_of_args'} =
+ $self->{'max_number_of_args'};
+ $self->{'arg_queue'}->unget($self->pop_all());
+ while($self->number_of_args() < $self->{'max_number_of_args'}) {
+ $self->push($self->{'arg_queue'}->get());
+ }
+ }
+ }
+}
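+# Example of the -m/-X spreading above: with 10 remaining arguments and 4 jobslots,
+# max_number_of_args becomes ceil(10/4) = 3, so the arguments end up spread roughly
+# 3 per job instead of all 10 going to a single job.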
+
+sub push {
+ # Add one or more records as arguments
+ # Returns: N/A
+ my $self = shift;
+ my $record = shift;
+ push @{$self->{'arg_list'}}, $record;
+
+ my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
+ my $rep;
+ for my $arg (@$record) {
+ if(defined $arg) {
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ # 50% faster than below
+ $self->{'len'}{$perlexpr} += length $arg->replace($perlexpr,$quote_arg,$self);
+ # $rep = $arg->replace($perlexpr,$quote_arg,$self);
+ # $self->{'len'}{$perlexpr} += length $rep;
+ # ::debug("length", "Length: ", length $rep,
+ # "(", $perlexpr, "=>", $rep, ")\n");
+ }
+ }
+ }
+}
+
+sub pop {
+ # Remove last argument
+ # Returns:
+ # the last record
+ my $self = shift;
+ my $record = pop @{$self->{'arg_list'}};
+ my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
+ for my $arg (@$record) {
+ if(defined $arg) {
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ $self->{'len'}{$perlexpr} -=
+ length $arg->replace($perlexpr,$quote_arg,$self);
+ }
+ }
+ }
+ return $record;
+}
+
+sub pop_all {
+ # Remove all arguments and zeros the length of replacement strings
+ # Returns:
+ # all records
+ my $self = shift;
+ my @popped = @{$self->{'arg_list'}};
+ for my $replacement_string (keys %{$self->{'replacecount'}}) {
+ $self->{'len'}{$replacement_string} = 0;
+ }
+ $self->{'arg_list'} = [];
+ return @popped;
+}
+
+sub number_of_args {
+ # The number of records
+ # Returns:
+ # number of records
+ my $self = shift;
+ # Ftq rudef oaawuq ime dqxqmeqp az 2011-01-24 mzp ime iaz nk MQhmd
+ # Mdzrvadp Nvmdymeaz az 2011-04-10. Ftue oaawuq dqxqmeqp az
+ # 2013-08-18 ue m nuf tmdpqd me kag tmhq fa geq daf14. Bxqmeq
+ # qymux oaawuq@fmzsq.pw itqz kag dqmp ftue.
+ #
+ # U my ftq ymuzfmuzqd ar m buqoq ar rdqq earfimdq omxxqp SZG
+ # Bmdmxxqx. Rdqq earfimdq sgmdmzfqqe kag mooqee fa ftq eagdoq
+ # oapq, ngf U tmhq nqqz iazpqduzs tai ymzk mofgmxxk _dqmp_ ftq
+ # eagdoq oapq.
+ #
+ # Fa fqef ftue U bgf uz m oayyqzf fqxxuzs bqabxq fa qymux yq itqz
+ # ftqk dqmp ftue. Ftq oayyqzf ime bgf uz m eqofuaz ar ftq oapq
+ # ftmf za azq iagxp xaaw fa ruj ad uybdahq ftq earfimdq - ea ftq
+ # eagdoq oapq qcguhmxqzf fa m pgefk oadzqd. Fa ymwq egdq ftq
+ # oayyqzf iagxp zaf etai gb ur eayq azq vgef sdqbbqp ftdagst ftq
+ # eagdoq oapq U daf13'qp ftq eagdoq oapq
+ # tffb://qz.iuwubqpum.ads/iuwu/DAF13
+ #
+ # 2.5 yazfte xmfqd U dqoquhqp mz qymux rday eayqazq ita zaf azxk
+ # ymzmsqp fa ruzp ftq oayyqzf, ngf mxea ymzmsqp fa sgqee ftq oapq
+ # tmp fa nq daf13'qp.
+ #
+ # Ftue nduzse yq fa ftq oazoxgeuaz ftmf ftqdq _mdq_ bqabxq, ita
+ # mdq zaf mrruxumfqp iuft ftq bdavqof, ftmf iuxx dqmp ftq eagdoq
+ # oapq - ftagst uf ymk zaf tmbbqz hqdk arfqz.
+ #
+ # This is really the number of records
+ return $#{$self->{'arg_list'}}+1;
+}
+
+sub number_of_recargs {
+ # The number of args in records
+ # Returns:
+    #   number of args in the records
+ my $self = shift;
+ my $sum = 0;
+ my $nrec = scalar @{$self->{'arg_list'}};
+ if($nrec) {
+ $sum = $nrec * (scalar @{$self->{'arg_list'}[0]});
+ }
+ return $sum;
+}
+
+sub args_as_string {
+ # Returns:
+ # all unmodified arguments joined with ' ' (similar to {})
+ my $self = shift;
+ return (join " ", map { $_->orig() }
+ map { @$_ } @{$self->{'arg_list'}});
+}
+
+sub args_as_dirname {
+ # Returns:
+ # all unmodified arguments joined with '/' (similar to {})
+ # \t \0 \\ and / are quoted as: \t \0 \\ \_
+ # If $Global::max_file_length: Keep subdirs < $Global::max_file_length
+ my $self = shift;
+ my @res = ();
+
+ for my $rec_ref (@{$self->{'arg_list'}}) {
+ # If headers are used, sort by them.
+ # Otherwise keep the order from the command line.
+ my @header_indexes_sorted = header_indexes_sorted($#$rec_ref+1);
+ for my $n (@header_indexes_sorted) {
+ CORE::push(@res,
+ $Global::input_source_header{$n},
+ map { my $s = $_;
+ # \t \0 \\ and / are quoted as: \t \0 \\ \_
+ $s =~ s/\\/\\\\/g;
+ $s =~ s/\t/\\t/g;
+ $s =~ s/\0/\\0/g;
+ $s =~ s:/:\\_:g;
+ if($Global::max_file_length) {
+ # Keep each subdir shorter than the longest
+ # allowed file name
+ $s = substr($s,0,$Global::max_file_length);
+ }
+ $s; }
+ $rec_ref->[$n-1]->orig());
+ }
+ }
+ return join "/", @res;
+}
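+# Illustrative example (hypothetical argument): an argument like "dir/sub<TAB>x"
+# is rewritten to "dir\_sub\tx" before joining, so a "/" in the input can never
+# introduce an extra directory level, and each component is truncated to
+# $Global::max_file_length when that limit is set.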
+
+sub header_indexes_sorted {
+ # Sort headers first by number then by name.
+ # E.g.: 1a 1b 11a 11b
+ # Returns:
+ # Indexes of %Global::input_source_header sorted
+ my $max_col = shift;
+
+ no warnings 'numeric';
+ for my $col (1 .. $max_col) {
+ # Make sure the header is defined. If it is not: use column number
+ if(not defined $Global::input_source_header{$col}) {
+ $Global::input_source_header{$col} = $col;
+ }
+ }
+ my @header_indexes_sorted = sort {
+ # Sort headers numerically then asciibetically
+ $Global::input_source_header{$a} <=> $Global::input_source_header{$b}
+ or
+ $Global::input_source_header{$a} cmp $Global::input_source_header{$b}
+ } 1 .. $max_col;
+ return @header_indexes_sorted;
+}
+
+sub len {
+ # Uses:
+ # $opt::shellquote
+ # The length of the command line with args substituted
+ my $self = shift;
+ my $len = 0;
+ # Add length of the original command with no args
+ # Length of command w/ all replacement args removed
+ $len += $self->{'len'}{'noncontext'} + @{$self->{'command'}} -1;
+ ::debug("length", "noncontext + command: $len\n");
+ my $recargs = $self->number_of_recargs();
+ if($self->{'context_replace'}) {
+ # Context is duplicated for each arg
+ $len += $recargs * $self->{'len'}{'context'};
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+	    # If the replacement string occurs more than once: multiply its length
+ $len += $self->{'len'}{$replstring} *
+ $self->{'replacecount'}{$replstring};
+ ::debug("length", $replstring, " ", $self->{'len'}{$replstring}, "*",
+ $self->{'replacecount'}{$replstring}, "\n");
+ }
+ # echo 11 22 33 44 55 66 77 88 99 1010
+ # echo 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10
+ # 5 + ctxgrp*arg
+ ::debug("length", "Ctxgrp: ", $self->{'len'}{'contextgroups'},
+ " Groups: ", $self->{'len'}{'noncontextgroups'}, "\n");
+ # Add space between context groups
+ $len += ($recargs-1) * ($self->{'len'}{'contextgroups'});
+ } else {
+ # Each replacement string may occur several times
+ # Add the length for each time
+ $len += 1*$self->{'len'}{'context'};
+ ::debug("length", "context+noncontext + command: $len\n");
+ for my $replstring (keys %{$self->{'replacecount'}}) {
+	    # (space between recargs + length of replacement)
+ # * number this replacement is used
+ $len += ($recargs -1 + $self->{'len'}{$replstring}) *
+ $self->{'replacecount'}{$replstring};
+ }
+ }
+ if($opt::nice) {
+ # Pessimistic length if --nice is set
+ # Worse than worst case: every char needs to be quoted with \
+ $len *= 2;
+ }
+ if($Global::quoting) {
+ # Pessimistic length if -q is set
+ # Worse than worst case: every char needs to be quoted with \
+ $len *= 2;
+ }
+ if($opt::shellquote) {
+ # Pessimistic length if --shellquote is set
+ # Worse than worst case: every char needs to be quoted with \ twice
+ $len *= 4;
+ }
+ # If we are using --env, add the prefix for that, too.
+ $len += $Global::envvarlen;
+
+ return $len;
+}
+
+sub replaced {
+ # Uses:
+ # $Global::noquote
+ # $Global::quoting
+ # Returns:
+ # $replaced = command with place holders replaced and prepended
+ my $self = shift;
+ if(not defined $self->{'replaced'}) {
+ # Don't quote arguments if the input is the full command line
+ my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
+ $self->{'replaced'} = $self->replace_placeholders($self->{'command'},$Global::quoting,$quote_arg);
+ my $len = length $self->{'replaced'};
+ if ($len != $self->len()) {
+ ::debug("length", $len, " != ", $self->len(), " ", $self->{'replaced'}, "\n");
+ } else {
+ ::debug("length", $len, " == ", $self->len(), " ", $self->{'replaced'}, "\n");
+ }
+ }
+ return $self->{'replaced'};
+}
+
+sub replace_placeholders {
+ # Replace foo{}bar with fooargbar
+ # Input:
+ # $targetref = command as shell words
+ # $quote = should everything be quoted?
+ # $quote_arg = should replaced arguments be quoted?
+ # Returns:
+ # @target with placeholders replaced
+ my $self = shift;
+ my $targetref = shift;
+ my $quote = shift;
+ my $quote_arg = shift;
+ my $context_replace = $self->{'context_replace'};
+ my @target = @$targetref;
+ ::debug("replace", "Replace @target\n");
+ # -X = context replace
+ # maybe multiple input sources
+ # maybe --xapply
+ if(not @target) {
+ # @target is empty: Return empty array
+ return @target;
+ }
+ # Fish out the words that have replacement strings in them
+ my %word;
+ for (@target) {
+ my $tt = $_;
+ ::debug("replace", "Target: $tt");
+ # a{1}b{}c{}d
+ # a{=1 $_=$_ =}b{= $_=$_ =}c{= $_=$_ =}d
+ # a\257<1 $_=$_ \257>b\257< $_=$_ \257>c\257< $_=$_ \257>d
+ # A B C => aAbA B CcA B Cd
+ # -X A B C => aAbAcAd aAbBcBd aAbCcCd
+
+ if($context_replace) {
+ while($tt =~ s/([^\s\257]* # before {=
+ (?:
+ \257< # {=
+ [^\257]*? # The perl expression
+ \257> # =}
+ [^\s\257]* # after =}
+ )+)/ /x) {
+ # $1 = pre \257 perlexpr \257 post
+ $word{"$1"} ||= 1;
+ }
+ } else {
+ while($tt =~ s/( (?: \257<([^\257]*?)\257>) )//x) {
+ # $f = \257 perlexpr \257
+ $word{$1} ||= 1;
+ }
+ }
+ }
+ my @word = keys %word;
+
+ my %replace;
+ my @arg;
+ for my $record (@{$self->{'arg_list'}}) {
+ # $self->{'arg_list'} = [ [Arg11, Arg12], [Arg21, Arg22], [Arg31, Arg32] ]
+ # Merge arg-objects from records into @arg for easy access
+ CORE::push @arg, @$record;
+ }
+ # Add one arg if empty to allow {#} and {%} to be computed only once
+ if(not @arg) { @arg = (Arg->new("")); }
+ # Number of arguments - used for positional arguments
+ my $n = $#_+1;
+
+ # This is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ my $job = $self;
+ for my $word (@word) {
+ # word = AB \257< perlexpr \257> CD \257< perlexpr \257> EF
+ my $w = $word;
+ ::debug("replace", "Replacing in $w\n");
+
+ # Replace positional arguments
+ $w =~ s< ([^\s\257]*) # before {=
+ \257< # {=
+ (-?\d+) # Position (eg. -2 or 3)
+ ([^\257]*?) # The perl expression
+ \257> # =}
+ ([^\s\257]*) # after =}
+ >
+ { $1. # Context (pre)
+ (
+ $arg[$2 > 0 ? $2-1 : $n+$2] ? # If defined: replace
+ $arg[$2 > 0 ? $2-1 : $n+$2]->replace($3,$quote_arg,$self)
+ : "")
+ .$4 }egx;# Context (post)
+ ::debug("replace", "Positional replaced $word with: $w\n");
+
+ if($w !~ /\257/) {
+ # No more replacement strings in $w: No need to do more
+ if($quote) {
+ CORE::push(@{$replace{::shell_quote($word)}}, $w);
+ } else {
+ CORE::push(@{$replace{$word}}, $w);
+ }
+ next;
+ }
+ # for each arg:
+ # compute replacement for each string
+ # replace replacement strings with replacement in the word value
+ # push to replace word value
+ ::debug("replace", "Positional done: $w\n");
+ for my $arg (@arg) {
+ my $val = $w;
+ my $number_of_replacements = 0;
+ for my $perlexpr (keys %{$self->{'replacecount'}}) {
+ # Replace {= perl expr =} with value for each arg
+ $number_of_replacements +=
+ $val =~ s{\257<\Q$perlexpr\E\257>}
+ {$arg ? $arg->replace($perlexpr,$quote_arg,$self) : ""}eg;
+ }
+ my $ww = $word;
+ if($quote) {
+ $ww = ::shell_quote_scalar($word);
+ $val = ::shell_quote_scalar($val);
+ }
+ if($number_of_replacements) {
+ CORE::push(@{$replace{$ww}}, $val);
+ }
+ }
+ }
+
+ if($quote) {
+ @target = ::shell_quote(@target);
+ }
+ # ::debug("replace", "%replace=",::my_dump(%replace),"\n");
+ if(%replace) {
+ # Substitute the replace strings with the replacement values
+ # Must be sorted by length if a short word is a substring of a long word
+ my $regexp = join('|', map { my $s = $_; $s =~ s/(\W)/\\$1/g; $s }
+ sort { length $b <=> length $a } keys %replace);
+ for(@target) {
+ s/($regexp)/join(" ",@{$replace{$1}})/ge;
+ }
+ }
+ ::debug("replace", "Return @target\n");
+ return wantarray ? @target : "@target";
+}
+
+
+package CommandLineQueue;
+
+sub new {
+ my $class = shift;
+ my $commandref = shift;
+ my $read_from = shift;
+ my $context_replace = shift;
+ my $max_number_of_args = shift;
+ my $return_files = shift;
+ my @unget = ();
+ my ($count,%replacecount,$posrpl,$perlexpr,%len);
+ my @command = @$commandref;
+    # If the first command starts with '-', it is probably an option
+ if($command[0] =~ /^\s*(-\S+)/) {
+ # Is this really a command in $PATH starting with '-'?
+ my $cmd = $1;
+ if(not ::which($cmd)) {
+ ::error("Command ($cmd) starts with '-'. Is this a wrong option?\n");
+ ::wait_and_exit(255);
+ }
+ }
+ # Replace replacement strings with {= perl expr =}
+ # Protect matching inside {= perl expr =}
+ # by replacing {= and =} with \257< and \257>
+ for(@command) {
+ if(/\257/) {
+ ::error("Command cannot contain the character \257. Use a function for that.\n");
+ ::wait_and_exit(255);
+ }
+ s/\Q$Global::parensleft\E(.*?)\Q$Global::parensright\E/\257<$1\257>/gx;
+ }
+ for my $rpl (keys %Global::rpl) {
+ # Replace the short hand string with the {= perl expr =} in $command and $opt::tagstring
+ # Avoid replacing inside existing {= perl expr =}
+ for(@command,@Global::ret_files) {
+ while(s/((^|\257>)[^\257]*?) # Don't replace after \257 unless \257>
+ \Q$rpl\E/$1\257<$Global::rpl{$rpl}\257>/xg) {
+ }
+ }
+ if(defined $opt::tagstring) {
+ for($opt::tagstring) {
+ while(s/((^|\257>)[^\257]*?) # Don't replace after \257 unless \257>
+ \Q$rpl\E/$1\257<$Global::rpl{$rpl}\257>/x) {}
+ }
+ }
+ # Do the same for the positional replacement strings
+ # A bit harder as we have to put in the position number
+ $posrpl = $rpl;
+ if($posrpl =~ s/^\{//) {
+	    # Only do this if the shorthand starts with {
+ for(@command,@Global::ret_files) {
+ s/\{(-?\d+)\Q$posrpl\E/\257<$1 $Global::rpl{$rpl}\257>/g;
+ }
+ if(defined $opt::tagstring) {
+		$opt::tagstring =~ s/\{(-?\d+)\Q$posrpl\E/\257<$1 $Global::rpl{$rpl}\257>/g;
+ }
+ }
+ }
+ my $sum = 0;
+ while($sum == 0) {
+ # Count how many times each replacement string is used
+ my @cmd = @command;
+ my $contextlen = 0;
+ my $noncontextlen = 0;
+ my $contextgroups = 0;
+ for my $c (@cmd) {
+ while($c =~ s/ \257<([^\257]*?)\257> /\000/x) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "$_++" => 2 }
+ $replacecount{$1} ++;
+ $sum++;
+ }
+ # Measure the length of the context around the {= perl expr =}
+ # Use that {=...=} has been replaced with \000 above
+ # So there is no need to deal with \257<
+ while($c =~ s/ (\S*\000\S*) //x) {
+ my $w = $1;
+ $w =~ tr/\000//d; # Remove all \000's
+ $contextlen += length($w);
+ $contextgroups++;
+ }
+ # All {= perl expr =} have been removed: The rest is non-context
+ $noncontextlen += length $c;
+ }
+ if($opt::tagstring) {
+ my $t = $opt::tagstring;
+ while($t =~ s/ \257<([^\257]*)\257> //x) {
+ # %replacecount = { "perlexpr" => number of times seen }
+ # e.g { "$_++" => 2 }
+ # But for tagstring we just need to mark it as seen
+ $replacecount{$1}||=1;
+ }
+ }
+
+ $len{'context'} = 0+$contextlen;
+ $len{'noncontext'} = $noncontextlen;
+ $len{'contextgroups'} = $contextgroups;
+ $len{'noncontextgroups'} = @cmd-$contextgroups;
+ ::debug("length", "@command Context: ", $len{'context'},
+ " Non: ", $len{'noncontext'}, " Ctxgrp: ", $len{'contextgroups'},
+ " NonCtxGrp: ", $len{'noncontextgroups'}, "\n");
+ if($sum == 0) {
+ # Default command = {}
+ # If not replacement string: append {}
+ if(not @command) {
+ @command = ("\257<\257>");
+ $Global::noquote = 1;
+ } elsif(($opt::pipe or $opt::pipepart)
+ and not $opt::fifo and not $opt::cat) {
+ # With --pipe / --pipe-part you can have no replacement
+ last;
+ } else {
+ # Append {} to the command if there are no {...}'s and no {=...=}
+ push @command, ("\257<\257>");
+ }
+ }
+ }
+
+ return bless {
+ 'unget' => \@unget,
+ 'command' => \@command,
+ 'replacecount' => \%replacecount,
+ 'arg_queue' => RecordQueue->new($read_from,$opt::colsep),
+ 'context_replace' => $context_replace,
+ 'len' => \%len,
+ 'max_number_of_args' => $max_number_of_args,
+ 'size' => undef,
+ 'return_files' => $return_files,
+ 'seq' => 1,
+ }, ref($class) || $class;
+}
+
+sub get {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ my $cmd_line = shift @{$self->{'unget'}};
+ return ($cmd_line);
+ } else {
+ my $cmd_line;
+ $cmd_line = CommandLine->new($self->seq(),
+ $self->{'command'},
+ $self->{'arg_queue'},
+ $self->{'context_replace'},
+ $self->{'max_number_of_args'},
+ $self->{'return_files'},
+ $self->{'replacecount'},
+ $self->{'len'},
+ );
+ $cmd_line->populate();
+ ::debug("init","cmd_line->number_of_args ",
+ $cmd_line->number_of_args(), "\n");
+ if($opt::pipe or $opt::pipepart) {
+ if($cmd_line->replaced() eq "") {
+ # Empty command - pipe requires a command
+ ::error("--pipe must have a command to pipe into (e.g. 'cat').\n");
+ ::wait_and_exit(255);
+ }
+ } else {
+ if($cmd_line->number_of_args() == 0) {
+ # We did not get more args - maybe at EOF string?
+ return undef;
+ } elsif($cmd_line->replaced() eq "") {
+ # Empty command - get the next instead
+ return $self->get();
+ }
+ }
+ $self->set_seq($self->seq()+1);
+ return $cmd_line;
+ }
+}
+
+sub unget {
+ my $self = shift;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}}) && $self->{'arg_queue'}->empty();
+ ::debug("run", "CommandLineQueue->empty $empty");
+ return $empty;
+}
+
+sub seq {
+ my $self = shift;
+ return $self->{'seq'};
+}
+
+sub set_seq {
+ my $self = shift;
+ $self->{'seq'} = shift;
+}
+
+sub quote_args {
+ my $self = shift;
+    # If there is no command, emulate |bash
+ return $self->{'command'};
+}
+
+sub size {
+ my $self = shift;
+ if(not $self->{'size'}) {
+ my @all_lines = ();
+ while(not $self->{'arg_queue'}->empty()) {
+ push @all_lines, CommandLine->new($self->{'command'},
+ $self->{'arg_queue'},
+ $self->{'context_replace'},
+ $self->{'max_number_of_args'});
+ }
+ $self->{'size'} = @all_lines;
+ $self->unget(@all_lines);
+ }
+ return $self->{'size'};
+}
+
+
+package Limits::Command;
+
+# Maximal command line length (for -m and -X)
+sub max_length {
+ # Find the max_length of a command line and cache it
+ # Returns:
+ # number of chars on the longest command line allowed
+ if(not $Limits::Command::line_max_len) {
+ # Disk cache of max command line length
+ my $len_cache = $ENV{'HOME'} . "/.parallel/tmp/linelen-" . ::hostname();
+ my $cached_limit;
+ if(-e $len_cache) {
+ open(my $fh, "<", $len_cache) || ::die_bug("Cannot read $len_cache");
+ $cached_limit = <$fh>;
+ close $fh;
+ } else {
+ $cached_limit = real_max_length();
+ # If $HOME is write protected: Do not fail
+ mkdir($ENV{'HOME'} . "/.parallel");
+ mkdir($ENV{'HOME'} . "/.parallel/tmp");
+ open(my $fh, ">", $len_cache);
+ print $fh $cached_limit;
+ close $fh;
+ }
+ $Limits::Command::line_max_len = $cached_limit;
+ if($opt::max_chars) {
+ if($opt::max_chars <= $cached_limit) {
+ $Limits::Command::line_max_len = $opt::max_chars;
+ } else {
+ ::warning("Value for -s option ",
+ "should be < $cached_limit.\n");
+ }
+ }
+ }
+ return $Limits::Command::line_max_len;
+}
+
+sub real_max_length {
+ # Find the max_length of a command line
+ # Returns:
+ # The maximal command line length
+  # Use an upper bound of 8 MB if the shell allows infinitely long lengths
+ my $upper = 8_000_000;
+ my $len = 8;
+ do {
+ if($len > $upper) { return $len };
+ $len *= 16;
+ } while (is_acceptable_command_line_length($len));
+ # Then search for the actual max length between 0 and upper bound
+ return binary_find_max_length(int($len/16),$len);
+}
+
+sub binary_find_max_length {
+ # Given a lower and upper bound find the max_length of a command line
+ # Returns:
+ # number of chars on the longest command line allowed
+ my ($lower, $upper) = (@_);
+ if($lower == $upper or $lower == $upper-1) { return $lower; }
+ my $middle = int (($upper-$lower)/2 + $lower);
+ ::debug("init", "Maxlen: $lower,$upper,$middle : ");
+ if (is_acceptable_command_line_length($middle)) {
+ return binary_find_max_length($middle,$upper);
+ } else {
+ return binary_find_max_length($lower,$middle);
+ }
+}
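+# Illustrative walk-through (hypothetical numbers): real_max_length() starts at
+# 8 and multiplies by 16 (128, 2048, 32768, ...) until a probe fails; if 131072
+# chars still run but 2097152 do not, binary_find_max_length(131072, 2097152)
+# then halves the interval until lower and upper meet, returning the longest
+# length for which "true xxx...x" still executes.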
+
+sub is_acceptable_command_line_length {
+ # Test if a command line of this length can run
+ # Returns:
+ # 0 if the command line length is too long
+ # 1 otherwise
+ my $len = shift;
+
+ local *STDERR;
+ open (STDERR, ">", "/dev/null");
+ system "true "."x"x$len;
+ close STDERR;
+ ::debug("init", "$len=$? ");
+ return not $?;
+}
+
+
+package RecordQueue;
+
+sub new {
+ my $class = shift;
+ my $fhs = shift;
+ my $colsep = shift;
+ my @unget = ();
+ my $arg_sub_queue;
+ if($colsep) {
+ # Open one file with colsep
+ $arg_sub_queue = RecordColQueue->new($fhs);
+ } else {
+ # Open one or more files if multiple -a
+ $arg_sub_queue = MultifileQueue->new($fhs);
+ }
+ return bless {
+ 'unget' => \@unget,
+ 'arg_number' => 0,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ $self->{'arg_number'}++;
+ return shift @{$self->{'unget'}};
+ }
+ my $ret = $self->{'arg_sub_queue'}->get();
+ if(defined $Global::max_number_of_args
+ and $Global::max_number_of_args == 0) {
+ ::debug("run", "Read 1 but return 0 args\n");
+ return [Arg->new("")];
+ } else {
+ return $ret;
+ }
+}
+
+sub unget {
+ my $self = shift;
+ ::debug("run", "RecordQueue-unget '@_'\n");
+ $self->{'arg_number'} -= @_;
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty {
+ my $self = shift;
+ my $empty = not @{$self->{'unget'}};
+ $empty &&= $self->{'arg_sub_queue'}->empty();
+ ::debug("run", "RecordQueue->empty $empty");
+ return $empty;
+}
+
+sub arg_number {
+ my $self = shift;
+ return $self->{'arg_number'};
+}
+
+
+package RecordColQueue;
+
+sub new {
+ my $class = shift;
+ my $fhs = shift;
+ my @unget = ();
+ my $arg_sub_queue = MultifileQueue->new($fhs);
+ return bless {
+ 'unget' => \@unget,
+ 'arg_sub_queue' => $arg_sub_queue,
+ }, ref($class) || $class;
+}
+
+sub get {
+ # Returns:
+ # reference to array of Arg-objects
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my $unget_ref=$self->{'unget'};
+ if($self->{'arg_sub_queue'}->empty()) {
+ return undef;
+ }
+ my $in_record = $self->{'arg_sub_queue'}->get();
+ if(defined $in_record) {
+ my @out_record = ();
+ for my $arg (@$in_record) {
+ ::debug("run", "RecordColQueue::arg $arg\n");
+ my $line = $arg->orig();
+ ::debug("run", "line='$line'\n");
+ if($line ne "") {
+ for my $s (split /$opt::colsep/o, $line, -1) {
+ push @out_record, Arg->new($s);
+ }
+ } else {
+ push @out_record, Arg->new("");
+ }
+ }
+ return \@out_record;
+ } else {
+ return undef;
+ }
+}
+
+sub unget {
+ my $self = shift;
+ ::debug("run", "RecordColQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty {
+ my $self = shift;
+ my $empty = (not @{$self->{'unget'}} and $self->{'arg_sub_queue'}->empty());
+ ::debug("run", "RecordColQueue->empty $empty");
+ return $empty;
+}
+
+
+package MultifileQueue;
+
+@Global::unget_argv=();
+
+sub new {
+ my $class = shift;
+ my $fhs = shift;
+ for my $fh (@$fhs) {
+ if(-t $fh) {
+ ::warning("Input is read from the terminal. ".
+ "Only experts do this on purpose. ".
+ "Press CTRL-D to exit.\n");
+ }
+ }
+ return bless {
+ 'unget' => \@Global::unget_argv,
+ 'fhs' => $fhs,
+ 'arg_matrix' => undef,
+ }, ref($class) || $class;
+}
+
+sub get {
+ my $self = shift;
+ if($opt::xapply) {
+ return $self->xapply_get();
+ } else {
+ return $self->nest_get();
+ }
+}
+
+sub unget {
+ my $self = shift;
+ ::debug("run", "MultifileQueue-unget '@_'\n");
+ unshift @{$self->{'unget'}}, @_;
+}
+
+sub empty {
+ my $self = shift;
+ my $empty = (not @Global::unget_argv
+ and not @{$self->{'unget'}});
+ for my $fh (@{$self->{'fhs'}}) {
+ $empty &&= eof($fh);
+ }
+ ::debug("run", "MultifileQueue->empty $empty ");
+ return $empty;
+}
+
+sub xapply_get {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend = undef;
+ my $empty = 1;
+ for my $fh (@{$self->{'fhs'}}) {
+ my $arg = read_arg_from_fh($fh);
+ if(defined $arg) {
+ # Record $arg for recycling at end of file
+ push @{$self->{'arg_matrix'}{$fh}}, $arg;
+ push @record, $arg;
+ $empty = 0;
+ } else {
+ ::debug("run", "EOA ");
+ # End of file: Recycle arguments
+ push @{$self->{'arg_matrix'}{$fh}}, shift @{$self->{'arg_matrix'}{$fh}};
+ # return last @{$args->{'args'}{$fh}};
+ push @record, @{$self->{'arg_matrix'}{$fh}}[-1];
+ }
+ }
+ if($empty) {
+ return undef;
+ } else {
+ return \@record;
+ }
+}
+
+sub nest_get {
+ my $self = shift;
+ if(@{$self->{'unget'}}) {
+ return shift @{$self->{'unget'}};
+ }
+ my @record = ();
+ my $prepend = undef;
+ my $empty = 1;
+ my $no_of_inputsources = $#{$self->{'fhs'}} + 1;
+ if(not $self->{'arg_matrix'}) {
+ # Initialize @arg_matrix with one arg from each file
+ # read one line from each file
+ my @first_arg_set;
+ my $all_empty = 1;
+ for (my $fhno = 0; $fhno < $no_of_inputsources ; $fhno++) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ if(defined $arg) {
+ $all_empty = 0;
+ }
+ $self->{'arg_matrix'}[$fhno][0] = $arg || Arg->new("");
+ push @first_arg_set, $self->{'arg_matrix'}[$fhno][0];
+ }
+ if($all_empty) {
+ # All filehandles were at eof or eof-string
+ return undef;
+ }
+ return [@first_arg_set];
+ }
+
+    # Treat the case with one input source specially. For multiple
+ # input sources we need to remember all previously read values to
+ # generate all combinations. But for one input source we can
+ # forget the value after first use.
+ if($no_of_inputsources == 1) {
+ my $arg = read_arg_from_fh($self->{'fhs'}[0]);
+ if(defined($arg)) {
+ return [$arg];
+ }
+ return undef;
+ }
+ for (my $fhno = $no_of_inputsources - 1; $fhno >= 0; $fhno--) {
+ if(eof($self->{'fhs'}[$fhno])) {
+ next;
+ } else {
+ # read one
+ my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
+ defined($arg) || next; # If we just read an EOF string: Treat this as EOF
+ my $len = $#{$self->{'arg_matrix'}[$fhno]} + 1;
+ $self->{'arg_matrix'}[$fhno][$len] = $arg;
+ # make all new combinations
+ my @combarg = ();
+ for (my $fhn = 0; $fhn < $no_of_inputsources; $fhn++) {
+ push @combarg, [0, $#{$self->{'arg_matrix'}[$fhn]}];
+ }
+ $combarg[$fhno] = [$len,$len]; # Find only combinations with this new entry
+ # map combinations
+ # [ 1, 3, 7 ], [ 2, 4, 1 ]
+ # =>
+ # [ m[0][1], m[1][3], m[3][7] ], [ m[0][2], m[1][4], m[2][1] ]
+ my @mapped;
+ for my $c (expand_combinations(@combarg)) {
+ my @a;
+ for my $n (0 .. $no_of_inputsources - 1 ) {
+ push @a, $self->{'arg_matrix'}[$n][$$c[$n]];
+ }
+ push @mapped, \@a;
+ }
+ # append the mapped to the ungotten arguments
+ push @{$self->{'unget'}}, @mapped;
+ # get the first
+ return shift @{$self->{'unget'}};
+ }
+ }
+ # all are eof or at EOF string; return from the unget queue
+ return shift @{$self->{'unget'}};
+}
+
+sub read_arg_from_fh {
+ # Read one Arg from filehandle
+ # Returns:
+ # Arg-object with one read line
+ # undef if end of file
+ my $fh = shift;
+ my $prepend = undef;
+ my $arg;
+ do {{
+	# This makes it about 10% faster
+ if(not ($arg = <$fh>)) {
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+# ::debug("run", "read $arg\n");
+ # Remove delimiter
+ $arg =~ s:$/$::;
+ if($Global::end_of_file_string and
+ $arg eq $Global::end_of_file_string) {
+ # Ignore the rest of input file
+ close $fh;
+ ::debug("run", "EOF-string ($arg) met\n");
+ if(defined $prepend) {
+ return Arg->new($prepend);
+ } else {
+ return undef;
+ }
+ }
+ if(defined $prepend) {
+ $arg = $prepend.$arg; # For line continuation
+ $prepend = undef; #undef;
+ }
+ if($Global::ignore_empty) {
+ if($arg =~ /^\s*$/) {
+ redo; # Try the next line
+ }
+ }
+ if($Global::max_lines) {
+ if($arg =~ /\s$/) {
+ # Trailing space => continued on next line
+ $prepend = $arg;
+ redo;
+ }
+ }
+ }} while (1 == 0); # Dummy loop {{}} for redo
+ if(defined $arg) {
+ return Arg->new($arg);
+ } else {
+ ::die_bug("multiread arg undefined");
+ }
+}
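+# Illustrative behaviour (hypothetical input): with $Global::max_lines set,
+# reading "foo \n" followed by "bar\n" yields a single Arg built from
+# "foo bar", because the trailing space marks a line continuation; a line
+# matching $Global::end_of_file_string closes the filehandle so the rest of
+# that input source is ignored.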
+
+sub expand_combinations {
+ # Input:
+ # ([xmin,xmax], [ymin,ymax], ...)
+ # Returns: ([x,y,...],[x,y,...])
+ # where xmin <= x <= xmax and ymin <= y <= ymax
+ my $minmax_ref = shift;
+ my $xmin = $$minmax_ref[0];
+ my $xmax = $$minmax_ref[1];
+ my @p;
+ if(@_) {
+ # If there are more columns: Compute those recursively
+ my @rest = expand_combinations(@_);
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, map { [$x, @$_] } @rest;
+ }
+ } else {
+ for(my $x = $xmin; $x <= $xmax; $x++) {
+ push @p, [$x];
+ }
+ }
+ return @p;
+}
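+# Worked example (illustrative only): expand_combinations([0,1], [0,2]) returns
+# ([0,0], [0,1], [0,2], [1,0], [1,1], [1,2]) - every [x,y] with 0 <= x <= 1
+# and 0 <= y <= 2, which nest_get() uses to index into 'arg_matrix'.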
+
+
+package Arg;
+
+sub new {
+ my $class = shift;
+ my $orig = shift;
+ my @hostgroups;
+ if($opt::hostgroups) {
+ if($orig =~ s:@(.+)::) {
+ # We found hostgroups on the arg
+ @hostgroups = split(/\+/, $1);
+ if(not grep { defined $Global::hostgroups{$_} } @hostgroups) {
+ ::warning("No such hostgroup (@hostgroups)\n");
+ @hostgroups = (keys %Global::hostgroups);
+ }
+ } else {
+ @hostgroups = (keys %Global::hostgroups);
+ }
+ }
+ return bless {
+ 'orig' => $orig,
+ 'hostgroups' => \@hostgroups,
+ }, ref($class) || $class;
+}
+
+sub replace {
+ # Calculates the corresponding value for a given perl expression
+ # Returns:
+ # The calculated string (quoted if asked for)
+ my $self = shift;
+ my $perlexpr = shift; # E.g. $_=$_ or s/.gz//
+ my $quote = (shift) ? 1 : 0; # should the string be quoted?
+ # This is actually a CommandLine-object,
+ # but it looks nice to be able to say {= $job->slot() =}
+ my $job = shift;
+ $perlexpr =~ s/^-?\d+ //; # Positional replace treated as normal replace
+ if(not defined $self->{"rpl",0,$perlexpr}) {
+ local $_;
+ if($Global::trim eq "n") {
+ $_ = $self->{'orig'};
+ } else {
+ $_ = trim_of($self->{'orig'});
+ }
+ ::debug("replace", "eval ", $perlexpr, " ", $_, "\n");
+ if(not $Global::perleval{$perlexpr}) {
+ # Make an anonymous function of the $perlexpr
+ # And more importantly: Compile it only once
+ if($Global::perleval{$perlexpr} =
+ eval('sub { no strict; no warnings; my $job = shift; '.
+ $perlexpr.' }')) {
+ # All is good
+ } else {
+ # The eval failed. Maybe $perlexpr is invalid perl?
+ ::error("Cannot use $perlexpr: $@\n");
+ ::wait_and_exit(255);
+ }
+ }
+ # Execute the function
+ $Global::perleval{$perlexpr}->($job);
+ $self->{"rpl",0,$perlexpr} = $_;
+ }
+ if(not defined $self->{"rpl",$quote,$perlexpr}) {
+ $self->{"rpl",1,$perlexpr} =
+ ::shell_quote_scalar($self->{"rpl",0,$perlexpr});
+ }
+ return $self->{"rpl",$quote,$perlexpr};
+}
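+# Illustrative example (hypothetical argument and expression): for an Arg built
+# from "foo.gz", replace('s/\.gz$//', 0, $job) evaluates the expression with
+# $_ = "foo.gz" and returns "foo"; asking for the quoted variant additionally
+# passes the result through ::shell_quote_scalar() and caches it separately.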
+
+sub orig {
+ my $self = shift;
+ return $self->{'orig'};
+}
+
+sub trim_of {
+    # Removes white space as specified by --trim:
+ # n = nothing
+ # l = start
+ # r = end
+ # lr|rl = both
+ # Returns:
+ # string with white space removed as needed
+ my @strings = map { defined $_ ? $_ : "" } (@_);
+ my $arg;
+ if($Global::trim eq "n") {
+ # skip
+ } elsif($Global::trim eq "l") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; }
+ } elsif($Global::trim eq "r") {
+ for my $arg (@strings) { $arg =~ s/\s+$//; }
+ } elsif($Global::trim eq "rl" or $Global::trim eq "lr") {
+ for my $arg (@strings) { $arg =~ s/^\s+//; $arg =~ s/\s+$//; }
+ } else {
+ ::error("--trim must be one of: r l rl lr.\n");
+ ::wait_and_exit(255);
+ }
+ return wantarray ? @strings : "@strings";
+}
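+# Illustrative examples (hypothetical input "  x  "): --trim l gives "x  ",
+# --trim r gives "  x", --trim lr (or rl) gives "x", and --trim n leaves the
+# string untouched.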
+
+
+package TimeoutQueue;
+
+sub new {
+ my $class = shift;
+ my $delta_time = shift;
+ my ($pct);
+ if($delta_time =~ /(\d+(\.\d+)?)%/) {
+ # Timeout in percent
+ $pct = $1/100;
+ $delta_time = 1_000_000;
+ }
+ return bless {
+ 'queue' => [],
+ 'delta_time' => $delta_time,
+ 'pct' => $pct,
+ 'remedian_idx' => 0,
+ 'remedian_arr' => [],
+ 'remedian' => undef,
+ }, ref($class) || $class;
+}
+
+sub delta_time {
+ my $self = shift;
+ return $self->{'delta_time'};
+}
+
+sub set_delta_time {
+ my $self = shift;
+ $self->{'delta_time'} = shift;
+}
+
+sub remedian {
+ my $self = shift;
+ return $self->{'remedian'};
+}
+
+sub set_remedian {
+ # Set median of the last 999^3 (=997002999) values using Remedian
+ #
+ # Rousseeuw, Peter J., and Gilbert W. Bassett Jr. "The remedian: A
+ # robust averaging method for large data sets." Journal of the
+ # American Statistical Association 85.409 (1990): 97-104.
+ my $self = shift;
+ my $val = shift;
+ my $i = $self->{'remedian_idx'}++;
+ my $rref = $self->{'remedian_arr'};
+ $rref->[0][$i%999] = $val;
+ $rref->[1][$i/999%999] = (sort @{$rref->[0]})[$#{$rref->[0]}/2];
+ $rref->[2][$i/999/999%999] = (sort @{$rref->[1]})[$#{$rref->[1]}/2];
+ $self->{'remedian'} = (sort @{$rref->[2]})[$#{$rref->[2]}/2];
+}
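+# Sketch of the idea (illustrative, shown with 3 instead of 999): level 0 holds
+# the most recent raw values, level 1 holds medians of level-0 buffers, level 2
+# holds medians of level-1 buffers, and the reported remedian is the median of
+# level 2: a median-of-medians that approximates the true median of a long
+# stream in constant memory.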
+
+sub update_delta_time {
+ # Update delta_time based on runtime of finished job if timeout is
+ # a percentage
+ my $self = shift;
+ my $runtime = shift;
+ if($self->{'pct'}) {
+ $self->set_remedian($runtime);
+ $self->{'delta_time'} = $self->{'pct'} * $self->remedian();
+ ::debug("run", "Timeout: $self->{'delta_time'}s ");
+ }
+}
+
+sub process_timeouts {
+ # Check if there was a timeout
+ my $self = shift;
+ # $self->{'queue'} is sorted by start time
+ while (@{$self->{'queue'}}) {
+ my $job = $self->{'queue'}[0];
+ if($job->endtime()) {
+ # Job already finished. No need to timeout the job
+ # This could be because of --keep-order
+ shift @{$self->{'queue'}};
+ } elsif($job->timedout($self->{'delta_time'})) {
+ # Need to shift off queue before kill
+ # because kill calls usleep that calls process_timeouts
+ shift @{$self->{'queue'}};
+ $job->kill();
+ } else {
+ # Because they are sorted by start time the rest are later
+ last;
+ }
+ }
+}
+
+sub insert {
+ my $self = shift;
+ my $in = shift;
+ push @{$self->{'queue'}}, $in;
+}
+
+
+package Semaphore;
+
+# This package provides a counting semaphore
+#
+# If a process dies without releasing the semaphore the next process
+# that needs that entry will clean up dead semaphores
+#
+# The semaphores are stored in ~/.parallel/semaphores/id-<name>. Each
+# file in ~/.parallel/semaphores/id-<name>/ is the process ID of the
+# process holding the entry. If the process dies, the entry can be
+# taken by another process.
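+#
+# Illustrative layout (hypothetical id "mylock" with count 2 on host "server1"):
+#   ~/.parallel/semaphores/id-mylock/id-mylock       <- the id file
+#   ~/.parallel/semaphores/id-mylock/12345@server1   <- holder with pid 12345
+#   ~/.parallel/semaphores/id-mylock/12346@server1   <- holder with pid 12346
+# Every holder file is a hard link to the id file, so the id file's link count
+# is the number of holders plus one, which is why new() stores $count + 1.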
+
+sub new {
+ my $class = shift;
+ my $id = shift;
+ my $count = shift;
+ $id=~s/([^-_a-z0-9])/unpack("H*",$1)/ige; # Convert non-word chars to hex
+ $id="id-".$id; # To distinguish it from a process id
+ my $parallel_dir = $ENV{'HOME'}."/.parallel";
+ -d $parallel_dir or mkdir_or_die($parallel_dir);
+ my $parallel_locks = $parallel_dir."/semaphores";
+ -d $parallel_locks or mkdir_or_die($parallel_locks);
+ my $lockdir = "$parallel_locks/$id";
+ my $lockfile = $lockdir.".lock";
+ if($count < 1) { ::die_bug("semaphore-count: $count"); }
+ return bless {
+ 'lockfile' => $lockfile,
+ 'lockfh' => Symbol::gensym(),
+ 'lockdir' => $lockdir,
+ 'id' => $id,
+ 'idfile' => $lockdir."/".$id,
+ 'pid' => $$,
+ 'pidfile' => $lockdir."/".$$.'@'.::hostname(),
+ 'count' => $count + 1 # nlinks returns a link for the 'id-' as well
+ }, ref($class) || $class;
+}
+
+sub acquire {
+ my $self = shift;
+ my $sleep = 1; # 1 ms
+ my $start_time = time;
+ while(1) {
+ $self->atomic_link_if_count_less_than() and last;
+ ::debug("sem", "Remove dead locks");
+ my $lockdir = $self->{'lockdir'};
+ for my $d (glob "$lockdir/*") {
+ ::debug("sem", "Lock $d $lockdir\n");
+ $d =~ m:$lockdir/([0-9]+)\@([-\._a-z0-9]+)$:o or next;
+ my ($pid, $host) = ($1, $2);
+ if($host eq ::hostname()) {
+ if(not kill 0, $1) {
+ ::debug("sem", "Dead: $d");
+ unlink $d;
+ } else {
+ ::debug("sem", "Alive: $d");
+ }
+ }
+ }
+ # try again
+ $self->atomic_link_if_count_less_than() and last;
+ # Retry slower and slower up to 1 second
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ if(defined($opt::timeout) and
+ $start_time + $opt::timeout > time) {
+ # Acquire the lock anyway
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("timeout_write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ link $self->{'idfile'}, $self->{'pidfile'};
+ last;
+ }
+ }
+ ::debug("sem", "acquired $self->{'pid'}\n");
+}
+
+sub release {
+ my $self = shift;
+ unlink $self->{'pidfile'};
+ if($self->nlinks() == 1) {
+ # This is the last link, so atomic cleanup
+ $self->lock();
+ if($self->nlinks() == 1) {
+ unlink $self->{'idfile'};
+ rmdir $self->{'lockdir'};
+ }
+ $self->unlock();
+ }
+ ::debug("run", "released $self->{'pid'}\n");
+}
+
+sub _release {
+ my $self = shift;
+
+ unlink $self->{'pidfile'};
+ $self->lock();
+ my $nlinks = $self->nlinks();
+ ::debug("sem", $nlinks, "<", $self->{'count'});
+ if($nlinks-- > 1) {
+ unlink $self->{'idfile'};
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ print $fh "#"x$nlinks;
+ close $fh;
+ } else {
+ unlink $self->{'idfile'};
+ rmdir $self->{'lockdir'};
+ }
+ $self->unlock();
+ ::debug("sem", "released $self->{'pid'}\n");
+}
+
+sub atomic_link_if_count_less_than {
+ # Link $file1 to $file2 if nlinks to $file1 < $count
+ my $self = shift;
+ my $retval = 0;
+ $self->lock();
+    ::debug("sem", $self->nlinks(), "<", $self->{'count'});
+ if($self->nlinks() < $self->{'count'}) {
+ -d $self->{'lockdir'} or mkdir_or_die($self->{'lockdir'});
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ }
+ $self->unlock();
+ ::debug("run", "atomic $retval");
+ return $retval;
+}
+
+sub _atomic_link_if_count_less_than {
+ # Link $file1 to $file2 if nlinks to $file1 < $count
+ my $self = shift;
+ my $retval = 0;
+ $self->lock();
+ my $nlinks = $self->nlinks();
+ ::debug("sem", $nlinks, "<", $self->{'count'});
+ if($nlinks++ < $self->{'count'}) {
+ -d $self->{'lockdir'} or mkdir_or_die($self->{'lockdir'});
+ if(not -e $self->{'idfile'}) {
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ close $fh;
+ }
+ open (my $fh, ">", $self->{'idfile'}) or
+ ::die_bug("write_idfile: $self->{'idfile'}");
+ print $fh "#"x$nlinks;
+ close $fh;
+ $retval = link $self->{'idfile'}, $self->{'pidfile'};
+ }
+ $self->unlock();
+ ::debug("sem", "atomic $retval");
+ return $retval;
+}
+
+sub nlinks {
+ my $self = shift;
+ if(-e $self->{'idfile'}) {
+ ::debug("sem", "nlinks", (stat(_))[3], "size", (stat(_))[7], "\n");
+ return (stat(_))[3];
+ } else {
+ return 0;
+ }
+}
+
+sub lock {
+ my $self = shift;
+ my $sleep = 100; # 100 ms
+ my $total_sleep = 0;
+ $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
+ my $locked = 0;
+ while(not $locked) {
+ if(tell($self->{'lockfh'}) == -1) {
+ # File not open
+ open($self->{'lockfh'}, ">", $self->{'lockfile'})
+ or ::debug("run", "Cannot open $self->{'lockfile'}");
+ }
+ if($self->{'lockfh'}) {
+ # File is open
+ chmod 0666, $self->{'lockfile'}; # assuming you want it a+rw
+ if(flock($self->{'lockfh'}, LOCK_EX()|LOCK_NB())) {
+ # The file is locked: No need to retry
+ $locked = 1;
+ last;
+ } else {
+ if ($! =~ m/Function not implemented/) {
+ ::warning("flock: $!");
+ ::warning("Will wait for a random while\n");
+ ::usleep(rand(5000));
+ # File cannot be locked: No need to retry
+ $locked = 2;
+ last;
+ }
+ }
+ }
+ # Locking failed in first round
+ # Sleep and try again
+ $sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
+ # Random to avoid every sleeping job waking up at the same time
+ ::usleep(rand()*$sleep);
+ $total_sleep += $sleep;
+ if($opt::semaphoretimeout) {
+ if($total_sleep/1000 > $opt::semaphoretimeout) {
+ # Timeout: bail out
+ ::warning("Semaphore timed out. Ignoring timeout.");
+ $locked = 3;
+ last;
+ }
+ } else {
+ if($total_sleep/1000 > 30) {
+ ::warning("Semaphore stuck for 30 seconds. Consider using --semaphoretimeout.");
+ }
+ }
+ }
+ ::debug("run", "locked $self->{'lockfile'}");
+}
+
+sub unlock {
+ my $self = shift;
+ unlink $self->{'lockfile'};
+ close $self->{'lockfh'};
+ ::debug("run", "unlocked\n");
+}
+
+sub mkdir_or_die {
+ # If dir is not writable: die
+ my $dir = shift;
+ my @dir_parts = split(m:/:,$dir);
+ my ($ddir,$part);
+ while(defined ($part = shift @dir_parts)) {
+ $part eq "" and next;
+ $ddir .= "/".$part;
+ -d $ddir and next;
+ mkdir $ddir;
+ }
+ if(not -w $dir) {
+ ::error("Cannot write to $dir: $!\n");
+ ::wait_and_exit(255);
+ }
+}
+
+# Keep perl -w happy
+$opt::x = $Semaphore::timeout = $Semaphore::wait =
+$Job::file_descriptor_warning_printed = 0;
diff --git a/c-deps/rocksdb/build_tools/make_package.sh b/c-deps/rocksdb/build_tools/make_package.sh
new file mode 100755
index 0000000000..58bac44739
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/make_package.sh
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+
+set -e
+
+function log() {
+ echo "[+] $1"
+}
+
+function fatal() {
+ echo "[!] $1"
+ exit 1
+}
+
+function platform() {
+ local __resultvar=$1
+ if [[ -f "/etc/yum.conf" ]]; then
+ eval $__resultvar="centos"
+ elif [[ -f "/etc/dpkg/dpkg.cfg" ]]; then
+ eval $__resultvar="ubuntu"
+ else
+    fatal "Unknown operating system"
+ fi
+}
+platform OS
+
+function package() {
+ if [[ $OS = "ubuntu" ]]; then
+ if dpkg --get-selections | grep --quiet $1; then
+ log "$1 is already installed. skipping."
+ else
+ apt-get install $@ -y
+ fi
+ elif [[ $OS = "centos" ]]; then
+ if rpm -qa | grep --quiet $1; then
+ log "$1 is already installed. skipping."
+ else
+ yum install $@ -y
+ fi
+ fi
+}
+
+function detect_fpm_output() {
+ if [[ $OS = "ubuntu" ]]; then
+ export FPM_OUTPUT=deb
+ elif [[ $OS = "centos" ]]; then
+ export FPM_OUTPUT=rpm
+ fi
+}
+detect_fpm_output
+
+function gem_install() {
+ if gem list | grep --quiet $1; then
+ log "$1 is already installed. skipping."
+ else
+ gem install $@
+ fi
+}
+
+function main() {
+ if [[ $# -ne 1 ]]; then
+    fatal "Usage: $0 <rocksdb_version>"
+ else
+ log "using rocksdb version: $1"
+ fi
+
+ if [[ -d /vagrant ]]; then
+ if [[ $OS = "ubuntu" ]]; then
+ package g++-4.8
+ export CXX=g++-4.8
+
+ # the deb would depend on libgflags2, but the static lib is the only thing
+ # installed by make install
+ package libgflags-dev
+
+ package ruby-all-dev
+ elif [[ $OS = "centos" ]]; then
+ pushd /etc/yum.repos.d
+ if [[ ! -f /etc/yum.repos.d/devtools-1.1.repo ]]; then
+ wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo
+ fi
+ package devtoolset-1.1-gcc --enablerepo=testing-1.1-devtools-6
+ package devtoolset-1.1-gcc-c++ --enablerepo=testing-1.1-devtools-6
+ export CC=/opt/centos/devtoolset-1.1/root/usr/bin/gcc
+ export CPP=/opt/centos/devtoolset-1.1/root/usr/bin/cpp
+ export CXX=/opt/centos/devtoolset-1.1/root/usr/bin/c++
+ export PATH=$PATH:/opt/centos/devtoolset-1.1/root/usr/bin
+ popd
+ if ! rpm -qa | grep --quiet gflags; then
+ rpm -i https://github.com/schuhschuh/gflags/releases/download/v2.1.0/gflags-devel-2.1.0-1.amd64.rpm
+ fi
+
+ package ruby
+ package ruby-devel
+ package rubygems
+ package rpm-build
+ fi
+ fi
+ gem_install fpm
+
+ make static_lib
+ make install INSTALL_PATH=package
+
+ cd package
+
+ LIB_DIR=lib
+ if [[ -z "$ARCH" ]]; then
+ ARCH=$(getconf LONG_BIT)
+ fi
+ if [[ ("$FPM_OUTPUT" = "rpm") && ($ARCH -eq 64) ]]; then
+ mv lib lib64
+ LIB_DIR=lib64
+ fi
+
+ fpm \
+ -s dir \
+ -t $FPM_OUTPUT \
+ -n rocksdb \
+ -v $1 \
+ --prefix /usr \
+ --url http://rocksdb.org/ \
+ -m rocksdb@fb.com \
+ --license BSD \
+ --vendor Facebook \
+ --description "RocksDB is an embeddable persistent key-value store for fast storage." \
+ include $LIB_DIR
+}
+
+main $@
diff --git a/c-deps/rocksdb/build_tools/precommit_checker.py b/c-deps/rocksdb/build_tools/precommit_checker.py
new file mode 100755
index 0000000000..0f8884dfda
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/precommit_checker.py
@@ -0,0 +1,208 @@
+#!/usr/local/fbcode/gcc-4.9-glibc-2.20-fb/bin/python2.7
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import argparse
+import commands
+import subprocess
+import sys
+import re
+import os
+import time
+
+
+#
+# Simple logger
+#
+
+class Log:
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.f = open(self.filename, 'w+', 0)
+
+ def caption(self, str):
+ line = "\n##### %s #####\n" % str
+ if self.f:
+ self.f.write("%s \n" % line)
+ else:
+ print(line)
+
+ def error(self, str):
+ data = "\n\n##### ERROR ##### %s" % str
+ if self.f:
+ self.f.write("%s \n" % data)
+ else:
+ print(data)
+
+ def log(self, str):
+ if self.f:
+ self.f.write("%s \n" % str)
+ else:
+ print(str)
+
+#
+# Shell Environment
+#
+
+
+class Env(object):
+
+ def __init__(self, logfile, tests):
+ self.tests = tests
+ self.log = Log(logfile)
+
+ def shell(self, cmd, path=os.getcwd()):
+ if path:
+ os.chdir(path)
+
+ self.log.log("==== shell session ===========================")
+ self.log.log("%s> %s" % (path, cmd))
+ status = subprocess.call("cd %s; %s" % (path, cmd), shell=True,
+ stdout=self.log.f, stderr=self.log.f)
+ self.log.log("status = %s" % status)
+ self.log.log("============================================== \n\n")
+ return status
+
+ def GetOutput(self, cmd, path=os.getcwd()):
+ if path:
+ os.chdir(path)
+
+ self.log.log("==== shell session ===========================")
+ self.log.log("%s> %s" % (path, cmd))
+ status, out = commands.getstatusoutput(cmd)
+ self.log.log("status = %s" % status)
+ self.log.log("out = %s" % out)
+ self.log.log("============================================== \n\n")
+ return status, out
+
+#
+# Pre-commit checker
+#
+
+
+class PreCommitChecker(Env):
+
+ def __init__(self, args):
+ Env.__init__(self, args.logfile, args.tests)
+ self.ignore_failure = args.ignore_failure
+
+ #
+ # Get commands for a given job from the determinator file
+ #
+ def get_commands(self, test):
+ status, out = self.GetOutput(
+ "RATIO=1 build_tools/rocksdb-lego-determinator %s" % test, ".")
+ return status, out
+
+ #
+ # Run a specific CI job
+ #
+ def run_test(self, test):
+ self.log.caption("Running test %s locally" % test)
+
+ # get commands for the CI job determinator
+ status, cmds = self.get_commands(test)
+ if status != 0:
+ self.log.error("Error getting commands for test %s" % test)
+ return False
+
+ # Parse the JSON to extract the commands to run
+ cmds = re.findall("'shell':'([^\']*)'", cmds)
+
+ if len(cmds) == 0:
+ self.log.log("No commands found")
+ return False
+
+ # Run commands
+ for cmd in cmds:
+ # Replace J=<..> with the local environment variable
+ if "J" in os.environ:
+ cmd = cmd.replace("J=1", "J=%s" % os.environ["J"])
+ cmd = cmd.replace("make ", "make -j%s " % os.environ["J"])
+ # Run the command
+ status = self.shell(cmd, ".")
+ if status != 0:
+ self.log.error("Error running command %s for test %s"
+ % (cmd, test))
+ return False
+
+ return True
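+    #
+    # Illustrative example (hypothetical determinator output, not an actual
+    # format guarantee): a blob containing
+    #   'steps': [ {'shell':'make clean'}, {'shell':'make check'} ]
+    # is parsed by the regex in run_test() into
+    #   cmds == ['make clean', 'make check']
+    # and the commands are then executed one by one.
+    #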
+
+ #
+ # Run specified CI jobs
+ #
+ def run_tests(self):
+ if not self.tests:
+ self.log.error("Invalid args. Please provide tests")
+ return False
+
+ self.print_separator()
+ self.print_row("TEST", "RESULT")
+ self.print_separator()
+
+ result = True
+ for test in self.tests:
+ start_time = time.time()
+ self.print_test(test)
+ result = self.run_test(test)
+ elapsed_min = (time.time() - start_time) / 60
+ if not result:
+ self.log.error("Error running test %s" % test)
+ self.print_result("FAIL (%dm)" % elapsed_min)
+ if not self.ignore_failure:
+ return False
+ result = False
+ else:
+ self.print_result("PASS (%dm)" % elapsed_min)
+
+ self.print_separator()
+ return result
+
+ #
+ # Print a line
+ #
+ def print_separator(self):
+ print("".ljust(60, "-"))
+
+ #
+    # Print two columns
+ #
+ def print_row(self, c0, c1):
+ print("%s%s" % (c0.ljust(40), c1.ljust(20)))
+
+ def print_test(self, test):
+ print(test.ljust(40), end="")
+ sys.stdout.flush()
+
+ def print_result(self, result):
+ print(result.ljust(20))
+
+#
+# Main
+#
+parser = argparse.ArgumentParser(description='RocksDB pre-commit checker.')
+
+# --log
+parser.add_argument('--logfile', default='/tmp/precommit-check.log',
+ help='Log file. Default is /tmp/precommit-check.log')
+# --ignore_failure
+parser.add_argument('--ignore_failure', action='store_true', default=False,
+                    help='Continue running the remaining tests even if a test fails')
+#
+parser.add_argument('tests', nargs='+',
+ help='CI test(s) to run. e.g: unit punit asan tsan ubsan')
+
+args = parser.parse_args()
+checker = PreCommitChecker(args)
+
+print("Please follow log %s" % checker.log.filename)
+
+if not checker.run_tests():
+ print("Error running tests. Please check log file %s"
+ % checker.log.filename)
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/c-deps/rocksdb/build_tools/regression_build_test.sh b/c-deps/rocksdb/build_tools/regression_build_test.sh
new file mode 100755
index 0000000000..6980633287
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/regression_build_test.sh
@@ -0,0 +1,413 @@
+#!/usr/bin/env bash
+
+set -e
+
+NUM=10000000
+
+if [ $# -eq 1 ];then
+ DATA_DIR=$1
+elif [ $# -eq 2 ];then
+ DATA_DIR=$1
+ STAT_FILE=$2
+fi
+
+# On the production build servers, set data and stat
+# files/directories not in /tmp or else the tempdir cleaning
+# scripts will make you very unhappy.
+DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
+STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
+
+function cleanup {
+ rm -rf $DATA_DIR
+ rm -f $STAT_FILE.fillseq
+ rm -f $STAT_FILE.readrandom
+ rm -f $STAT_FILE.overwrite
+ rm -f $STAT_FILE.memtablefillreadrandom
+}
+
+trap cleanup EXIT
+
+if [ -z $GIT_BRANCH ]; then
+ git_br=`git rev-parse --abbrev-ref HEAD`
+else
+ git_br=$(basename $GIT_BRANCH)
+fi
+
+if [ $git_br == "master" ]; then
+ git_br=""
+else
+ git_br="."$git_br
+fi
+
+make release
+
+# measure fillseq + fill up the DB for overwrite benchmark
+./db_bench \
+ --benchmarks=fillseq \
+ --db=$DATA_DIR \
+ --use_existing_db=0 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --writes=$NUM \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 > ${STAT_FILE}.fillseq
+
+# measure overwrite performance
+./db_bench \
+ --benchmarks=overwrite \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --writes=$((NUM / 10)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=8 > ${STAT_FILE}.overwrite
+
+# fill up the db for readrandom benchmark (1GB total size)
+./db_bench \
+ --benchmarks=fillseq \
+ --db=$DATA_DIR \
+ --use_existing_db=0 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --writes=$NUM \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=1 > /dev/null
+
+# measure readrandom with 6GB block cache
+./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --reads=$((NUM / 5)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readrandom
+
+# measure readrandom with 6GB block cache and tailing iterator
+./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --reads=$((NUM / 5)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --use_tailing_iterator=1 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readrandomtailing
+
+# measure readrandom with 100MB block cache
+./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --reads=$((NUM / 5)) \
+ --cache_size=104857600 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readrandomsmallblockcache
+
+# measure readrandom with 8k data in memtable
+./db_bench \
+ --benchmarks=overwrite,readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$NUM \
+ --reads=$((NUM / 5)) \
+ --writes=512 \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --write_buffer_size=1000000000 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readrandom_mem_sst
+
+
+# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
+./db_bench \
+ --benchmarks=filluniquerandom \
+ --db=$DATA_DIR \
+ --use_existing_db=0 \
+ --bloom_bits=10 \
+ --num=$((NUM / 4)) \
+ --writes=$((NUM / 4)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=1 > /dev/null
+
+# dummy test just to compact the data
+./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$((NUM / 1000)) \
+ --reads=$((NUM / 1000)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > /dev/null
+
+# measure readrandom after load with filluniquerandom with 6GB block cache
+./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$((NUM / 4)) \
+ --reads=$((NUM / 4)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --disable_auto_compactions=1 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom
+
+# measure readwhilewriting after load with filluniquerandom with 6GB block cache
+./db_bench \
+ --benchmarks=readwhilewriting \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$((NUM / 4)) \
+ --reads=$((NUM / 4)) \
+ --benchmark_write_rate_limit=$(( 110 * 1024 )) \
+ --write_buffer_size=100000000 \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readwhilewriting
+
+# measure memtable performance -- none of the data gets flushed to disk
+./db_bench \
+ --benchmarks=fillrandom,readrandom, \
+ --db=$DATA_DIR \
+ --use_existing_db=0 \
+ --num=$((NUM / 10)) \
+ --reads=$NUM \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --write_buffer_size=1000000000 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --value_size=10 \
+ --threads=16 > ${STAT_FILE}.memtablefillreadrandom
+
+common_in_mem_args="--db=/dev/shm/rocksdb \
+ --num_levels=6 \
+ --key_size=20 \
+ --prefix_size=12 \
+ --keys_per_prefix=10 \
+ --value_size=100 \
+ --compression_type=none \
+ --compression_ratio=1 \
+ --hard_rate_limit=2 \
+ --write_buffer_size=134217728 \
+ --max_write_buffer_number=4 \
+ --level0_file_num_compaction_trigger=8 \
+ --level0_slowdown_writes_trigger=16 \
+ --level0_stop_writes_trigger=24 \
+ --target_file_size_base=134217728 \
+ --max_bytes_for_level_base=1073741824 \
+ --disable_wal=0 \
+ --wal_dir=/dev/shm/rocksdb \
+ --sync=0 \
+ --verify_checksum=1 \
+ --delete_obsolete_files_period_micros=314572800 \
+ --max_grandparent_overlap_factor=10 \
+ --use_plain_table=1 \
+ --open_files=-1 \
+ --mmap_read=1 \
+ --mmap_write=0 \
+ --memtablerep=prefix_hash \
+ --bloom_bits=10 \
+ --bloom_locality=1 \
+ --perf_level=0"
+
+# prepare an in-memory DB with 50M keys; total DB size is ~6G
+./db_bench \
+ $common_in_mem_args \
+ --statistics=0 \
+ --max_background_compactions=16 \
+ --max_background_flushes=16 \
+ --benchmarks=filluniquerandom \
+ --use_existing_db=0 \
+ --num=52428800 \
+ --threads=1 > /dev/null
+
+# Readwhilewriting
+./db_bench \
+ $common_in_mem_args \
+ --statistics=1 \
+ --max_background_compactions=4 \
+ --max_background_flushes=0 \
+ --benchmarks=readwhilewriting\
+ --use_existing_db=1 \
+ --duration=600 \
+ --threads=32 \
+ --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram
+
+# Seekrandomwhilewriting
+./db_bench \
+ $common_in_mem_args \
+ --statistics=1 \
+ --max_background_compactions=4 \
+ --max_background_flushes=0 \
+ --benchmarks=seekrandomwhilewriting \
+ --use_existing_db=1 \
+ --use_tailing_iterator=1 \
+ --duration=600 \
+ --threads=32 \
+ --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram
+
+# measure fillseq with a bunch of column families
+./db_bench \
+ --benchmarks=fillseq \
+ --num_column_families=500 \
+ --write_buffer_size=1048576 \
+ --db=$DATA_DIR \
+ --use_existing_db=0 \
+ --num=$NUM \
+ --writes=$NUM \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 > ${STAT_FILE}.fillseq_lots_column_families
+
+# measure overwrite performance with a bunch of column families
+./db_bench \
+ --benchmarks=overwrite \
+ --num_column_families=500 \
+ --write_buffer_size=1048576 \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --num=$NUM \
+ --writes=$((NUM / 10)) \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=8 > ${STAT_FILE}.overwrite_lots_column_families
+
+# send data to ods
+function send_to_ods {
+ key="$1"
+ value="$2"
+
+ if [ -z $JENKINS_HOME ]; then
+ # running on devbox, just print out the values
+ echo $1 $2
+ return
+ fi
+
+ if [ -z "$value" ];then
+ echo >&2 "ERROR: Key $key doesn't have a value."
+ return
+ fi
+ curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
+ --connect-timeout 60
+}
+
+function send_benchmark_to_ods {
+ bench="$1"
+ bench_key="$2"
+ file="$3"
+
+ QPS=$(grep $bench $file | awk '{print $5}')
+ P50_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $3}' )
+ P75_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $5}' )
+ P99_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $7}' )
+
+ send_to_ods rocksdb.build.$bench_key.qps $QPS
+ send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS
+ send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS
+ send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS
+}
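+# Illustrative example (hypothetical db_bench output, not real numbers): a line
+# such as "readrandom : 5.1 micros/op 195000 ops/sec;" makes the awk above pick
+# field 5, so QPS=195000, while the "Percentiles" line a few lines below it
+# supplies the P50/P75/P99 latencies in microseconds.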
+
+send_benchmark_to_ods overwrite overwrite $STAT_FILE.overwrite
+send_benchmark_to_ods fillseq fillseq $STAT_FILE.fillseq
+send_benchmark_to_ods readrandom readrandom $STAT_FILE.readrandom
+send_benchmark_to_ods readrandom readrandom_tailing $STAT_FILE.readrandomtailing
+send_benchmark_to_ods readrandom readrandom_smallblockcache $STAT_FILE.readrandomsmallblockcache
+send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_mem_sst
+send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom
+send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom
+send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom
+send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting
+send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram
+send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram
+send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families
+send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families
diff --git a/c-deps/rocksdb/build_tools/rocksdb-lego-determinator b/c-deps/rocksdb/build_tools/rocksdb-lego-determinator
new file mode 100755
index 0000000000..6e8ae9cd73
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/rocksdb-lego-determinator
@@ -0,0 +1,782 @@
+#!/usr/bin/env bash
+# This script is executed by Sandcastle
+# to determine next steps to run
+
+# Usage:
+# EMAIL= ONCALL= TRIGGER= SUBSCRIBER= rocks_ci.py
+#
+# Input Value
+# -------------------------------------------------------------------------
+# EMAIL Email address to report on trigger conditions
+# ONCALL Email address to raise a task on failure
+# TRIGGER Trigger conditions for email. Valid values are fail, warn, all
+# SUBSCRIBER Email address to add as a subscriber for the task
+#
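+# A hypothetical invocation (the email addresses are placeholders) that asks
+# the determinator for the plain unit-test job definition might look like:
+#   ONCALL=oncall@example.com EMAIL=dev@example.com TRIGGER=fail \
+#     build_tools/rocksdb-lego-determinator unit
+#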
+
+#
+# Report configuration
+#
+REPORT_EMAIL=
+if [ ! -z $EMAIL ]; then
+ if [ -z $TRIGGER ]; then
+ TRIGGER="fail"
+ fi
+
+ REPORT_EMAIL="
+ {
+ 'type':'email',
+ 'triggers': [ '$TRIGGER' ],
+ 'emails':['$EMAIL']
+ },"
+fi
+
+CREATE_TASK=
+if [ ! -z $ONCALL ]; then
+ CREATE_TASK="
+ {
+ 'type':'task',
+ 'triggers':[ 'fail' ],
+ 'priority':0,
+ 'subscribers':[ '$SUBSCRIBER' ],
+ 'tags':[ 'rocksdb', 'ci' ],
+ },"
+fi
+
+# For now, create the tasks using only the dedicated task creation tool.
+CREATE_TASK=
+
+REPORT=
+if [[ ! -z $REPORT_EMAIL || ! -z $CREATE_TASK ]]; then
+ REPORT="'report': [
+ $REPORT_EMAIL
+ $CREATE_TASK
+ ]"
+fi
+
+#
+# Helper variables
+#
+CLEANUP_ENV="
+{
+ 'name':'Cleanup environment',
+ 'shell':'rm -rf /dev/shm/rocksdb && mkdir /dev/shm/rocksdb && (chmod +t /dev/shm || true) && make clean',
+ 'user':'root'
+}"
+
+# We will eventually set the RATIO to 1, but we want to do this
+# in steps. RATIO=$(nproc) will make it work as J=1
+if [ -z $RATIO ]; then
+ RATIO=$(nproc)
+fi
+
+if [ -z $PARALLEL_J ]; then
+ PARALLEL_J="J=$(expr $(nproc) / ${RATIO})"
+fi
+
+if [ -z $PARALLEL_j ]; then
+ PARALLEL_j="-j$(expr $(nproc) / ${RATIO})"
+fi
+
+PARALLELISM="$PARALLEL_J $PARALLEL_j"
+
+DEBUG="OPT=-g"
+SHM="TEST_TMPDIR=/dev/shm/rocksdb"
+NON_SHM="TMPD=/tmp/rocksdb_test_tmp"
+GCC_481="ROCKSDB_FBCODE_BUILD_WITH_481=1"
+ASAN="COMPILE_WITH_ASAN=1"
+CLANG="USE_CLANG=1"
+LITE="OPT=\"-DROCKSDB_LITE -g\""
+TSAN="COMPILE_WITH_TSAN=1"
+UBSAN="COMPILE_WITH_UBSAN=1"
+DISABLE_JEMALLOC="DISABLE_JEMALLOC=1"
+HTTP_PROXY="https_proxy=http://fwdproxy.29.prn1:8080 http_proxy=http://fwdproxy.29.prn1:8080 ftp_proxy=http://fwdproxy.29.prn1:8080"
+SETUP_JAVA_ENV="export $HTTP_PROXY; export JAVA_HOME=/usr/local/jdk-8u60-64/; export PATH=\$JAVA_HOME/bin:\$PATH"
+PARSER="'parser':'python build_tools/error_filter.py $1'"
+
+CONTRUN_NAME="ROCKSDB_CONTRUN_NAME"
+
+# This code is getting called under various scenarios. What we care about is to
+# understand when it's called from nightly contruns because in that case we'll
+# create tasks for any failures. To follow the existing pattern, we'll check
+# the value of $ONCALL. If it's a diff then just call `false` to make sure
+# that errors will be properly propagated to the caller.
+if [ ! -z $ONCALL ]; then
+ TASK_CREATION_TOOL="/usr/local/bin/mysql_mtr_filter --rocksdb --oncall $ONCALL"
+else
+ TASK_CREATION_TOOL="false"
+fi
+
+ARTIFACTS=" 'artifacts': [
+ {
+ 'name':'database',
+ 'paths':[ '/dev/shm/rocksdb' ],
+ }
+]"
+
+#
+# A mechanism to disable tests temporarily
+#
+DISABLE_COMMANDS="[
+ {
+ 'name':'Disable test',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ {
+ 'name':'Job disabled. Please contact test owner',
+ 'shell':'exit 1',
+ 'user':'root'
+ },
+ ],
+ }
+]"
+
+#
+# RocksDB unit test
+#
+UNIT_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and test RocksDB debug version',
+ 'shell':'$SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB unit test not under /dev/shm
+#
+UNIT_TEST_NON_SHM_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and test RocksDB debug version',
+ 'timeout': 86400,
+ 'shell':'$NON_SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=non_shm_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB release build and unit tests
+#
+RELEASE_BUILD_COMMANDS="[
+ {
+ 'name':'Rocksdb Release Build',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build RocksDB release',
+ 'shell':'make $PARALLEL_j release || $CONTRUN_NAME=release $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB unit test on gcc-4.8.1
+#
+UNIT_TEST_COMMANDS_481="[
+ {
+ 'name':'Rocksdb Unit Test on GCC 4.8.1',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and test RocksDB debug version',
+ 'shell':'$SHM $GCC_481 $DEBUG make $PARALLELISM check || $CONTRUN_NAME=unit_gcc_481_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB release build and unit tests
+#
+RELEASE_BUILD_COMMANDS_481="[
+ {
+ 'name':'Rocksdb Release on GCC 4.8.1',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build RocksDB release on GCC 4.8.1',
+ 'shell':'$GCC_481 make $PARALLEL_j release || $CONTRUN_NAME=release_gcc481 $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB unit test with CLANG
+#
+CLANG_UNIT_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and test RocksDB debug',
+ 'shell':'$CLANG $SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=clang_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB release build with CLANG
+#
+CLANG_RELEASE_BUILD_COMMANDS="[
+ {
+ 'name':'Rocksdb CLANG Release Build',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build RocksDB release',
+ 'shell':'$CLANG make $PARALLEL_j release|| $CONTRUN_NAME=clang_release $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB analyze
+#
+CLANG_ANALYZE_COMMANDS="[
+ {
+ 'name':'Rocksdb analyze',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'RocksDB build and analyze',
+ 'shell':'$CLANG $SHM $DEBUG make $PARALLEL_j analyze || $CONTRUN_NAME=clang_analyze $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB code coverage
+#
+CODE_COV_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test Code Coverage',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build, test and collect code coverage info',
+ 'shell':'$SHM $DEBUG make $PARALLELISM coverage || $CONTRUN_NAME=coverage $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB unity
+#
+UNITY_COMMANDS="[
+ {
+ 'name':'Rocksdb Unity',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build, test unity test',
+ 'shell':'$SHM $DEBUG V=1 make J=1 unity_test || $CONTRUN_NAME=unity_test $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# Build RocksDB lite
+#
+LITE_BUILD_COMMANDS="[
+ {
+ 'name':'Rocksdb Lite build',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build RocksDB debug version',
+ 'shell':'$LITE make J=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB stress/crash test
+#
+STRESS_CRASH_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Stress/Crash Test',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and run RocksDB debug stress tests',
+ 'shell':'$SHM $DEBUG make J=1 db_stress || $CONTRUN_NAME=db_stress $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ {
+ 'name':'Build and run RocksDB debug crash tests',
+ 'timeout': 86400,
+ 'shell':'$SHM $DEBUG make J=1 crash_test || $CONTRUN_NAME=crash_test $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ }
+ ],
+ $ARTIFACTS,
+ $REPORT
+ }
+]"
+
+# RocksDB write stress test.
+# We run on a disk device on purpose (i.e. no $SHM)
+# because we want to add some randomness to the fsync commands
+WRITE_STRESS_COMMANDS="[
+ {
+ 'name':'Rocksdb Write Stress Test',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and run RocksDB write stress tests',
+ 'shell':'make write_stress && python tools/write_stress_runner.py --runtime_sec=3600 --db=/tmp/rocksdb_write_stress || $CONTRUN_NAME=write_stress $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ }
+ ],
+ 'artifacts': [{'name': 'database', 'paths': ['/tmp/rocksdb_write_stress']}],
+ $REPORT
+ }
+]"
+
+
+#
+# RocksDB test under address sanitizer
+#
+ASAN_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test under ASAN',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Test RocksDB debug under ASAN',
+'shell':'set -o pipefail && ($SHM $ASAN $DEBUG make $PARALLELISM asan_check || $CONTRUN_NAME=asan_check $TASK_CREATION_TOOL) |& /usr/facebook/ops/scripts/asan_symbolize.py -d',
+ 'user':'root',
+ $PARSER
+ }
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB crash testing under address sanitizer
+#
+ASAN_CRASH_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb crash test under ASAN',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and run RocksDB debug asan_crash_test',
+ 'timeout': 86400,
+ 'shell':'$SHM $DEBUG make J=1 asan_crash_test || $CONTRUN_NAME=asan_crash_test $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB test under undefined behavior sanitizer
+#
+UBSAN_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test under UBSAN',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Test RocksDB debug under UBSAN',
+ 'shell':'set -o pipefail && $SHM $UBSAN $DEBUG make $PARALLELISM ubsan_check || $CONTRUN_NAME=ubsan_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ }
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB crash testing under undefined behavior sanitizer
+#
+UBSAN_CRASH_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb crash test under UBSAN',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build and run RocksDB debug ubsan_crash_test',
+ 'timeout': 86400,
+ 'shell':'$SHM $DEBUG make J=1 ubsan_crash_test || $CONTRUN_NAME=ubsan_crash_test $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB unit test under valgrind
+#
+VALGRIND_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test under valgrind',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Run RocksDB debug unit tests',
+ 'timeout': 86400,
+ 'shell':'$SHM $DEBUG make $PARALLELISM valgrind_test || $CONTRUN_NAME=valgrind_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB test under TSAN
+#
+TSAN_UNIT_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Unit Test under TSAN',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Run RocksDB debug unit test',
+ 'timeout': 86400,
+ 'shell':'set -o pipefail && $SHM $DEBUG $TSAN make $PARALLELISM check || $CONTRUN_NAME=tsan_check $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB crash test under TSAN
+#
+TSAN_CRASH_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Crash Test under TSAN',
+ 'oncall':'$ONCALL',
+ 'timeout': 86400,
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Compile and run',
+ 'timeout': 86400,
+ 'shell':'set -o pipefail && $SHM $DEBUG $TSAN CRASH_TEST_KILL_ODD=1887 CRASH_TEST_EXT_ARGS=--log2_keys_per_lock=22 make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB format compatible
+#
+
+run_format_compatible()
+{
+ export TEST_TMPDIR=/dev/shm/rocksdb
+ rm -rf /dev/shm/rocksdb
+ mkdir /dev/shm/rocksdb
+
+ tools/check_format_compatible.sh
+}
+
+FORMAT_COMPATIBLE_COMMANDS="[
+ {
+ 'name':'Rocksdb Format Compatible tests',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Run RocksDB debug unit test',
+ 'shell':'build_tools/rocksdb-lego-determinator run_format_compatible || $CONTRUN_NAME=run_format_compatible $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB no compression
+#
+run_no_compression()
+{
+ export TEST_TMPDIR=/dev/shm/rocksdb
+ rm -rf /dev/shm/rocksdb
+ mkdir /dev/shm/rocksdb
+ make clean
+ cat build_tools/fbcode_config.sh | grep -iv dzlib | grep -iv dlz4 | grep -iv dsnappy | grep -iv dbzip2 > .tmp.fbcode_config.sh
+ mv .tmp.fbcode_config.sh build_tools/fbcode_config.sh
+ cat Makefile | grep -v tools/ldb_test.py > .tmp.Makefile
+ mv .tmp.Makefile Makefile
+ make $DEBUG J=1 check
+}
+
+NO_COMPRESSION_COMMANDS="[
+ {
+ 'name':'Rocksdb No Compression tests',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Run RocksDB debug unit test',
+ 'shell':'build_tools/rocksdb-lego-determinator run_no_compression || $CONTRUN_NAME=run_no_compression $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB regression
+#
+run_regression()
+{
+ time -v bash -vx ./build_tools/regression_build_test.sh $(mktemp -d $WORKSPACE/leveldb.XXXX) $(mktemp leveldb_test_stats.XXXX)
+
+ # ======= report size to ODS ========
+
+ # parameters: $1 -- key, $2 -- value
+ function send_size_to_ods {
+ curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \
+ --connect-timeout 60
+ }
+
+ # === normal build ===
+ make clean
+ make -j$(nproc) static_lib
+ send_size_to_ods static_lib $(stat --printf="%s" librocksdb.a)
+ strip librocksdb.a
+ send_size_to_ods static_lib_stripped $(stat --printf="%s" librocksdb.a)
+
+ make -j$(nproc) shared_lib
+ send_size_to_ods shared_lib $(stat --printf="%s" `readlink -f librocksdb.so`)
+ strip `readlink -f librocksdb.so`
+ send_size_to_ods shared_lib_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
+
+ # === lite build ===
+ make clean
+ OPT=-DROCKSDB_LITE make -j$(nproc) static_lib
+ send_size_to_ods static_lib_lite $(stat --printf="%s" librocksdb.a)
+ strip librocksdb.a
+ send_size_to_ods static_lib_lite_stripped $(stat --printf="%s" librocksdb.a)
+
+ OPT=-DROCKSDB_LITE make -j$(nproc) shared_lib
+ send_size_to_ods shared_lib_lite $(stat --printf="%s" `readlink -f librocksdb.so`)
+ strip `readlink -f librocksdb.so`
+ send_size_to_ods shared_lib_lite_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
+}
+
+REGRESSION_COMMANDS="[
+ {
+ 'name':'Rocksdb regression commands',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Make and run script',
+ 'shell':'build_tools/rocksdb-lego-determinator run_regression || $CONTRUN_NAME=run_regression $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+#
+# RocksDB Java build
+#
+JAVA_BUILD_TEST_COMMANDS="[
+ {
+ 'name':'Rocksdb Java Build',
+ 'oncall':'$ONCALL',
+ 'steps': [
+ $CLEANUP_ENV,
+ {
+ 'name':'Build RocksDB for Java',
+ 'shell':'$SETUP_JAVA_ENV; $SHM make rocksdbjava || $CONTRUN_NAME=rocksdbjava $TASK_CREATION_TOOL',
+ 'user':'root',
+ $PARSER
+ },
+ ],
+ $REPORT
+ }
+]"
+
+
+case $1 in
+ unit)
+ echo $UNIT_TEST_COMMANDS
+ ;;
+ unit_non_shm)
+ echo $UNIT_TEST_NON_SHM_COMMANDS
+ ;;
+ release)
+ echo $RELEASE_BUILD_COMMANDS
+ ;;
+ unit_481)
+ echo $UNIT_TEST_COMMANDS_481
+ ;;
+ release_481)
+ echo $RELEASE_BUILD_COMMANDS_481
+ ;;
+ clang_unit)
+ echo $CLANG_UNIT_TEST_COMMANDS
+ ;;
+ clang_release)
+ echo $CLANG_RELEASE_BUILD_COMMANDS
+ ;;
+ clang_analyze)
+ echo $CLANG_ANALYZE_COMMANDS
+ ;;
+ code_cov)
+ echo $CODE_COV_COMMANDS
+ ;;
+ unity)
+ echo $UNITY_COMMANDS
+ ;;
+ lite)
+ echo $LITE_BUILD_COMMANDS
+ ;;
+ stress_crash)
+ echo $STRESS_CRASH_TEST_COMMANDS
+ ;;
+ write_stress)
+ echo $WRITE_STRESS_COMMANDS
+ ;;
+ asan)
+ echo $ASAN_TEST_COMMANDS
+ ;;
+ asan_crash)
+ echo $ASAN_CRASH_TEST_COMMANDS
+ ;;
+ ubsan)
+ echo $UBSAN_TEST_COMMANDS
+ ;;
+ ubsan_crash)
+ echo $UBSAN_CRASH_TEST_COMMANDS
+ ;;
+ valgrind)
+ echo $VALGRIND_TEST_COMMANDS
+ ;;
+ tsan)
+ echo $TSAN_UNIT_TEST_COMMANDS
+ ;;
+ tsan_crash)
+ echo $TSAN_CRASH_TEST_COMMANDS
+ ;;
+ format_compatible)
+ echo $FORMAT_COMPATIBLE_COMMANDS
+ ;;
+ run_format_compatible)
+ run_format_compatible
+ ;;
+ no_compression)
+ echo $NO_COMPRESSION_COMMANDS
+ ;;
+ run_no_compression)
+ run_no_compression
+ ;;
+ regression)
+ echo $REGRESSION_COMMANDS
+ ;;
+ run_regression)
+ run_regression
+ ;;
+ java_build)
+ echo $JAVA_BUILD_TEST_COMMANDS
+ ;;
+ *)
+ echo "Invalid determinator command"
+ ;;
+esac
diff --git a/c-deps/rocksdb/build_tools/run_ci_db_test.ps1 b/c-deps/rocksdb/build_tools/run_ci_db_test.ps1
new file mode 100644
index 0000000000..c8167ed957
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/run_ci_db_test.ps1
@@ -0,0 +1,456 @@
+# This script enables you running RocksDB tests by running
+# All the tests concurrently and utilizing all the cores
+Param(
+ [switch]$EnableJE = $false, # Look for and use _je executable, append _je to listed exclusions
+ [switch]$RunAll = $false, # Will attempt to discover all *_test[_je].exe binaries and run all
+ # of them as Google suites, i.e. it will run test cases concurrently,
+ # except those mentioned in $Run, which will run as individual test cases,
+ # and any excluded with $ExcludeExes or $ExcludeCases.
+ # It will also not run any individual test cases
+ # excluded by $ExcludeCases
+ [string]$SuiteRun = "", # Split test suites in test cases and run in parallel, not compatible with $RunAll
+ [string]$Run = "", # Run specified executables in parallel but do not split to test cases
+ [string]$ExcludeCases = "", # Exclude test cases, expects a comma separated list, no spaces
+ # Takes effect when $RunAll or $SuiteRun is specified. Must have full
+ # Test cases name including a group and a parameter if any
+ [string]$ExcludeExes = "", # Exclude exes from consideration, expects a comma separated list,
+ # no spaces. Takes effect only when $RunAll is specified
+ [string]$WorkFolder = "", # Direct tests to use that folder. SSD or Ram drive are better options.
+ # Number of async tasks that would run concurrently. Recommend a number below 64.
+ # However, CPU utilization really depends on the storage media. Recommend a RAM-based disk.
+ # A value of 1 will run everything serially.
+ [int]$Concurrency = 8,
+ [int]$Limit = -1 # -1 means do not limit for test purposes
+)
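+
+# Illustrative invocations (the suite name and work folder below are
+# placeholders, not a required configuration):
+#   .\build_tools\run_ci_db_test.ps1 -SuiteRun db_test -Concurrency 16
+#   .\build_tools\run_ci_db_test.ps1 -RunAll -WorkFolder D:\db_tests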
+
+# Folders and commands must be full paths, assuming
+# the current folder is the root of the git enlistment
+$StartDate = (Get-Date)
+$StartDate
+
+
+$DebugPreference = "Continue"
+
+# These tests are not Google Test suites and we should guard
+# against running them as suites
+$RunOnly = New-Object System.Collections.Generic.HashSet[string]
+$RunOnly.Add("c_test") | Out-Null
+$RunOnly.Add("compact_on_deletion_collector_test") | Out-Null
+$RunOnly.Add("merge_test") | Out-Null
+$RunOnly.Add("stringappend_test") | Out-Null # Apparently incorrectly written
+$RunOnly.Add("backupable_db_test") | Out-Null # Disabled
+
+
+if($RunAll -and $SuiteRun -ne "") {
+ Write-Error "$RunAll and $SuiteRun are not compatible"
+ exit 1
+}
+
+# If running under Appveyor, use its build folder as the root
+[string]$Appveyor = $Env:APPVEYOR_BUILD_FOLDER
+if($Appveyor -ne "") {
+ $RootFolder = $Appveyor
+} else {
+ $RootFolder = $PSScriptRoot -replace '\\build_tools', ''
+}
+
+$LogFolder = -Join($RootFolder, "\db_logs\")
+$BinariesFolder = -Join($RootFolder, "\build\Debug\")
+
+if($WorkFolder -eq "") {
+
+ # If TEST_TMPDIR is set use it
+ [string]$var = $Env:TEST_TMPDIR
+ if($var -eq "") {
+ $WorkFolder = -Join($RootFolder, "\db_tests\")
+ $Env:TEST_TMPDIR = $WorkFolder
+ } else {
+ $WorkFolder = $var
+ }
+} else {
+ # Override from the command line
+ $Env:TEST_TMPDIR = $WorkFolder
+}
+
+Write-Output "Root: $RootFolder, WorkFolder: $WorkFolder"
+Write-Output "BinariesFolder: $BinariesFolder, LogFolder: $LogFolder"
+
+# Create test directories in the current folder
+md -Path $WorkFolder -ErrorAction Ignore | Out-Null
+md -Path $LogFolder -ErrorAction Ignore | Out-Null
+
+
+$ExcludeCasesSet = New-Object System.Collections.Generic.HashSet[string]
+if($ExcludeCases -ne "") {
+ Write-Host "ExcludeCases: $ExcludeCases"
+ $l = $ExcludeCases -split ' '
+ ForEach($t in $l) {
+ $ExcludeCasesSet.Add($t) | Out-Null
+ }
+}
+
+$ExcludeExesSet = New-Object System.Collections.Generic.HashSet[string]
+if($ExcludeExes -ne "") {
+ Write-Host "ExcludeExe: $ExcludeExes"
+ $l = $ExcludeExes -split ' '
+ ForEach($t in $l) {
+ $ExcludeExesSet.Add($t) | Out-Null
+ }
+}
+
+
+# Extract the names of its tests by running db_test with --gtest_list_tests.
+# This filter removes the "#"-introduced comments, and expands to
+# fully-qualified names by changing input like this:
+#
+# DBTest.
+# Empty
+# WriteEmptyBatch
+# MultiThreaded/MultiThreadedDBTest.
+# MultiThreaded/0 # GetParam() = 0
+# MultiThreaded/1 # GetParam() = 1
+#
+# into this:
+#
+# DBTest.Empty
+# DBTest.WriteEmptyBatch
+# MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
+# MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
+#
+# Output into the parameter in a form TestName -> Log File Name
+function ExtractTestCases([string]$GTestExe, $HashTable) {
+
+ $Tests = @()
+# Run db_test to get a list of tests and store it into $a array
+ &$GTestExe --gtest_list_tests | tee -Variable Tests | Out-Null
+
+ # Current group
+ $Group=""
+
+ ForEach( $l in $Tests) {
+
+ # Leading whitespace is fine
+ $l = $l -replace '^\s+',''
+ # but no whitespace any other place
+ if($l -match "\s+") {
+ continue
+ }
+ # Trailing dot is a test group but no whitespace
+ elseif ( $l -match "\.$" ) {
+ $Group = $l
+ } else {
+ # Otherwise it is a test name, remove leading space
+ $test = $l
+ # remove trailing comment if any and create a log name
+ $test = $test -replace '\s+\#.*',''
+ $test = "$Group$test"
+
+ if($ExcludeCasesSet.Contains($test)) {
+ Write-Warning "$test case is excluded"
+ continue
+ }
+
+ $test_log = $test -replace '[\./]','_'
+ $test_log += ".log"
+ $log_path = -join ($LogFolder, $test_log)
+
+ # Add to a hashtable
+ $HashTable.Add($test, $log_path);
+ }
+ }
+}
+
+# The function removes the trailing .exe suffix, if any,
+# and creates a name for the log file.
+# It then adds the test name, if it was not excluded, into
+# a HashTable in the form test_name -> log_path
+function MakeAndAdd([string]$token, $HashTable) {
+
+ $test_name = $token -replace '.exe$', ''
+ $log_name = -join ($test_name, ".log")
+ $log_path = -join ($LogFolder, $log_name)
+ $HashTable.Add($test_name, $log_path)
+}
+
+# This function takes a list of Suites to run
+# Lists all the test cases in each of the suite
+# and populates HashOfHashes
+# Ordered by suite(exe) @{ Exe = @{ TestCase = LogName }}
+function ProcessSuites($ListOfSuites, $HashOfHashes) {
+
+ $suite_list = $ListOfSuites
+ # Problem: if you run --gtest_list_tests on
+ # a non Google Test executable then it will start executing
+ # and we will get nowhere
+ ForEach($suite in $suite_list) {
+
+ if($RunOnly.Contains($suite)) {
+ Write-Warning "$suite is excluded from running as Google test suite"
+ continue
+ }
+
+ if($EnableJE) {
+ $suite += "_je"
+ }
+
+ $Cases = [ordered]@{}
+ $Cases.Clear()
+ $suite_exe = -Join ($BinariesFolder, $suite)
+ ExtractTestCases -GTestExe $suite_exe -HashTable $Cases
+ if($Cases.Count -gt 0) {
+ $HashOfHashes.Add($suite, $Cases);
+ }
+ }
+
+ # Make logs and run
+ if($CasesToRun.Count -lt 1) {
+ Write-Error "Failed to extract tests from $SuiteRun"
+ exit 1
+ }
+
+}
+
+# This will contain all test executables to run
+
+# Hash table that contains all non suite
+# Test executable to run
+$TestExes = [ordered]@{}
+
+# Check for test exes that are not
+# Google Test suites.
+# Since these are explicitly mentioned they are not subject
+# to exclusions
+if($Run -ne "") {
+
+ $test_list = $Run -split ' '
+
+ ForEach($t in $test_list) {
+
+ if($EnableJE) {
+ $t += "_je"
+ }
+
+ MakeAndAdd -token $t -HashTable $TestExes
+ }
+
+ if($TestExes.Count -lt 1) {
+ Write-Error "Failed to extract tests from $Run"
+ exit 1
+ }
+}
+
+# Ordered by exe @{ Exe = @{ TestCase = LogName }}
+$CasesToRun = [ordered]@{}
+
+if($SuiteRun -ne "") {
+ $suite_list = $SuiteRun -split ' '
+ ProcessSuites -ListOfSuites $suite_list -HashOfHashes $CasesToRun
+}
+
+if($RunAll) {
+# Discover all the test binaries
+ if($EnableJE) {
+ $pattern = "*_test_je.exe"
+ } else {
+ $pattern = "*_test.exe"
+ }
+
+
+ $search_path = -join ($BinariesFolder, $pattern)
+ Write-Host "Binaries Search Path: $search_path"
+
+ $ListOfExe = @()
+ dir -Path $search_path | ForEach-Object {
+ $ListOfExe += ($_.Name)
+ }
+
+ # Exclude those in RunOnly from running as suites
+ $ListOfSuites = @()
+ ForEach($e in $ListOfExe) {
+
+ $e = $e -replace '.exe$', ''
+ $bare_name = $e -replace '_je$', ''
+
+ if($ExcludeExesSet.Contains($bare_name)) {
+ Write-Warning "Test $e is excluded"
+ continue
+ }
+
+ if($RunOnly.Contains($bare_name)) {
+ MakeAndAdd -token $e -HashTable $TestExes
+ } else {
+ $ListOfSuites += $bare_name
+ }
+ }
+
+ ProcessSuites -ListOfSuites $ListOfSuites -HashOfHashes $CasesToRun
+}
+
+
+Write-Host "Attempting to start: $NumTestsToStart tests"
+
+# Invoke a test with a filter and redirect all output
+$InvokeTestCase = {
+ param($exe, $test, $log);
+ &$exe --gtest_filter=$test > $log 2>&1
+}
+
+# Invoke all tests and redirect output
+$InvokeTestAsync = {
+ param($exe, $log)
+ &$exe > $log 2>&1
+}
+
+# Hash that contains tests to rerun if any failed
+# Those tests will be rerun sequentially
+# $Rerun = [ordered]@{}
+# Test limiting factor here
+[int]$count = 0
+# Overall status
+[bool]$success = $true;
+
+function RunJobs($Suites, $TestCmds, [int]$ConcurrencyVal)
+{
+ # Array to wait for any of the running jobs
+ $jobs = @()
+ # Hash JobToLog
+ $JobToLog = @{}
+
+ # Wait for all to finish and get the results
+ while(($JobToLog.Count -gt 0) -or
+ ($TestCmds.Count -gt 0) -or
+ ($Suites.Count -gt 0)) {
+
+ # Make sure we have the maximum number of concurrent jobs running, if anything is left,
+ # and that $Limit is either not set or allows us to proceed
+ while(($JobToLog.Count -lt $ConcurrencyVal) -and
+ ((($TestCmds.Count -gt 0) -or ($Suites.Count -gt 0)) -and
+ (($Limit -lt 0) -or ($count -lt $Limit)))) {
+
+ # We always favor suites to run if available
+ [string]$exe_name = ""
+ [string]$log_path = ""
+ $Cases = @{}
+
+ if($Suites.Count -gt 0) {
+ # Take the first one
+ ForEach($e in $Suites.Keys) {
+ $exe_name = $e
+ $Cases = $Suites[$e]
+ break
+ }
+ [string]$test_case = ""
+ [string]$log_path = ""
+ ForEach($c in $Cases.Keys) {
+ $test_case = $c
+ $log_path = $Cases[$c]
+ break
+ }
+
+ Write-Host "Starting $exe_name::$test_case"
+ [string]$Exe = -Join ($BinariesFolder, $exe_name)
+ $job = Start-Job -Name "$exe_name::$test_case" -ArgumentList @($Exe,$test_case,$log_path) -ScriptBlock $InvokeTestCase
+ $JobToLog.Add($job, $log_path)
+
+ $Cases.Remove($test_case)
+ if($Cases.Count -lt 1) {
+ $Suites.Remove($exe_name)
+ }
+
+ } elseif ($TestCmds.Count -gt 0) {
+
+ ForEach($e in $TestCmds.Keys) {
+ $exe_name = $e
+ $log_path = $TestCmds[$e]
+ break
+ }
+
+ [string]$Exe = -Join ($BinariesFolder, $exe_name)
+ $job = Start-Job -Name $exe_name -ScriptBlock $InvokeTestAsync -ArgumentList @($Exe,$log_path)
+ $JobToLog.Add($job, $log_path)
+
+ $TestCmds.Remove($exe_name)
+
+ } else {
+ Write-Error "In the job loop but nothing to run"
+ exit 1
+ }
+
+ ++$count
+ } # End of Job starting loop
+
+ if($JobToLog.Count -lt 1) {
+ break
+ }
+
+ $jobs = @()
+ foreach($k in $JobToLog.Keys) { $jobs += $k }
+
+ $completed = Wait-Job -Job $jobs -Any
+ $log = $JobToLog[$completed]
+ $JobToLog.Remove($completed)
+
+ $message = -join @($completed.Name, " State: ", ($completed.State))
+
+ $log_content = @(Get-Content $log)
+
+ if($completed.State -ne "Completed") {
+ $success = $false
+ Write-Warning $message
+ $log_content | Write-Warning
+ } else {
+ # Scan the log. If we find PASSED and no occurrence of FAILED
+ # then it is a success
+ [bool]$pass_found = $false
+ ForEach($l in $log_content) {
+
+ if(($l -match "^\[\s+FAILED") -or
+ ($l -match "Assertion failed:")) {
+ $pass_found = $false
+ break
+ }
+
+ if(($l -match "^\[\s+PASSED") -or
+ ($l -match " : PASSED$") -or
+ ($l -match "^PASS$") -or # Special c_test case
+ ($l -match "Passed all tests!") ) {
+ $pass_found = $true
+ }
+ }
+
+ if(!$pass_found) {
+ $success = $false;
+ Write-Warning $message
+ $log_content | Write-Warning
+ } else {
+ Write-Host $message
+ }
+ }
+
+ # Remove cached job info from the system
+ # Should be no output
+ Receive-Job -Job $completed | Out-Null
+ }
+}
+
+RunJobs -Suites $CasesToRun -TestCmds $TestExes -ConcurrencyVal $Concurrency
+
+$EndDate = (Get-Date)
+
+New-TimeSpan -Start $StartDate -End $EndDate |
+ ForEach-Object {
+ "Elapsed time: {0:g}" -f $_
+ }
+
+
+if(!$success) {
+# This does not succeed in killing off jobs quickly,
+# so we simply exit
+# Remove-Job -Job $jobs -Force
+# and indicate failure using this exit code
+ exit 1
+ }
+
+ exit 0
+
+
diff --git a/c-deps/rocksdb/build_tools/update_dependencies.sh b/c-deps/rocksdb/build_tools/update_dependencies.sh
new file mode 100755
index 0000000000..e72d3dbd67
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/update_dependencies.sh
@@ -0,0 +1,132 @@
+#!/bin/sh
+#
+# Update dependencies.sh file with the latest available versions
+
+BASEDIR=$(dirname $0)
+OUTPUT=""
+
+function log_variable()
+{
+ echo "$1=${!1}" >> "$OUTPUT"
+}
+
+
+TP2_LATEST="/mnt/vol/engshare/fbcode/third-party2"
+## $1 => lib name
+## $2 => lib version (if not provided, will try to pick latest)
+## $3 => platform (if not provided, will try to pick latest gcc)
+##
+## get_lib_base will set a variable named ${LIB_NAME}_BASE to the lib location
+function get_lib_base()
+{
+ local lib_name=$1
+ local lib_version=$2
+ local lib_platform=$3
+
+ local result="$TP2_LATEST/$lib_name/"
+
+ # Lib Version
+ if [ -z "$lib_version" ] || [ "$lib_version" = "LATEST" ]; then
+ # version is not provided, use latest
+ result=`ls -dr1v $result/*/ | head -n1`
+ else
+ result="$result/$lib_version/"
+ fi
+
+ # Lib Platform
+ if [ -z "$lib_platform" ]; then
+ # platform is not provided, use latest gcc
+ result=`ls -dr1v $result/gcc-*[^fb]/ | head -n1`
+ else
+ echo $lib_platform
+ result="$result/$lib_platform/"
+ fi
+
+ result=`ls -1d $result/*/ | head -n1`
+
+ # lib_name => LIB_NAME_BASE
+ local __res_var=${lib_name^^}"_BASE"
+ __res_var=`echo $__res_var | tr - _`
+ # LIB_NAME_BASE=$result
+ eval $__res_var=`readlink -f $result`
+
+ log_variable $__res_var
+}
+
+###########################################################
+# 5.x dependencies #
+###########################################################
+
+OUTPUT="$BASEDIR/dependencies.sh"
+
+rm -f "$OUTPUT"
+touch "$OUTPUT"
+
+echo "Writing dependencies to $OUTPUT"
+
+# Compilers locations
+GCC_BASE=`readlink -f $TP2_LATEST/gcc/5.x/centos6-native/*/`
+CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
+
+log_variable GCC_BASE
+log_variable CLANG_BASE
+
+# Libraries locations
+get_lib_base libgcc 5.x gcc-5-glibc-2.23
+get_lib_base glibc 2.23 gcc-5-glibc-2.23
+get_lib_base snappy LATEST gcc-5-glibc-2.23
+get_lib_base zlib LATEST gcc-5-glibc-2.23
+get_lib_base bzip2 LATEST gcc-5-glibc-2.23
+get_lib_base lz4 LATEST gcc-5-glibc-2.23
+get_lib_base zstd LATEST gcc-5-glibc-2.23
+get_lib_base gflags LATEST gcc-5-glibc-2.23
+get_lib_base jemalloc LATEST gcc-5-glibc-2.23
+get_lib_base numa LATEST gcc-5-glibc-2.23
+get_lib_base libunwind LATEST gcc-5-glibc-2.23
+get_lib_base tbb LATEST gcc-5-glibc-2.23
+
+get_lib_base kernel-headers 4.0.9-36_fbk5_2933_gd092e3f gcc-5-glibc-2.23
+get_lib_base binutils LATEST centos6-native
+get_lib_base valgrind LATEST gcc-5-glibc-2.23
+get_lib_base lua 5.2.3 gcc-5-glibc-2.23
+
+git diff $OUTPUT
+
+###########################################################
+# 4.8.1 dependencies #
+###########################################################
+
+OUTPUT="$BASEDIR/dependencies_4.8.1.sh"
+
+rm -f "$OUTPUT"
+touch "$OUTPUT"
+
+echo "Writing 4.8.1 dependencies to $OUTPUT"
+
+# Compilers locations
+GCC_BASE=`readlink -f $TP2_LATEST/gcc/4.8.1/centos6-native/*/`
+CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
+
+log_variable GCC_BASE
+log_variable CLANG_BASE
+
+# Libraries locations
+get_lib_base libgcc 4.8.1 gcc-4.8.1-glibc-2.17
+get_lib_base glibc 2.17 gcc-4.8.1-glibc-2.17
+get_lib_base snappy LATEST gcc-4.8.1-glibc-2.17
+get_lib_base zlib LATEST gcc-4.8.1-glibc-2.17
+get_lib_base bzip2 LATEST gcc-4.8.1-glibc-2.17
+get_lib_base lz4 LATEST gcc-4.8.1-glibc-2.17
+get_lib_base zstd LATEST gcc-4.8.1-glibc-2.17
+get_lib_base gflags LATEST gcc-4.8.1-glibc-2.17
+get_lib_base jemalloc LATEST gcc-4.8.1-glibc-2.17
+get_lib_base numa LATEST gcc-4.8.1-glibc-2.17
+get_lib_base libunwind LATEST gcc-4.8.1-glibc-2.17
+get_lib_base tbb 4.0_update2 gcc-4.8.1-glibc-2.17
+
+get_lib_base kernel-headers LATEST gcc-4.8.1-glibc-2.17
+get_lib_base binutils LATEST centos6-native
+get_lib_base valgrind 3.8.1 gcc-4.8.1-glibc-2.17
+get_lib_base lua 5.2.3 centos6-native
+
+git diff $OUTPUT
diff --git a/c-deps/rocksdb/build_tools/version.sh b/c-deps/rocksdb/build_tools/version.sh
new file mode 100755
index 0000000000..f3ca98cf61
--- /dev/null
+++ b/c-deps/rocksdb/build_tools/version.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+if [ "$#" = "0" ]; then
+ echo "Usage: $0 major|minor|patch|full"
+ exit 1
+fi
+
+if [ "$1" = "major" ]; then
+ cat include/rocksdb/version.h | grep MAJOR | head -n1 | awk '{print $3}'
+fi
+if [ "$1" = "minor" ]; then
+ cat include/rocksdb/version.h | grep MINOR | head -n1 | awk '{print $3}'
+fi
+if [ "$1" = "patch" ]; then
+ cat include/rocksdb/version.h | grep PATCH | head -n1 | awk '{print $3}'
+fi
+if [ "$1" = "full" ]; then
+ awk '/#define ROCKSDB/ { env[$2] = $3 }
+ END { printf "%s.%s.%s\n", env["ROCKSDB_MAJOR"],
+ env["ROCKSDB_MINOR"],
+ env["ROCKSDB_PATCH"] }' \
+ include/rocksdb/version.h
+fi
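+
+# Illustrative usage (the version number shown is a placeholder; the real
+# value comes from include/rocksdb/version.h):
+#   $ build_tools/version.sh full
+#   5.8.0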
diff --git a/c-deps/rocksdb/cache/cache_bench.cc b/c-deps/rocksdb/cache/cache_bench.cc
new file mode 100644
index 0000000000..16c2ced1dd
--- /dev/null
+++ b/c-deps/rocksdb/cache/cache_bench.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#ifndef GFLAGS
+#include <cstdio>
+int main() {
+ fprintf(stderr, "Please install gflags to run rocksdb tools\n");
+ return 1;
+}
+#else
+
+#include <cinttypes>
+#include <cstdio>
+#include <cstdlib>
+#include <gflags/gflags.h>
+
+#include "rocksdb/db.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/env.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+
+using GFLAGS::ParseCommandLineFlags;
+
+static const uint32_t KB = 1024;
+
+DEFINE_int32(threads, 16, "Number of concurrent threads to run.");
+DEFINE_int64(cache_size, 8 * KB * KB,
+ "Number of bytes to use as a cache of uncompressed data.");
+DEFINE_int32(num_shard_bits, 4, "shard_bits.");
+
+DEFINE_int64(max_key, 1 * KB * KB * KB, "Max number of key to place in cache");
+DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
+
+DEFINE_bool(populate_cache, false, "Populate cache before operations");
+DEFINE_int32(insert_percent, 40,
+ "Ratio of insert to total workload (expressed as a percentage)");
+DEFINE_int32(lookup_percent, 50,
+ "Ratio of lookup to total workload (expressed as a percentage)");
+DEFINE_int32(erase_percent, 10,
+ "Ratio of erase to total workload (expressed as a percentage)");
+
+DEFINE_bool(use_clock_cache, false, "");
+
+namespace rocksdb {
+
+class CacheBench;
+namespace {
+void deleter(const Slice& key, void* value) {
+ delete reinterpret_cast<char*>(value);
+}
+
+// State shared by all concurrent executions of the same benchmark.
+class SharedState {
+ public:
+ explicit SharedState(CacheBench* cache_bench)
+ : cv_(&mu_),
+ num_threads_(FLAGS_threads),
+ num_initialized_(0),
+ start_(false),
+ num_done_(0),
+ cache_bench_(cache_bench) {
+ }
+
+ ~SharedState() {}
+
+ port::Mutex* GetMutex() {
+ return &mu_;
+ }
+
+ port::CondVar* GetCondVar() {
+ return &cv_;
+ }
+
+ CacheBench* GetCacheBench() const {
+ return cache_bench_;
+ }
+
+ void IncInitialized() {
+ num_initialized_++;
+ }
+
+ void IncDone() {
+ num_done_++;
+ }
+
+ bool AllInitialized() const {
+ return num_initialized_ >= num_threads_;
+ }
+
+ bool AllDone() const {
+ return num_done_ >= num_threads_;
+ }
+
+ void SetStart() {
+ start_ = true;
+ }
+
+ bool Started() const {
+ return start_;
+ }
+
+ private:
+ port::Mutex mu_;
+ port::CondVar cv_;
+
+ const uint64_t num_threads_;
+ uint64_t num_initialized_;
+ bool start_;
+ uint64_t num_done_;
+
+ CacheBench* cache_bench_;
+};
+
+// Per-thread state for concurrent executions of the same benchmark.
+struct ThreadState {
+ uint32_t tid;
+ Random rnd;
+ SharedState* shared;
+
+ ThreadState(uint32_t index, SharedState* _shared)
+ : tid(index), rnd(1000 + index), shared(_shared) {}
+};
+} // namespace
+
+class CacheBench {
+ public:
+ CacheBench() : num_threads_(FLAGS_threads) {
+ if (FLAGS_use_clock_cache) {
+ cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
+ if (!cache_) {
+ fprintf(stderr, "Clock cache not supported.\n");
+ exit(1);
+ }
+ } else {
+ cache_ = NewLRUCache(FLAGS_cache_size, FLAGS_num_shard_bits);
+ }
+ }
+
+ ~CacheBench() {}
+
+ void PopulateCache() {
+ Random rnd(1);
+ for (int64_t i = 0; i < FLAGS_cache_size; i++) {
+ uint64_t rand_key = rnd.Next() % FLAGS_max_key;
+ // Cast uint64* to be char*, data would be copied to cache
+ Slice key(reinterpret_cast<char*>(&rand_key), 8);
+ // do insert
+ cache_->Insert(key, new char[10], 1, &deleter);
+ }
+ }
+
+ bool Run() {
+ rocksdb::Env* env = rocksdb::Env::Default();
+
+ PrintEnv();
+ SharedState shared(this);
+ std::vector<ThreadState*> threads(num_threads_);
+ for (uint32_t i = 0; i < num_threads_; i++) {
+ threads[i] = new ThreadState(i, &shared);
+ env->StartThread(ThreadBody, threads[i]);
+ }
+ {
+ MutexLock l(shared.GetMutex());
+ while (!shared.AllInitialized()) {
+ shared.GetCondVar()->Wait();
+ }
+ // Record start time
+ uint64_t start_time = env->NowMicros();
+
+ // Start all threads
+ shared.SetStart();
+ shared.GetCondVar()->SignalAll();
+
+ // Wait threads to complete
+ while (!shared.AllDone()) {
+ shared.GetCondVar()->Wait();
+ }
+
+ // Record end time
+ uint64_t end_time = env->NowMicros();
+ double elapsed = static_cast<double>(end_time - start_time) * 1e-6;
+ uint32_t qps = static_cast<uint32_t>(
+ static_cast<double>(FLAGS_threads * FLAGS_ops_per_thread) / elapsed);
+ fprintf(stdout, "Complete in %.3f s; QPS = %u\n", elapsed, qps);
+ }
+ return true;
+ }
+
+ private:
+ std::shared_ptr<Cache> cache_;
+ uint32_t num_threads_;
+
+ static void ThreadBody(void* v) {
+ ThreadState* thread = reinterpret_cast<ThreadState*>(v);
+ SharedState* shared = thread->shared;
+
+ {
+ MutexLock l(shared->GetMutex());
+ shared->IncInitialized();
+ if (shared->AllInitialized()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ while (!shared->Started()) {
+ shared->GetCondVar()->Wait();
+ }
+ }
+ thread->shared->GetCacheBench()->OperateCache(thread);
+
+ {
+ MutexLock l(shared->GetMutex());
+ shared->IncDone();
+ if (shared->AllDone()) {
+ shared->GetCondVar()->SignalAll();
+ }
+ }
+ }
+
+ void OperateCache(ThreadState* thread) {
+ for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
+ uint64_t rand_key = thread->rnd.Next() % FLAGS_max_key;
+ // Cast uint64* to be char*, data would be copied to cache
+ Slice key(reinterpret_cast<char*>(&rand_key), 8);
+ int32_t prob_op = thread->rnd.Uniform(100);
+ if (prob_op >= 0 && prob_op < FLAGS_insert_percent) {
+ // do insert
+ cache_->Insert(key, new char[10], 1, &deleter);
+ } else if (prob_op -= FLAGS_insert_percent &&
+ prob_op < FLAGS_lookup_percent) {
+ // do lookup
+ auto handle = cache_->Lookup(key);
+ if (handle) {
+ cache_->Release(handle);
+ }
+ } else if (prob_op -= FLAGS_lookup_percent &&
+ prob_op < FLAGS_erase_percent) {
+ // do erase
+ cache_->Erase(key);
+ }
+ }
+ }
+
+ void PrintEnv() const {
+ printf("RocksDB version : %d.%d\n", kMajorVersion, kMinorVersion);
+ printf("Number of threads : %d\n", FLAGS_threads);
+ printf("Ops per thread : %" PRIu64 "\n", FLAGS_ops_per_thread);
+ printf("Cache size : %" PRIu64 "\n", FLAGS_cache_size);
+ printf("Num shard bits : %d\n", FLAGS_num_shard_bits);
+ printf("Max key : %" PRIu64 "\n", FLAGS_max_key);
+ printf("Populate cache : %d\n", FLAGS_populate_cache);
+ printf("Insert percentage : %d%%\n", FLAGS_insert_percent);
+ printf("Lookup percentage : %d%%\n", FLAGS_lookup_percent);
+ printf("Erase percentage : %d%%\n", FLAGS_erase_percent);
+ printf("----------------------------\n");
+ }
+};
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ ParseCommandLineFlags(&argc, &argv, true);
+
+ if (FLAGS_threads <= 0) {
+ fprintf(stderr, "threads number <= 0\n");
+ exit(1);
+ }
+
+ rocksdb::CacheBench bench;
+ if (FLAGS_populate_cache) {
+ bench.PopulateCache();
+ }
+ if (bench.Run()) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+#endif // GFLAGS
diff --git a/c-deps/rocksdb/cache/cache_test.cc b/c-deps/rocksdb/cache/cache_test.cc
new file mode 100644
index 0000000000..8e241226d9
--- /dev/null
+++ b/c-deps/rocksdb/cache/cache_test.cc
@@ -0,0 +1,703 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "rocksdb/cache.h"
+
+#include <algorithm>
+#include <forward_list>
+#include <functional>
+#include <iostream>
+#include <string>
+#include <vector>
+#include "cache/clock_cache.h"
+#include "cache/lru_cache.h"
+#include "util/coding.h"
+#include "util/string_util.h"
+#include "util/testharness.h"
+
+namespace rocksdb {
+
+// Conversions between numeric keys/values and the types expected by Cache.
+static std::string EncodeKey(int k) {
+ std::string result;
+ PutFixed32(&result, k);
+ return result;
+}
+static int DecodeKey(const Slice& k) {
+ assert(k.size() == 4);
+ return DecodeFixed32(k.data());
+}
+static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
+static int DecodeValue(void* v) {
+ return static_cast<int>(reinterpret_cast<uintptr_t>(v));
+}
+
+const std::string kLRU = "lru";
+const std::string kClock = "clock";
+
+void dumbDeleter(const Slice& key, void* value) {}
+
+void eraseDeleter(const Slice& key, void* value) {
+ Cache* cache = reinterpret_cast<Cache*>(value);
+ cache->Erase("foo");
+}
+
+class CacheTest : public testing::TestWithParam<std::string> {
+ public:
+ static CacheTest* current_;
+
+ static void Deleter(const Slice& key, void* v) {
+ current_->deleted_keys_.push_back(DecodeKey(key));
+ current_->deleted_values_.push_back(DecodeValue(v));
+ }
+
+ static const int kCacheSize = 1000;
+ static const int kNumShardBits = 4;
+
+ static const int kCacheSize2 = 100;
+ static const int kNumShardBits2 = 2;
+
+ std::vector<int> deleted_keys_;
+ std::vector<int> deleted_values_;
+ shared_ptr<Cache> cache_;
+ shared_ptr<Cache> cache2_;
+
+ CacheTest()
+ : cache_(NewCache(kCacheSize, kNumShardBits, false)),
+ cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
+ current_ = this;
+ }
+
+ ~CacheTest() {
+ }
+
+ std::shared_ptr<Cache> NewCache(size_t capacity) {
+ auto type = GetParam();
+ if (type == kLRU) {
+ return NewLRUCache(capacity);
+ }
+ if (type == kClock) {
+ return NewClockCache(capacity);
+ }
+ return nullptr;
+ }
+
+ std::shared_ptr<Cache> NewCache(size_t capacity, int num_shard_bits,
+ bool strict_capacity_limit) {
+ auto type = GetParam();
+ if (type == kLRU) {
+ return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
+ }
+ if (type == kClock) {
+ return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
+ }
+ return nullptr;
+ }
+
+ int Lookup(shared_ptr<Cache> cache, int key) {
+ Cache::Handle* handle = cache->Lookup(EncodeKey(key));
+ const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
+ if (handle != nullptr) {
+ cache->Release(handle);
+ }
+ return r;
+ }
+
+ void Insert(shared_ptr<Cache> cache, int key, int value, int charge = 1) {
+ cache->Insert(EncodeKey(key), EncodeValue(value), charge,
+ &CacheTest::Deleter);
+ }
+
+ void Erase(shared_ptr<Cache> cache, int key) {
+ cache->Erase(EncodeKey(key));
+ }
+
+
+ int Lookup(int key) {
+ return Lookup(cache_, key);
+ }
+
+ void Insert(int key, int value, int charge = 1) {
+ Insert(cache_, key, value, charge);
+ }
+
+ void Erase(int key) {
+ Erase(cache_, key);
+ }
+
+ int Lookup2(int key) {
+ return Lookup(cache2_, key);
+ }
+
+ void Insert2(int key, int value, int charge = 1) {
+ Insert(cache2_, key, value, charge);
+ }
+
+ void Erase2(int key) {
+ Erase(cache2_, key);
+ }
+};
+CacheTest* CacheTest::current_;
+
+TEST_P(CacheTest, UsageTest) {
+ // cache is shared_ptr and will be automatically cleaned up.
+ const uint64_t kCapacity = 100000;
+ auto cache = NewCache(kCapacity, 8, false);
+
+ size_t usage = 0;
+ char value[10] = "abcdef";
+ // make sure everything will be cached
+ for (int i = 1; i < 100; ++i) {
+ std::string key(i, 'a');
+ auto kv_size = key.size() + 5;
+ cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
+ usage += kv_size;
+ ASSERT_EQ(usage, cache->GetUsage());
+ }
+
+ // make sure the cache will be overloaded
+ for (uint64_t i = 1; i < kCapacity; ++i) {
+ auto key = ToString(i);
+ cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
+ dumbDeleter);
+ }
+
+ // the usage should be close to the capacity
+ ASSERT_GT(kCapacity, cache->GetUsage());
+ ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
+}
+
+TEST_P(CacheTest, PinnedUsageTest) {
+ // cache is shared_ptr and will be automatically cleaned up.
+ const uint64_t kCapacity = 100000;
+ auto cache = NewCache(kCapacity, 8, false);
+
+ size_t pinned_usage = 0;
+ char value[10] = "abcdef";
+
+ std::forward_list<Cache::Handle*> unreleased_handles;
+
+ // Add entries. Unpin some of them after insertion. Then, pin some of them
+ // again. Check GetPinnedUsage().
+ for (int i = 1; i < 100; ++i) {
+ std::string key(i, 'a');
+ auto kv_size = key.size() + 5;
+ Cache::Handle* handle;
+ cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
+ &handle);
+ pinned_usage += kv_size;
+ ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
+ if (i % 2 == 0) {
+ cache->Release(handle);
+ pinned_usage -= kv_size;
+ ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
+ } else {
+ unreleased_handles.push_front(handle);
+ }
+ if (i % 3 == 0) {
+ unreleased_handles.push_front(cache->Lookup(key));
+ // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
+ // usage increased
+ if (i % 2 == 0) {
+ pinned_usage += kv_size;
+ }
+ ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
+ }
+ }
+
+ // check that overloading the cache does not change the pinned usage
+ for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
+ auto key = ToString(i);
+ cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
+ dumbDeleter);
+ }
+ ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
+
+ // release handles for pinned entries to prevent memory leaks
+ for (auto handle : unreleased_handles) {
+ cache->Release(handle);
+ }
+}
+
+TEST_P(CacheTest, HitAndMiss) {
+ ASSERT_EQ(-1, Lookup(100));
+
+ Insert(100, 101);
+ ASSERT_EQ(101, Lookup(100));
+ ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
+
+ Insert(200, 201);
+ ASSERT_EQ(101, Lookup(100));
+ ASSERT_EQ(201, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
+
+ Insert(100, 102);
+ ASSERT_EQ(102, Lookup(100));
+ ASSERT_EQ(201, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
+
+ ASSERT_EQ(1U, deleted_keys_.size());
+ ASSERT_EQ(100, deleted_keys_[0]);
+ ASSERT_EQ(101, deleted_values_[0]);
+}
+
+TEST_P(CacheTest, InsertSameKey) {
+ Insert(1, 1);
+ Insert(1, 2);
+ ASSERT_EQ(2, Lookup(1));
+}
+
+TEST_P(CacheTest, Erase) {
+ Erase(200);
+ ASSERT_EQ(0U, deleted_keys_.size());
+
+ Insert(100, 101);
+ Insert(200, 201);
+ Erase(100);
+ ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(201, Lookup(200));
+ ASSERT_EQ(1U, deleted_keys_.size());
+ ASSERT_EQ(100, deleted_keys_[0]);
+ ASSERT_EQ(101, deleted_values_[0]);
+
+ Erase(100);
+ ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(201, Lookup(200));
+ ASSERT_EQ(1U, deleted_keys_.size());
+}
+
+TEST_P(CacheTest, EntriesArePinned) {
+ Insert(100, 101);
+ Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
+ ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
+ ASSERT_EQ(1U, cache_->GetUsage());
+
+ Insert(100, 102);
+ Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
+ ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
+ ASSERT_EQ(0U, deleted_keys_.size());
+ ASSERT_EQ(2U, cache_->GetUsage());
+
+ cache_->Release(h1);
+ ASSERT_EQ(1U, deleted_keys_.size());
+ ASSERT_EQ(100, deleted_keys_[0]);
+ ASSERT_EQ(101, deleted_values_[0]);
+ ASSERT_EQ(1U, cache_->GetUsage());
+
+ Erase(100);
+ ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(1U, deleted_keys_.size());
+ ASSERT_EQ(1U, cache_->GetUsage());
+
+ cache_->Release(h2);
+ ASSERT_EQ(2U, deleted_keys_.size());
+ ASSERT_EQ(100, deleted_keys_[1]);
+ ASSERT_EQ(102, deleted_values_[1]);
+ ASSERT_EQ(0U, cache_->GetUsage());
+}
+
+TEST_P(CacheTest, EvictionPolicy) {
+ Insert(100, 101);
+ Insert(200, 201);
+
+ // Frequently used entry must be kept around
+ for (int i = 0; i < kCacheSize + 100; i++) {
+ Insert(1000+i, 2000+i);
+ ASSERT_EQ(101, Lookup(100));
+ }
+ ASSERT_EQ(101, Lookup(100));
+ ASSERT_EQ(-1, Lookup(200));
+}
+
+TEST_P(CacheTest, ExternalRefPinsEntries) {
+ Insert(100, 101);
+ Cache::Handle* h = cache_->Lookup(EncodeKey(100));
+ ASSERT_TRUE(cache_->Ref(h));
+ ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
+ ASSERT_EQ(1U, cache_->GetUsage());
+
+ for (int i = 0; i < 3; ++i) {
+ if (i > 0) {
+ // First release (i == 1) corresponds to Ref(), second release (i == 2)
+ // corresponds to Lookup(). Then, since all external refs are released,
+ // the below insertions should push out the cache entry.
+ cache_->Release(h);
+ }
+ // double cache size because the usage bit in block cache prevents 100 from
+ // being evicted in the first kCacheSize iterations
+ for (int j = 0; j < 2 * kCacheSize + 100; j++) {
+ Insert(1000 + j, 2000 + j);
+ }
+ if (i < 2) {
+ ASSERT_EQ(101, Lookup(100));
+ }
+ }
+ ASSERT_EQ(-1, Lookup(100));
+}
+
+TEST_P(CacheTest, EvictionPolicyRef) {
+ Insert(100, 101);
+ Insert(101, 102);
+ Insert(102, 103);
+ Insert(103, 104);
+ Insert(200, 101);
+ Insert(201, 102);
+ Insert(202, 103);
+ Insert(203, 104);
+ Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
+ Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
+ Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
+ Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
+ Insert(300, 101);
+ Insert(301, 102);
+ Insert(302, 103);
+ Insert(303, 104);
+
+ // Insert entries much more than Cache capacity
+ for (int i = 0; i < kCacheSize + 100; i++) {
+ Insert(1000 + i, 2000 + i);
+ }
+
+ // Check whether the entries inserted in the beginning
+ // are evicted. Ones without extra ref are evicted and
+ // those with are not.
+ ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(101));
+ ASSERT_EQ(-1, Lookup(102));
+ ASSERT_EQ(-1, Lookup(103));
+
+ ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(301));
+ ASSERT_EQ(-1, Lookup(302));
+ ASSERT_EQ(-1, Lookup(303));
+
+ ASSERT_EQ(101, Lookup(200));
+ ASSERT_EQ(102, Lookup(201));
+ ASSERT_EQ(103, Lookup(202));
+ ASSERT_EQ(104, Lookup(203));
+
+ // Cleaning up all the handles
+ cache_->Release(h201);
+ cache_->Release(h202);
+ cache_->Release(h203);
+ cache_->Release(h204);
+}
+
+TEST_P(CacheTest, EvictEmptyCache) {
+ // Insert an item larger than the capacity to trigger eviction on an empty cache.
+ auto cache = NewCache(1, 0, false);
+ ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
+}
+
+TEST_P(CacheTest, EraseFromDeleter) {
+ // Have deleter which will erase item from cache, which will re-enter
+ // the cache at that point.
+ std::shared_ptr<Cache> cache = NewCache(10, 0, false);
+ ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
+ ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
+ cache->Erase("bar");
+ ASSERT_EQ(nullptr, cache->Lookup("foo"));
+ ASSERT_EQ(nullptr, cache->Lookup("bar"));
+}
+
+TEST_P(CacheTest, ErasedHandleState) {
+ // insert a key and get two handles
+ Insert(100, 1000);
+ Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
+ Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
+ ASSERT_EQ(h1, h2);
+ ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
+ ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);
+
+ // delete the key from the cache
+ Erase(100);
+ // can no longer find in the cache
+ ASSERT_EQ(-1, Lookup(100));
+
+ // release one handle
+ cache_->Release(h1);
+ // still can't find in cache
+ ASSERT_EQ(-1, Lookup(100));
+
+ cache_->Release(h2);
+}
+
+TEST_P(CacheTest, HeavyEntries) {
+ // Add a bunch of light and heavy entries and then count the combined
+ // size of items still in the cache, which must be approximately the
+ // same as the total capacity.
+ const int kLight = 1;
+ const int kHeavy = 10;
+ int added = 0;
+ int index = 0;
+ while (added < 2*kCacheSize) {
+ const int weight = (index & 1) ? kLight : kHeavy;
+ Insert(index, 1000+index, weight);
+ added += weight;
+ index++;
+ }
+
+ int cached_weight = 0;
+ for (int i = 0; i < index; i++) {
+ const int weight = (i & 1 ? kLight : kHeavy);
+ int r = Lookup(i);
+ if (r >= 0) {
+ cached_weight += weight;
+ ASSERT_EQ(1000+i, r);
+ }
+ }
+ ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+}
+
+TEST_P(CacheTest, NewId) {
+ uint64_t a = cache_->NewId();
+ uint64_t b = cache_->NewId();
+ ASSERT_NE(a, b);
+}
+
+
+class Value {
+ public:
+ explicit Value(size_t v) : v_(v) { }
+
+ size_t v_;
+};
+
+namespace {
+void deleter(const Slice& key, void* value) {
+ delete static_cast<Value*>(value);
+}
+} // namespace
+
+TEST_P(CacheTest, ReleaseAndErase) {
+ std::shared_ptr<Cache> cache = NewCache(5, 0, false);
+ Cache::Handle* handle;
+ Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
+ &CacheTest::Deleter, &handle);
+ ASSERT_TRUE(s.ok());
+ ASSERT_EQ(5U, cache->GetCapacity());
+ ASSERT_EQ(1U, cache->GetUsage());
+ ASSERT_EQ(0U, deleted_keys_.size());
+ auto erased = cache->Release(handle, true);
+ ASSERT_TRUE(erased);
+ // This tests that deleter has been called
+ ASSERT_EQ(1U, deleted_keys_.size());
+}
+
+TEST_P(CacheTest, ReleaseWithoutErase) {
+ std::shared_ptr<Cache> cache = NewCache(5, 0, false);
+ Cache::Handle* handle;
+ Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
+ &CacheTest::Deleter, &handle);
+ ASSERT_TRUE(s.ok());
+ ASSERT_EQ(5U, cache->GetCapacity());
+ ASSERT_EQ(1U, cache->GetUsage());
+ ASSERT_EQ(0U, deleted_keys_.size());
+ auto erased = cache->Release(handle);
+ ASSERT_FALSE(erased);
+ // This tests that deleter is not called. When cache has free capacity it is
+ // not expected to immediately erase the released items.
+ ASSERT_EQ(0U, deleted_keys_.size());
+}
+
+TEST_P(CacheTest, SetCapacity) {
+ // test1: increase capacity
+ // lets create a cache with capacity 5,
+ // then, insert 5 elements, then increase capacity
+ // to 10, returned capacity should be 10, usage=5
+ std::shared_ptr<Cache> cache = NewCache(5, 0, false);
+ std::vector<Cache::Handle*> handles(10);
+ // Insert 5 entries, but not releasing.
+ for (size_t i = 0; i < 5; i++) {
+ std::string key = ToString(i+1);
+ Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
+ ASSERT_TRUE(s.ok());
+ }
+ ASSERT_EQ(5U, cache->GetCapacity());
+ ASSERT_EQ(5U, cache->GetUsage());
+ cache->SetCapacity(10);
+ ASSERT_EQ(10U, cache->GetCapacity());
+ ASSERT_EQ(5U, cache->GetUsage());
+
+ // test2: decrease capacity
+ // insert 5 more elements to cache, then release 5,
+ // then decrease capacity to 7, final capacity should be 7
+ // and usage should be 7
+ for (size_t i = 5; i < 10; i++) {
+ std::string key = ToString(i+1);
+ Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
+ ASSERT_TRUE(s.ok());
+ }
+ ASSERT_EQ(10U, cache->GetCapacity());
+ ASSERT_EQ(10U, cache->GetUsage());
+ for (size_t i = 0; i < 5; i++) {
+ cache->Release(handles[i]);
+ }
+ ASSERT_EQ(10U, cache->GetCapacity());
+ ASSERT_EQ(10U, cache->GetUsage());
+ cache->SetCapacity(7);
+ ASSERT_EQ(7, cache->GetCapacity());
+ ASSERT_EQ(7, cache->GetUsage());
+
+ // release remaining 5 to keep valgrind happy
+ for (size_t i = 5; i < 10; i++) {
+ cache->Release(handles[i]);
+ }
+}
+
+TEST_P(CacheTest, SetStrictCapacityLimit) {
+ // test1: set the flag to false. Insert more keys than capacity. See if they
+ // all go through.
+ std::shared_ptr<Cache> cache = NewLRUCache(5, 0, false);
+ std::vector<Cache::Handle*> handles(10);
+ Status s;
+ for (size_t i = 0; i < 10; i++) {
+ std::string key = ToString(i + 1);
+ s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
+ ASSERT_OK(s);
+ ASSERT_NE(nullptr, handles[i]);
+ }
+
+ // test2: set the flag to true. Insert and check if it fails.
+ std::string extra_key = "extra";
+ Value* extra_value = new Value(0);
+ cache->SetStrictCapacityLimit(true);
+ Cache::Handle* handle;
+ s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
+ ASSERT_TRUE(s.IsIncomplete());
+ ASSERT_EQ(nullptr, handle);
+
+ for (size_t i = 0; i < 10; i++) {
+ cache->Release(handles[i]);
+ }
+
+ // test3: init with flag being true.
+  std::shared_ptr<Cache> cache2 = NewLRUCache(5, 0, true);
+ for (size_t i = 0; i < 5; i++) {
+ std::string key = ToString(i + 1);
+ s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
+ ASSERT_OK(s);
+ ASSERT_NE(nullptr, handles[i]);
+ }
+ s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
+ ASSERT_TRUE(s.IsIncomplete());
+ ASSERT_EQ(nullptr, handle);
+ // test insert without handle
+ s = cache2->Insert(extra_key, extra_value, 1, &deleter);
+  // As if the key was inserted into the cache but evicted immediately.
+ ASSERT_OK(s);
+  ASSERT_EQ(5, cache2->GetUsage());
+ ASSERT_EQ(nullptr, cache2->Lookup(extra_key));
+
+ for (size_t i = 0; i < 5; i++) {
+ cache2->Release(handles[i]);
+ }
+}
+
+TEST_P(CacheTest, OverCapacity) {
+ size_t n = 10;
+
+ // a LRUCache with n entries and one shard only
+ std::shared_ptr cache = NewCache(n, 0, false);
+
+  std::vector<Cache::Handle*> handles(n + 1);
+
+ // Insert n+1 entries, but not releasing.
+ for (size_t i = 0; i < n + 1; i++) {
+ std::string key = ToString(i+1);
+ Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
+ ASSERT_TRUE(s.ok());
+ }
+
+ // Guess what's in the cache now?
+ for (size_t i = 0; i < n + 1; i++) {
+ std::string key = ToString(i+1);
+ auto h = cache->Lookup(key);
+ ASSERT_TRUE(h != nullptr);
+ if (h) cache->Release(h);
+ }
+
+ // the cache is over capacity since nothing could be evicted
+ ASSERT_EQ(n + 1U, cache->GetUsage());
+ for (size_t i = 0; i < n + 1; i++) {
+ cache->Release(handles[i]);
+ }
+ // Make sure eviction is triggered.
+ cache->SetCapacity(n);
+
+ // cache is under capacity now since elements were released
+ ASSERT_EQ(n, cache->GetUsage());
+
+ // element 0 is evicted and the rest is there
+ // This is consistent with the LRU policy since the element 0
+ // was released first
+ for (size_t i = 0; i < n + 1; i++) {
+ std::string key = ToString(i+1);
+ auto h = cache->Lookup(key);
+ if (h) {
+ ASSERT_NE(i, 0U);
+ cache->Release(h);
+ } else {
+ ASSERT_EQ(i, 0U);
+ }
+ }
+}
+
+namespace {
+std::vector<std::pair<int, int>> callback_state;
+void callback(void* entry, size_t charge) {
+  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
+}
+};
+
+TEST_P(CacheTest, ApplyToAllCacheEntiresTest) {
+  std::vector<std::pair<int, int>> inserted;
+ callback_state.clear();
+
+ for (int i = 0; i < 10; ++i) {
+ Insert(i, i * 2, i + 1);
+ inserted.push_back({i * 2, i + 1});
+ }
+ cache_->ApplyToAllCacheEntries(callback, true);
+
+ std::sort(inserted.begin(), inserted.end());
+ std::sort(callback_state.begin(), callback_state.end());
+ ASSERT_TRUE(inserted == callback_state);
+}
+
+TEST_P(CacheTest, DefaultShardBits) {
+  // With num_shard_bits left at its default (-1), the cache should pick a
+  // sensible number of shard bits based on its capacity.
+  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
+  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
+ ASSERT_EQ(5, sc->GetNumShardBits());
+
+ cache = NewLRUCache(511 * 1024L, -1, true);
+  sc = dynamic_cast<ShardedCache*>(cache.get());
+ ASSERT_EQ(0, sc->GetNumShardBits());
+
+ cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
+  sc = dynamic_cast<ShardedCache*>(cache.get());
+ ASSERT_EQ(6, sc->GetNumShardBits());
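+  // These expectations are consistent with a default sharding policy of
+  // roughly 512KB per shard, capped at 6 shard bits: 16MB / 512KB = 32 shards
+  // -> 5 bits, 511KB fits in a single shard -> 0 bits, and 1GB would exceed
+  // the cap -> 6 bits.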
+}
+
+#ifdef SUPPORT_CLOCK_CACHE
+std::shared_ptr<Cache> (*new_clock_cache_func)(size_t, int, bool) = NewClockCache;
+INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
+ testing::Values(kLRU, kClock));
+#else
+INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
+#endif // SUPPORT_CLOCK_CACHE
+
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/c-deps/rocksdb/cache/clock_cache.cc b/c-deps/rocksdb/cache/clock_cache.cc
new file mode 100644
index 0000000000..7e42714ef1
--- /dev/null
+++ b/c-deps/rocksdb/cache/clock_cache.cc
@@ -0,0 +1,729 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "cache/clock_cache.h"
+
+#ifndef SUPPORT_CLOCK_CACHE
+
+namespace rocksdb {
+
+std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
+                                     bool strict_capacity_limit) {
+ // Clock cache not supported.
+ return nullptr;
+}
+
+} // namespace rocksdb
+
+#else
+
+#include <assert.h>
+#include <atomic>
+#include <deque>
+
+// "tbb/concurrent_hash_map.h" requires RTTI if exception is enabled.
+// Disable it so users can chooose to disable RTTI.
+#ifndef ROCKSDB_USE_RTTI
+#define TBB_USE_EXCEPTIONS 0
+#endif
+#include "tbb/concurrent_hash_map.h"
+
+#include "cache/sharded_cache.h"
+#include "port/port.h"
+#include "util/autovector.h"
+#include "util/mutexlock.h"
+
+namespace rocksdb {
+
+namespace {
+
+// An implementation of the Cache interface based on the CLOCK algorithm, with
+// better concurrent performance than LRUCache. The idea of the CLOCK algorithm
+// is to maintain all cache entries in a circular list, with an iterator (the
+// "head") pointing to the last examined entry. Eviction starts from the
+// current head. Each entry is given a second chance before eviction if it has
+// been accessed since it was last examined. In contrast to LRU, no
+// modification to the internal data structure (except for flipping the usage
+// bit) needs to be done upon lookup. This gives us an opportunity to implement
+// a cache with better concurrency.
+//
+// Each cache entry is represented by a cache handle, and all the handles
+// are arranged in a circular list, as described above. Upon erase of an entry,
+// we never remove the handle. Instead, the handle is put into a recycle bin
+// to be re-used. This is to avoid memory deallocation, which is hard to deal
+// with in a concurrent environment.
+//
+// The cache also maintains a concurrent hash map for lookup. Any concurrent
+// hash map implementation should do the job. We currently use
+// tbb::concurrent_hash_map because it supports concurrent erase.
+//
+// Each cache handle has the following flags and counters, which are squeezed
+// into an atomic integer, to make sure the handle is always in a consistent
+// state:
+//
+// * In-cache bit: whether the entry is referenced by the cache itself. If
+//   an entry is in cache, its key is also available in the hash map.
+// * Usage bit: whether the entry has been accessed by a user since it was
+//   last examined for eviction. Can be reset by eviction.
+// * Reference count: reference count by users.
+//
+// An entry can be referenced only when it's in cache. An entry can be evicted
+// only when it is in cache, has not been used since the last examination, and
+// its reference count is zero.
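+//
+// For illustration, given the bit layout used below (in-cache bit at bit 0,
+// usage bit at bit 1, reference count in the remaining bits), the box (1,1,0)
+// in the figures corresponds to flags == 0b011 == 3, and the box (0,1,3)
+// corresponds to flags == (3 << 2) | 0b10 == 14.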
+//
+// The following figure shows a possible layout of the cache. Boxes represent
+// cache handles, and the numbers in each box are the in-cache bit, usage bit,
+// and reference count, respectively.
+//
+// hash map:
+// +-------+--------+
+// | key | handle |
+// +-------+--------+
+// | "foo" | 5 |-------------------------------------+
+// +-------+--------+ |
+// | "bar" | 2 |--+ |
+// +-------+--------+ | |
+// | |
+// head | |
+// | | |
+// circular list: | | |
+// +-------+ +-------+ +-------+ +-------+ +-------+ +-------
+// |(0,0,0)|---|(1,1,0)|---|(0,0,0)|---|(0,1,3)|---|(1,0,0)|---| ...
+// +-------+ +-------+ +-------+ +-------+ +-------+ +-------
+// | |
+// +-------+ +-----------+
+// | |
+// +---+---+
+// recycle bin: | 1 | 3 |
+// +---+---+
+//
+// Suppose we try to insert "baz" into the cache at this point and the cache is
+// full. The cache will first look for entries to evict, starting from where
+// head points to (the second entry). It resets usage bit of the second entry,
+// skips the third and fourth entry since they are not in cache, and finally
+// evict the fifth entry ("foo"). It looks at recycle bin for available handle,
+// grabs handle 3, and insert the key into the handle. The following figure
+// shows the resulting layout.
+//
+// hash map:
+// +-------+--------+
+// | key | handle |
+// +-------+--------+
+// | "baz" | 3 |-------------+
+// +-------+--------+ |
+// | "bar" | 2 |--+ |
+// +-------+--------+ | |
+// | |
+// | | head
+// | | |
+// circular list: | | |
+// +-------+ +-------+ +-------+ +-------+ +-------+ +-------
+// |(0,0,0)|---|(1,0,0)|---|(1,0,0)|---|(0,1,3)|---|(0,0,0)|---| ...
+// +-------+ +-------+ +-------+ +-------+ +-------+ +-------
+// | |
+// +-------+ +-----------------------------------+
+// | |
+// +---+---+
+// recycle bin: | 1 | 5 |
+// +---+---+
+//
+// A global mutex guards the circular list, the head, and the recycle bin.
+// We additionally require that modifying the hash map needs to hold the mutex.
+// As such, modifying the cache (e.g. Insert() and Erase()) requires holding
+// the mutex. Lookup() only accesses the hash map and the flags associated
+// with each handle, and doesn't require explicit locking. Release() has to
+// acquire the mutex only when it releases the last reference to the entry and
+// the entry has been erased from the cache explicitly. A future improvement
+// could be to remove the mutex completely.
+//
+// Benchmark:
+// We run readrandom db_bench on a test DB of size 13GB, with size of each
+// level:
+//
+// Level Files Size(MB)
+// -------------------------
+// L0 1 0.01
+// L1 18 17.32
+// L2 230 182.94
+// L3 1186 1833.63
+// L4 4602 8140.30
+//
+// We test with both 32 and 16 read threads, with a 2GB cache size (the whole
+// DB doesn't fit in) and a 64GB cache size (the whole DB can fit in cache),
+// and with and without putting index and filter blocks in the block cache.
+// The benchmark runs with RocksDB 4.10. We got the following result:
+//
+// Threads Cache Cache ClockCache LRUCache
+// Size Index/Filter Throughput(MB/s) Hit Throughput(MB/s) Hit
+// 32 2GB yes 466.7 85.9% 433.7 86.5%
+// 32 2GB no 529.9 72.7% 532.7 73.9%
+// 32 64GB yes 649.9 99.9% 507.9 99.9%
+// 32 64GB no 740.4 99.9% 662.8 99.9%
+// 16 2GB yes 278.4 85.9% 283.4 86.5%
+// 16 2GB no 318.6 72.7% 335.8 73.9%
+// 16 64GB yes 391.9 99.9% 353.3 99.9%
+// 16 64GB no 433.8 99.8% 419.4 99.8%
+
+// Cache entry meta data.
+struct CacheHandle {
+ Slice key;
+ uint32_t hash;
+ void* value;
+ size_t charge;
+ void (*deleter)(const Slice&, void* value);
+
+  // Flags and counters associated with the cache handle:
+  // lowest bit: in-cache bit
+  // second lowest bit: usage bit
+  // the rest of the bits: reference count
+  // The handle is unused when flags equals 0. The thread that decreases the
+  // count to 0 is responsible for putting the handle back into recycle_ and
+  // cleaning up memory.
+  std::atomic<uint32_t> flags;
+
+ CacheHandle() = default;
+
+ CacheHandle(const CacheHandle& a) { *this = a; }
+
+ CacheHandle(const Slice& k, void* v,
+ void (*del)(const Slice& key, void* value))
+ : key(k), value(v), deleter(del) {}
+
+ CacheHandle& operator=(const CacheHandle& a) {
+ // Only copy members needed for deletion.
+ key = a.key;
+ value = a.value;
+ deleter = a.deleter;
+ return *this;
+ }
+};
+
+// Key of hash map. We store hash value with the key for convenience.
+struct CacheKey {
+ Slice key;
+ uint32_t hash_value;
+
+ CacheKey() = default;
+
+ CacheKey(const Slice& k, uint32_t h) {
+ key = k;
+ hash_value = h;
+ }
+
+ static bool equal(const CacheKey& a, const CacheKey& b) {
+ return a.hash_value == b.hash_value && a.key == b.key;
+ }
+
+ static size_t hash(const CacheKey& a) {
+    return static_cast<size_t>(a.hash_value);
+ }
+};
+
+struct CleanupContext {
+ // List of values to be deleted, along with the key and deleter.
+  autovector<CacheHandle> to_delete_value;
+
+ // List of keys to be deleted.
+  autovector<const char*> to_delete_key;
+};
+
+// A cache shard which maintains its own CLOCK cache.
+class ClockCacheShard : public CacheShard {
+ public:
+ // Hash map type.
+  typedef tbb::concurrent_hash_map<CacheKey, CacheHandle*, CacheKey> HashTable;
+
+ ClockCacheShard();
+ ~ClockCacheShard();
+
+ // Interfaces
+ virtual void SetCapacity(size_t capacity) override;
+ virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
+ virtual Status Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Cache::Handle** handle,
+ Cache::Priority priority) override;
+ virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
+  // If the entry is in cache, increase its reference count and return true.
+ // Return false otherwise.
+ //
+ // Not necessary to hold mutex_ before being called.
+ virtual bool Ref(Cache::Handle* handle) override;
+ virtual bool Release(Cache::Handle* handle,
+ bool force_erase = false) override;
+ virtual void Erase(const Slice& key, uint32_t hash) override;
+ bool EraseAndConfirm(const Slice& key, uint32_t hash,
+ CleanupContext* context);
+ virtual size_t GetUsage() const override;
+ virtual size_t GetPinnedUsage() const override;
+ virtual void EraseUnRefEntries() override;
+ virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) override;
+
+ private:
+ static const uint32_t kInCacheBit = 1;
+ static const uint32_t kUsageBit = 2;
+ static const uint32_t kRefsOffset = 2;
+ static const uint32_t kOneRef = 1 << kRefsOffset;
+
+ // Helper functions to extract cache handle flags and counters.
+ static bool InCache(uint32_t flags) { return flags & kInCacheBit; }
+ static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
+ static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }
+
+ // Decrease reference count of the entry. If this decreases the count to 0,
+ // recycle the entry. If set_usage is true, also set the usage bit.
+ //
+ // returns true if a value is erased.
+ //
+ // Not necessary to hold mutex_ before being called.
+ bool Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);
+
+ // Unset in-cache bit of the entry. Recycle the handle if necessary.
+ //
+ // returns true if a value is erased.
+ //
+ // Has to hold mutex_ before being called.
+ bool UnsetInCache(CacheHandle* handle, CleanupContext* context);
+
+ // Put the handle back to recycle_ list, and put the value associated with
+ // it into to-be-deleted list. It doesn't cleanup the key as it might be
+ // reused by another handle.
+ //
+ // Has to hold mutex_ before being called.
+ void RecycleHandle(CacheHandle* handle, CleanupContext* context);
+
+ // Delete keys and values in to-be-deleted list. Call the method without
+ // holding mutex, as destructors can be expensive.
+ void Cleanup(const CleanupContext& context);
+
+  // Examine the handle for eviction. If the handle is in cache, its usage bit
+  // is not set, and its reference count is 0, evict it from the cache.
+  // Otherwise unset the usage bit.
+ //
+ // Has to hold mutex_ before being called.
+ bool TryEvict(CacheHandle* value, CleanupContext* context);
+
+ // Scan through the circular list, evict entries until we get enough capacity
+ // for new cache entry of specific size. Return true if success, false
+ // otherwise.
+ //
+ // Has to hold mutex_ before being called.
+ bool EvictFromCache(size_t charge, CleanupContext* context);
+
+  CacheHandle* Insert(const Slice& key, uint32_t hash, void* value,
+                      size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ bool hold_reference, CleanupContext* context);
+
+ // Guards list_, head_, and recycle_. In addition, updating table_ also has
+ // to hold the mutex, to avoid the cache being in inconsistent state.
+ mutable port::Mutex mutex_;
+
+ // The circular list of cache handles. Initially the list is empty. Once a
+ // handle is needed by insertion, and no more handles are available in
+ // recycle bin, one more handle is appended to the end.
+ //
+ // We use std::deque for the circular list because we want to make sure
+  // pointers to handles are valid throughout the life-cycle of the cache
+ // (in contrast to std::vector), and be able to grow the list (in contrast
+ // to statically allocated arrays).
+  std::deque<CacheHandle> list_;
+
+  // Index of the next handle in the circular list to be examined for
+  // eviction.
+ size_t head_;
+
+ // Recycle bin of cache handles.
+  autovector<CacheHandle*> recycle_;
+
+ // Maximum cache size.
+  std::atomic<size_t> capacity_;
+
+ // Current total size of the cache.
+  std::atomic<size_t> usage_;
+
+ // Total un-released cache size.
+  std::atomic<size_t> pinned_usage_;
+
+  // Whether to reject insertion if the cache is full.
+  std::atomic<bool> strict_capacity_limit_;
+
+ // Hash table (tbb::concurrent_hash_map) for lookup.
+ HashTable table_;
+};
+
+ClockCacheShard::ClockCacheShard()
+ : head_(0), usage_(0), pinned_usage_(0), strict_capacity_limit_(false) {}
+
+ClockCacheShard::~ClockCacheShard() {
+ for (auto& handle : list_) {
+ uint32_t flags = handle.flags.load(std::memory_order_relaxed);
+ if (InCache(flags) || CountRefs(flags) > 0) {
+ (*handle.deleter)(handle.key, handle.value);
+ delete[] handle.key.data();
+ }
+ }
+}
+
+size_t ClockCacheShard::GetUsage() const {
+ return usage_.load(std::memory_order_relaxed);
+}
+
+size_t ClockCacheShard::GetPinnedUsage() const {
+ return pinned_usage_.load(std::memory_order_relaxed);
+}
+
+void ClockCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) {
+ if (thread_safe) {
+ mutex_.Lock();
+ }
+ for (auto& handle : list_) {
+ // Use relaxed semantics instead of acquire semantics since we are either
+ // holding mutex, or don't have thread safe requirement.
+ uint32_t flags = handle.flags.load(std::memory_order_relaxed);
+ if (InCache(flags)) {
+ callback(handle.value, handle.charge);
+ }
+ }
+ if (thread_safe) {
+ mutex_.Unlock();
+ }
+}
+
+void ClockCacheShard::RecycleHandle(CacheHandle* handle,
+ CleanupContext* context) {
+ mutex_.AssertHeld();
+ assert(!InCache(handle->flags) && CountRefs(handle->flags) == 0);
+ context->to_delete_key.push_back(handle->key.data());
+ context->to_delete_value.emplace_back(*handle);
+ handle->key.clear();
+ handle->value = nullptr;
+ handle->deleter = nullptr;
+ recycle_.push_back(handle);
+ usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
+}
+
+void ClockCacheShard::Cleanup(const CleanupContext& context) {
+ for (const CacheHandle& handle : context.to_delete_value) {
+ if (handle.deleter) {
+ (*handle.deleter)(handle.key, handle.value);
+ }
+ }
+ for (const char* key : context.to_delete_key) {
+ delete[] key;
+ }
+}
+
+bool ClockCacheShard::Ref(Cache::Handle* h) {
+  auto handle = reinterpret_cast<CacheHandle*>(h);
+ // CAS loop to increase reference count.
+ uint32_t flags = handle->flags.load(std::memory_order_relaxed);
+ while (InCache(flags)) {
+ // Use acquire semantics on success, as further operations on the cache
+    // entry have to be ordered after the reference count is increased.
+ if (handle->flags.compare_exchange_weak(flags, flags + kOneRef,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if (CountRefs(flags) == 0) {
+ // No reference count before the operation.
+ pinned_usage_.fetch_add(handle->charge, std::memory_order_relaxed);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
+ CleanupContext* context) {
+ if (set_usage) {
+ handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
+ }
+ // Use acquire-release semantics as previous operations on the cache entry
+  // have to be ordered before the reference count is decreased, and potential
+  // cleanup of the entry has to be ordered after.
+ uint32_t flags = handle->flags.fetch_sub(kOneRef, std::memory_order_acq_rel);
+ assert(CountRefs(flags) > 0);
+ if (CountRefs(flags) == 1) {
+ // this is the last reference.
+ pinned_usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
+ // Cleanup if it is the last reference.
+ if (!InCache(flags)) {
+ MutexLock l(&mutex_);
+ RecycleHandle(handle, context);
+ }
+ }
+ return context->to_delete_value.size();
+}
+
+bool ClockCacheShard::UnsetInCache(CacheHandle* handle,
+ CleanupContext* context) {
+ mutex_.AssertHeld();
+ // Use acquire-release semantics as previous operations on the cache entry
+  // have to be ordered before the reference count is decreased, and potential
+  // cleanup of the entry has to be ordered after.
+ uint32_t flags =
+ handle->flags.fetch_and(~kInCacheBit, std::memory_order_acq_rel);
+ // Cleanup if it is the last reference.
+ if (InCache(flags) && CountRefs(flags) == 0) {
+ RecycleHandle(handle, context);
+ }
+ return context->to_delete_value.size();
+}
+
+bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
+ mutex_.AssertHeld();
+ uint32_t flags = kInCacheBit;
+ if (handle->flags.compare_exchange_strong(flags, 0, std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ bool erased __attribute__((__unused__)) =
+ table_.erase(CacheKey(handle->key, handle->hash));
+ assert(erased);
+ RecycleHandle(handle, context);
+ return true;
+ }
+ handle->flags.fetch_and(~kUsageBit, std::memory_order_relaxed);
+ return false;
+}
+
+bool ClockCacheShard::EvictFromCache(size_t charge, CleanupContext* context) {
+ size_t usage = usage_.load(std::memory_order_relaxed);
+ size_t capacity = capacity_.load(std::memory_order_relaxed);
+ if (usage == 0) {
+ return charge <= capacity;
+ }
+ size_t new_head = head_;
+ bool second_iteration = false;
+ while (usage + charge > capacity) {
+ assert(new_head < list_.size());
+ if (TryEvict(&list_[new_head], context)) {
+ usage = usage_.load(std::memory_order_relaxed);
+ }
+ new_head = (new_head + 1 >= list_.size()) ? 0 : new_head + 1;
+ if (new_head == head_) {
+ if (second_iteration) {
+ return false;
+ } else {
+ second_iteration = true;
+ }
+ }
+ }
+ head_ = new_head;
+ return true;
+}
+
+void ClockCacheShard::SetCapacity(size_t capacity) {
+ CleanupContext context;
+ {
+ MutexLock l(&mutex_);
+ capacity_.store(capacity, std::memory_order_relaxed);
+ EvictFromCache(0, &context);
+ }
+ Cleanup(context);
+}
+
+void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
+ strict_capacity_limit_.store(strict_capacity_limit,
+ std::memory_order_relaxed);
+}
+
+CacheHandle* ClockCacheShard::Insert(
+ const Slice& key, uint32_t hash, void* value, size_t charge,
+ void (*deleter)(const Slice& key, void* value), bool hold_reference,
+ CleanupContext* context) {
+ MutexLock l(&mutex_);
+ bool success = EvictFromCache(charge, context);
+ bool strict = strict_capacity_limit_.load(std::memory_order_relaxed);
+ if (!success && (strict || !hold_reference)) {
+ context->to_delete_key.push_back(key.data());
+ if (!hold_reference) {
+ context->to_delete_value.emplace_back(key, value, deleter);
+ }
+ return nullptr;
+ }
+ // Grab available handle from recycle bin. If recycle bin is empty, create
+ // and append new handle to end of circular list.
+ CacheHandle* handle = nullptr;
+ if (!recycle_.empty()) {
+ handle = recycle_.back();
+ recycle_.pop_back();
+ } else {
+ list_.emplace_back();
+ handle = &list_.back();
+ }
+ // Fill handle.
+ handle->key = key;
+ handle->hash = hash;
+ handle->value = value;
+ handle->charge = charge;
+ handle->deleter = deleter;
+ uint32_t flags = hold_reference ? kInCacheBit + kOneRef : kInCacheBit;
+ handle->flags.store(flags, std::memory_order_relaxed);
+ HashTable::accessor accessor;
+ if (table_.find(accessor, CacheKey(key, hash))) {
+ CacheHandle* existing_handle = accessor->second;
+ table_.erase(accessor);
+ UnsetInCache(existing_handle, context);
+ }
+ table_.insert(HashTable::value_type(CacheKey(key, hash), handle));
+ if (hold_reference) {
+ pinned_usage_.fetch_add(charge, std::memory_order_relaxed);
+ }
+ usage_.fetch_add(charge, std::memory_order_relaxed);
+ return handle;
+}
+
+Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Cache::Handle** out_handle,
+ Cache::Priority priority) {
+ CleanupContext context;
+ HashTable::accessor accessor;
+ char* key_data = new char[key.size()];
+ memcpy(key_data, key.data(), key.size());
+ Slice key_copy(key_data, key.size());
+ CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter,
+ out_handle != nullptr, &context);
+ Status s;
+ if (out_handle != nullptr) {
+ if (handle == nullptr) {
+ s = Status::Incomplete("Insert failed due to LRU cache being full.");
+ } else {
+      *out_handle = reinterpret_cast<Cache::Handle*>(handle);
+ }
+ }
+ Cleanup(context);
+ return s;
+}
+
+Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
+ HashTable::const_accessor accessor;
+ if (!table_.find(accessor, CacheKey(key, hash))) {
+ return nullptr;
+ }
+ CacheHandle* handle = accessor->second;
+ accessor.release();
+  // Ref() could fail if another thread sneaks in and evicts/erases the cache
+  // entry before we are able to hold a reference.
+  if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
+ return nullptr;
+ }
+  // Double check the key since the handle may now represent another key
+  // if other threads sneak in, evict/erase the entry, and re-use the handle
+  // for another cache entry.
+ if (hash != handle->hash || key != handle->key) {
+ CleanupContext context;
+ Unref(handle, false, &context);
+    // It is possible that Unref() deleted the entry, so we need to clean up.
+ Cleanup(context);
+ return nullptr;
+ }
+  return reinterpret_cast<Cache::Handle*>(handle);
+}
+
+bool ClockCacheShard::Release(Cache::Handle* h, bool force_erase) {
+ CleanupContext context;
+  CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
+ bool erased = Unref(handle, true, &context);
+ if (force_erase && !erased) {
+ erased = EraseAndConfirm(handle->key, handle->hash, &context);
+ }
+ Cleanup(context);
+ return erased;
+}
+
+void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
+ CleanupContext context;
+ EraseAndConfirm(key, hash, &context);
+ Cleanup(context);
+}
+
+bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
+ CleanupContext* context) {
+ MutexLock l(&mutex_);
+ HashTable::accessor accessor;
+ bool erased = false;
+ if (table_.find(accessor, CacheKey(key, hash))) {
+ CacheHandle* handle = accessor->second;
+ table_.erase(accessor);
+ erased = UnsetInCache(handle, context);
+ }
+ return erased;
+}
+
+void ClockCacheShard::EraseUnRefEntries() {
+ CleanupContext context;
+ {
+ MutexLock l(&mutex_);
+ table_.clear();
+ for (auto& handle : list_) {
+ UnsetInCache(&handle, &context);
+ }
+ }
+ Cleanup(context);
+}
+
+class ClockCache : public ShardedCache {
+ public:
+ ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit)
+ : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
+ int num_shards = 1 << num_shard_bits;
+ shards_ = new ClockCacheShard[num_shards];
+ SetCapacity(capacity);
+ SetStrictCapacityLimit(strict_capacity_limit);
+ }
+
+ virtual ~ClockCache() { delete[] shards_; }
+
+ virtual const char* Name() const override { return "ClockCache"; }
+
+ virtual CacheShard* GetShard(int shard) override {
+    return reinterpret_cast<CacheShard*>(&shards_[shard]);
+ }
+
+ virtual const CacheShard* GetShard(int shard) const override {
+    return reinterpret_cast<const CacheShard*>(&shards_[shard]);
+ }
+
+ virtual void* Value(Handle* handle) override {
+    return reinterpret_cast<const CacheHandle*>(handle)->value;
+ }
+
+ virtual size_t GetCharge(Handle* handle) const override {
+    return reinterpret_cast<const CacheHandle*>(handle)->charge;
+ }
+
+ virtual uint32_t GetHash(Handle* handle) const override {
+    return reinterpret_cast<const CacheHandle*>(handle)->hash;
+ }
+
+ virtual void DisownData() override { shards_ = nullptr; }
+
+ private:
+ ClockCacheShard* shards_;
+};
+
+} // end anonymous namespace
+
+std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
+                                     bool strict_capacity_limit) {
+ if (num_shard_bits < 0) {
+ num_shard_bits = GetDefaultCacheShardBits(capacity);
+ }
+  return std::make_shared<ClockCache>(capacity, num_shard_bits,
+                                      strict_capacity_limit);
+}
+
+} // namespace rocksdb
+
+#endif // SUPPORT_CLOCK_CACHE
diff --git a/c-deps/rocksdb/cache/clock_cache.h b/c-deps/rocksdb/cache/clock_cache.h
new file mode 100644
index 0000000000..1614c0ed45
--- /dev/null
+++ b/c-deps/rocksdb/cache/clock_cache.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include "rocksdb/cache.h"
+
+#if defined(TBB) && !defined(ROCKSDB_LITE)
+#define SUPPORT_CLOCK_CACHE
+#endif
diff --git a/c-deps/rocksdb/cache/lru_cache.cc b/c-deps/rocksdb/cache/lru_cache.cc
new file mode 100644
index 0000000000..e4ab4065ee
--- /dev/null
+++ b/c-deps/rocksdb/cache/lru_cache.cc
@@ -0,0 +1,533 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "cache/lru_cache.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+#include "util/mutexlock.h"
+
+namespace rocksdb {
+
+LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
+ Resize();
+}
+
+LRUHandleTable::~LRUHandleTable() {
+ ApplyToAllCacheEntries([](LRUHandle* h) {
+ if (h->refs == 1) {
+ h->Free();
+ }
+ });
+ delete[] list_;
+}
+
+LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
+ return *FindPointer(key, hash);
+}
+
+LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
+ LRUHandle** ptr = FindPointer(h->key(), h->hash);
+ LRUHandle* old = *ptr;
+ h->next_hash = (old == nullptr ? nullptr : old->next_hash);
+ *ptr = h;
+ if (old == nullptr) {
+ ++elems_;
+ if (elems_ > length_) {
+ // Since each cache entry is fairly large, we aim for a small
+ // average linked list length (<= 1).
+ Resize();
+ }
+ }
+ return old;
+}
+
+LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
+ LRUHandle** ptr = FindPointer(key, hash);
+ LRUHandle* result = *ptr;
+ if (result != nullptr) {
+ *ptr = result->next_hash;
+ --elems_;
+ }
+ return result;
+}
+
+LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
+ LRUHandle** ptr = &list_[hash & (length_ - 1)];
+ while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
+ ptr = &(*ptr)->next_hash;
+ }
+ return ptr;
+}
+
+void LRUHandleTable::Resize() {
+ uint32_t new_length = 16;
+ while (new_length < elems_ * 1.5) {
+ new_length *= 2;
+ }
+ LRUHandle** new_list = new LRUHandle*[new_length];
+ memset(new_list, 0, sizeof(new_list[0]) * new_length);
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < length_; i++) {
+ LRUHandle* h = list_[i];
+ while (h != nullptr) {
+ LRUHandle* next = h->next_hash;
+ uint32_t hash = h->hash;
+ LRUHandle** ptr = &new_list[hash & (new_length - 1)];
+ h->next_hash = *ptr;
+ *ptr = h;
+ h = next;
+ count++;
+ }
+ }
+ assert(elems_ == count);
+ delete[] list_;
+ list_ = new_list;
+ length_ = new_length;
+}
+
+LRUCacheShard::LRUCacheShard()
+ : capacity_(0), high_pri_pool_usage_(0), strict_capacity_limit_(false),
+ high_pri_pool_ratio_(0), high_pri_pool_capacity_(0), usage_(0),
+ lru_usage_(0) {
+ // Make empty circular linked list
+ lru_.next = &lru_;
+ lru_.prev = &lru_;
+ lru_low_pri_ = &lru_;
+}
+
+LRUCacheShard::~LRUCacheShard() {}
+
+bool LRUCacheShard::Unref(LRUHandle* e) {
+ assert(e->refs > 0);
+ e->refs--;
+ return e->refs == 0;
+}
+
+// Call deleter and free
+
+void LRUCacheShard::EraseUnRefEntries() {
+  autovector<LRUHandle*> last_reference_list;
+ {
+ MutexLock l(&mutex_);
+ while (lru_.next != &lru_) {
+ LRUHandle* old = lru_.next;
+ assert(old->InCache());
+ assert(old->refs ==
+ 1); // LRU list contains elements which may be evicted
+ LRU_Remove(old);
+ table_.Remove(old->key(), old->hash);
+ old->SetInCache(false);
+ Unref(old);
+ usage_ -= old->charge;
+ last_reference_list.push_back(old);
+ }
+ }
+
+ for (auto entry : last_reference_list) {
+ entry->Free();
+ }
+}
+
+void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) {
+ if (thread_safe) {
+ mutex_.Lock();
+ }
+ table_.ApplyToAllCacheEntries(
+ [callback](LRUHandle* h) { callback(h->value, h->charge); });
+ if (thread_safe) {
+ mutex_.Unlock();
+ }
+}
+
+void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
+ *lru = &lru_;
+ *lru_low_pri = lru_low_pri_;
+}
+
+size_t LRUCacheShard::TEST_GetLRUSize() {
+ LRUHandle* lru_handle = lru_.next;
+ size_t lru_size = 0;
+ while (lru_handle != &lru_) {
+ lru_size++;
+ lru_handle = lru_handle->next;
+ }
+ return lru_size;
+}
+
+void LRUCacheShard::LRU_Remove(LRUHandle* e) {
+ assert(e->next != nullptr);
+ assert(e->prev != nullptr);
+ if (lru_low_pri_ == e) {
+ lru_low_pri_ = e->prev;
+ }
+ e->next->prev = e->prev;
+ e->prev->next = e->next;
+ e->prev = e->next = nullptr;
+ lru_usage_ -= e->charge;
+ if (e->InHighPriPool()) {
+ assert(high_pri_pool_usage_ >= e->charge);
+ high_pri_pool_usage_ -= e->charge;
+ }
+}
+
+void LRUCacheShard::LRU_Insert(LRUHandle* e) {
+ assert(e->next == nullptr);
+ assert(e->prev == nullptr);
+ if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
+ // Inset "e" to head of LRU list.
+ e->next = &lru_;
+ e->prev = lru_.prev;
+ e->prev->next = e;
+ e->next->prev = e;
+ e->SetInHighPriPool(true);
+ high_pri_pool_usage_ += e->charge;
+ MaintainPoolSize();
+ } else {
+ // Insert "e" to the head of low-pri pool. Note that when
+ // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
+ e->next = lru_low_pri_->next;
+ e->prev = lru_low_pri_;
+ e->prev->next = e;
+ e->next->prev = e;
+ e->SetInHighPriPool(false);
+ lru_low_pri_ = e;
+ }
+ lru_usage_ += e->charge;
+}
+
+void LRUCacheShard::MaintainPoolSize() {
+ while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
+ // Overflow last entry in high-pri pool to low-pri pool.
+ lru_low_pri_ = lru_low_pri_->next;
+ assert(lru_low_pri_ != &lru_);
+ lru_low_pri_->SetInHighPriPool(false);
+ high_pri_pool_usage_ -= lru_low_pri_->charge;
+ }
+}
+
+void LRUCacheShard::EvictFromLRU(size_t charge,
+                                 autovector<LRUHandle*>* deleted) {
+ while (usage_ + charge > capacity_ && lru_.next != &lru_) {
+ LRUHandle* old = lru_.next;
+ assert(old->InCache());
+ assert(old->refs == 1); // LRU list contains elements which may be evicted
+ LRU_Remove(old);
+ table_.Remove(old->key(), old->hash);
+ old->SetInCache(false);
+ Unref(old);
+ usage_ -= old->charge;
+ deleted->push_back(old);
+ }
+}
+
+void* LRUCacheShard::operator new(size_t size) {
+ return port::cacheline_aligned_alloc(size);
+}
+
+void* LRUCacheShard::operator new[](size_t size) {
+ return port::cacheline_aligned_alloc(size);
+}
+
+void LRUCacheShard::operator delete(void *memblock) {
+ port::cacheline_aligned_free(memblock);
+}
+
+void LRUCacheShard::operator delete[](void* memblock) {
+ port::cacheline_aligned_free(memblock);
+}
+
+void LRUCacheShard::SetCapacity(size_t capacity) {
+  autovector<LRUHandle*> last_reference_list;
+ {
+ MutexLock l(&mutex_);
+ capacity_ = capacity;
+ high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
+ EvictFromLRU(0, &last_reference_list);
+ }
+ // we free the entries here outside of mutex for
+ // performance reasons
+ for (auto entry : last_reference_list) {
+ entry->Free();
+ }
+}
+
+void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
+ MutexLock l(&mutex_);
+ strict_capacity_limit_ = strict_capacity_limit;
+}
+
+Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
+ MutexLock l(&mutex_);
+ LRUHandle* e = table_.Lookup(key, hash);
+ if (e != nullptr) {
+ assert(e->InCache());
+ if (e->refs == 1) {
+ LRU_Remove(e);
+ }
+ e->refs++;
+ }
+  return reinterpret_cast<Cache::Handle*>(e);
+}
+
+bool LRUCacheShard::Ref(Cache::Handle* h) {
+  LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
+ MutexLock l(&mutex_);
+ if (handle->InCache() && handle->refs == 1) {
+ LRU_Remove(handle);
+ }
+ handle->refs++;
+ return true;
+}
+
+void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
+ MutexLock l(&mutex_);
+ high_pri_pool_ratio_ = high_pri_pool_ratio;
+ high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
+ MaintainPoolSize();
+}
+
+bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
+ if (handle == nullptr) {
+ return false;
+ }
+  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
+ bool last_reference = false;
+ {
+ MutexLock l(&mutex_);
+ last_reference = Unref(e);
+ if (last_reference) {
+ usage_ -= e->charge;
+ }
+ if (e->refs == 1 && e->InCache()) {
+ // The item is still in cache, and nobody else holds a reference to it
+ if (usage_ > capacity_ || force_erase) {
+ // the cache is full
+ // The LRU list must be empty since the cache is full
+ assert(!(usage_ > capacity_) || lru_.next == &lru_);
+ // take this opportunity and remove the item
+ table_.Remove(e->key(), e->hash);
+ e->SetInCache(false);
+ Unref(e);
+ usage_ -= e->charge;
+ last_reference = true;
+ } else {
+ // put the item on the list to be potentially freed
+ LRU_Insert(e);
+ }
+ }
+ }
+
+ // free outside of mutex
+ if (last_reference) {
+ e->Free();
+ }
+ return last_reference;
+}
+
+Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Cache::Handle** handle, Cache::Priority priority) {
+ // Allocate the memory here outside of the mutex
+ // If the cache is full, we'll have to release it
+ // It shouldn't happen very often though.
+  LRUHandle* e = reinterpret_cast<LRUHandle*>(
+      new char[sizeof(LRUHandle) - 1 + key.size()]);
+ Status s;
+  autovector<LRUHandle*> last_reference_list;
+
+ e->value = value;
+ e->deleter = deleter;
+ e->charge = charge;
+ e->key_length = key.size();
+ e->flags = 0;
+ e->hash = hash;
+ e->refs = (handle == nullptr
+ ? 1
+ : 2); // One from LRUCache, one for the returned handle
+ e->next = e->prev = nullptr;
+ e->SetInCache(true);
+ e->SetPriority(priority);
+ memcpy(e->key_data, key.data(), key.size());
+
+ {
+ MutexLock l(&mutex_);
+
+ // Free the space following strict LRU policy until enough space
+ // is freed or the lru list is empty
+ EvictFromLRU(charge, &last_reference_list);
+
+ if (usage_ - lru_usage_ + charge > capacity_ &&
+ (strict_capacity_limit_ || handle == nullptr)) {
+ if (handle == nullptr) {
+        // Don't insert the entry but still return ok, as if the entry was
+        // inserted into the cache and evicted immediately.
+ last_reference_list.push_back(e);
+ } else {
+        delete[] reinterpret_cast<char*>(e);
+ *handle = nullptr;
+ s = Status::Incomplete("Insert failed due to LRU cache being full.");
+ }
+ } else {
+ // insert into the cache
+ // note that the cache might get larger than its capacity if not enough
+ // space was freed
+ LRUHandle* old = table_.Insert(e);
+ usage_ += e->charge;
+ if (old != nullptr) {
+ old->SetInCache(false);
+ if (Unref(old)) {
+ usage_ -= old->charge;
+          // old is on the LRU list because it's in cache and its reference
+          // count was just dropped to 0 by Unref().
+ LRU_Remove(old);
+ last_reference_list.push_back(old);
+ }
+ }
+ if (handle == nullptr) {
+ LRU_Insert(e);
+ } else {
+        *handle = reinterpret_cast<Cache::Handle*>(e);
+ }
+ s = Status::OK();
+ }
+ }
+
+ // we free the entries here outside of mutex for
+ // performance reasons
+ for (auto entry : last_reference_list) {
+ entry->Free();
+ }
+
+ return s;
+}
+
+void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
+ LRUHandle* e;
+ bool last_reference = false;
+ {
+ MutexLock l(&mutex_);
+ e = table_.Remove(key, hash);
+ if (e != nullptr) {
+ last_reference = Unref(e);
+ if (last_reference) {
+ usage_ -= e->charge;
+ }
+ if (last_reference && e->InCache()) {
+ LRU_Remove(e);
+ }
+ e->SetInCache(false);
+ }
+ }
+
+ // mutex not held here
+ // last_reference will only be true if e != nullptr
+ if (last_reference) {
+ e->Free();
+ }
+}
+
+size_t LRUCacheShard::GetUsage() const {
+ MutexLock l(&mutex_);
+ return usage_;
+}
+
+size_t LRUCacheShard::GetPinnedUsage() const {
+ MutexLock l(&mutex_);
+ assert(usage_ >= lru_usage_);
+ return usage_ - lru_usage_;
+}
+
+std::string LRUCacheShard::GetPrintableOptions() const {
+ const int kBufferSize = 200;
+ char buffer[kBufferSize];
+ {
+ MutexLock l(&mutex_);
+ snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
+ high_pri_pool_ratio_);
+ }
+ return std::string(buffer);
+}
+
+LRUCache::LRUCache(size_t capacity, int num_shard_bits,
+ bool strict_capacity_limit, double high_pri_pool_ratio)
+ : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
+ num_shards_ = 1 << num_shard_bits;
+ shards_ = new LRUCacheShard[num_shards_];
+ SetCapacity(capacity);
+ SetStrictCapacityLimit(strict_capacity_limit);
+ for (int i = 0; i < num_shards_; i++) {
+ shards_[i].SetHighPriorityPoolRatio(high_pri_pool_ratio);
+ }
+}
+
+LRUCache::~LRUCache() { delete[] shards_; }
+
+CacheShard* LRUCache::GetShard(int shard) {
+  return reinterpret_cast<CacheShard*>(&shards_[shard]);
+}
+
+const CacheShard* LRUCache::GetShard(int shard) const {
+  return reinterpret_cast<const CacheShard*>(&shards_[shard]);
+}
+
+void* LRUCache::Value(Handle* handle) {
+  return reinterpret_cast<const LRUHandle*>(handle)->value;
+}
+
+size_t LRUCache::GetCharge(Handle* handle) const {
+  return reinterpret_cast<const LRUHandle*>(handle)->charge;
+}
+
+uint32_t LRUCache::GetHash(Handle* handle) const {
+  return reinterpret_cast<const LRUHandle*>(handle)->hash;
+}
+
+void LRUCache::DisownData() {
+// Do not drop data if compiled with ASAN, to suppress leak warnings.
+#ifndef __SANITIZE_ADDRESS__
+ shards_ = nullptr;
+#endif // !__SANITIZE_ADDRESS__
+}
+
+size_t LRUCache::TEST_GetLRUSize() {
+ size_t lru_size_of_all_shards = 0;
+ for (int i = 0; i < num_shards_; i++) {
+ lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
+ }
+ return lru_size_of_all_shards;
+}
+
+std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
+                                   bool strict_capacity_limit,
+                                   double high_pri_pool_ratio) {
+ if (num_shard_bits >= 20) {
+ return nullptr; // the cache cannot be sharded into too many fine pieces
+ }
+ if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
+ // invalid high_pri_pool_ratio
+ return nullptr;
+ }
+ if (num_shard_bits < 0) {
+ num_shard_bits = GetDefaultCacheShardBits(capacity);
+ }
+  return std::make_shared<LRUCache>(capacity, num_shard_bits,
+                                    strict_capacity_limit, high_pri_pool_ratio);
+}
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/cache/lru_cache.h b/c-deps/rocksdb/cache/lru_cache.h
new file mode 100644
index 0000000000..abe78fd0c7
--- /dev/null
+++ b/c-deps/rocksdb/cache/lru_cache.h
@@ -0,0 +1,302 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+
+#include <string>
+
+#include "cache/sharded_cache.h"
+
+#include "port/port.h"
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+// LRU cache implementation
+
+// An entry is a variable length heap-allocated structure.
+// Entries are referenced by cache and/or by any external entity.
+// The cache keeps all its entries in table. Some elements
+// are also stored on LRU list.
+//
+// LRUHandle can be in these states:
+// 1. Referenced externally AND in hash table.
+// In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
+// 2. Not referenced externally and in hash table. In that case the entry is
+// in the LRU and can be freed. (refs == 1 && in_cache == true)
+// 3. Referenced externally and not in hash table. In that case the entry is
+// not on the LRU and not in the table. (refs >= 1 && in_cache == false)
+//
+// All newly created LRUHandles are in state 1. If you call
+// LRUCacheShard::Release on an entry in state 1, it will go into state 2.
+// To move from state 1 to state 3, either call LRUCacheShard::Erase or
+// LRUCacheShard::Insert with the same key.
+// To move from state 2 to state 1, use LRUCacheShard::Lookup.
+// Before destruction, make sure that no handles are in state 1. This means
+// that any successful LRUCacheShard::Lookup/LRUCacheShard::Insert has a
+// matching LRUCacheShard::Release (to move into state 2) or
+// LRUCacheShard::Erase (for state 3).
+
+struct LRUHandle {
+ void* value;
+ void (*deleter)(const Slice&, void* value);
+ LRUHandle* next_hash;
+ LRUHandle* next;
+ LRUHandle* prev;
+ size_t charge; // TODO(opt): Only allow uint32_t?
+ size_t key_length;
+ uint32_t refs; // a number of refs to this entry
+ // cache itself is counted as 1
+
+ // Include the following flags:
+ // in_cache: whether this entry is referenced by the hash table.
+ // is_high_pri: whether this entry is high priority entry.
+  // in_high_pri_pool: whether this entry is in the high-pri pool.
+ char flags;
+
+ uint32_t hash; // Hash of key(); used for fast sharding and comparisons
+
+ char key_data[1]; // Beginning of key
+
+ Slice key() const {
+ // For cheaper lookups, we allow a temporary Handle object
+ // to store a pointer to a key in "value".
+ if (next == this) {
+      return *(reinterpret_cast<Slice*>(value));
+ } else {
+ return Slice(key_data, key_length);
+ }
+ }
+
+ bool InCache() { return flags & 1; }
+ bool IsHighPri() { return flags & 2; }
+ bool InHighPriPool() { return flags & 4; }
+
+ void SetInCache(bool in_cache) {
+ if (in_cache) {
+ flags |= 1;
+ } else {
+ flags &= ~1;
+ }
+ }
+
+ void SetPriority(Cache::Priority priority) {
+ if (priority == Cache::Priority::HIGH) {
+ flags |= 2;
+ } else {
+ flags &= ~2;
+ }
+ }
+
+ void SetInHighPriPool(bool in_high_pri_pool) {
+ if (in_high_pri_pool) {
+ flags |= 4;
+ } else {
+ flags &= ~4;
+ }
+ }
+
+ void Free() {
+ assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
+ if (deleter) {
+ (*deleter)(key(), value);
+ }
+ delete[] reinterpret_cast(this);
+ }
+};
+
+// We provide our own simple hash table since it removes a whole bunch
+// of porting hacks and is also faster than some of the built-in hash
+// table implementations in some of the compiler/runtime combinations
+// we have tested. E.g., readrandom speeds up by ~5% over the g++
+// 4.4.3's builtin hashtable.
+class LRUHandleTable {
+ public:
+ LRUHandleTable();
+ ~LRUHandleTable();
+
+ LRUHandle* Lookup(const Slice& key, uint32_t hash);
+ LRUHandle* Insert(LRUHandle* h);
+ LRUHandle* Remove(const Slice& key, uint32_t hash);
+
+  template <typename T>
+ void ApplyToAllCacheEntries(T func) {
+ for (uint32_t i = 0; i < length_; i++) {
+ LRUHandle* h = list_[i];
+ while (h != nullptr) {
+ auto n = h->next_hash;
+ assert(h->InCache());
+ func(h);
+ h = n;
+ }
+ }
+ }
+
+ private:
+ // Return a pointer to slot that points to a cache entry that
+ // matches key/hash. If there is no such cache entry, return a
+ // pointer to the trailing slot in the corresponding linked list.
+ LRUHandle** FindPointer(const Slice& key, uint32_t hash);
+
+ void Resize();
+
+ // The table consists of an array of buckets where each bucket is
+ // a linked list of cache entries that hash into the bucket.
+ LRUHandle** list_;
+ uint32_t length_;
+ uint32_t elems_;
+};
+
+// A single shard of sharded cache.
+class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard {
+ public:
+ LRUCacheShard();
+ virtual ~LRUCacheShard();
+
+  // Separate from constructor so caller can easily make an array of LRUCache.
+  // If current usage is more than the new capacity, the function will attempt
+  // to free the needed space.
+ virtual void SetCapacity(size_t capacity) override;
+
+  // Set the flag to reject insertion if the cache is full.
+ virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
+
+ // Set percentage of capacity reserved for high-pri cache entries.
+ void SetHighPriorityPoolRatio(double high_pri_pool_ratio);
+
+ // Like Cache methods, but with an extra "hash" parameter.
+ virtual Status Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Cache::Handle** handle,
+ Cache::Priority priority) override;
+ virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
+ virtual bool Ref(Cache::Handle* handle) override;
+ virtual bool Release(Cache::Handle* handle,
+ bool force_erase = false) override;
+ virtual void Erase(const Slice& key, uint32_t hash) override;
+
+ // Although in some platforms the update of size_t is atomic, to make sure
+ // GetUsage() and GetPinnedUsage() work correctly under any platform, we'll
+ // protect them with mutex_.
+
+ virtual size_t GetUsage() const override;
+ virtual size_t GetPinnedUsage() const override;
+
+ virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) override;
+
+ virtual void EraseUnRefEntries() override;
+
+ virtual std::string GetPrintableOptions() const override;
+
+ void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri);
+
+  // Retrieves number of elements in LRU, for unit test purposes only.
+  // Not thread-safe.
+ size_t TEST_GetLRUSize();
+
+  // Overloaded to align allocations to the cache line size.
+ void* operator new(size_t);
+
+ void* operator new[](size_t);
+
+ void operator delete(void *);
+
+ void operator delete[](void*);
+
+ private:
+ void LRU_Remove(LRUHandle* e);
+ void LRU_Insert(LRUHandle* e);
+
+  // Overflow the last entry in the high-pri pool to the low-pri pool until the
+  // size of the high-pri pool is no larger than the size specified by
+  // high_pri_pool_ratio_.
+ void MaintainPoolSize();
+
+  // Just reduce the reference count by 1.
+  // Return true if it was the last reference.
+ bool Unref(LRUHandle* e);
+
+  // Free some space following the strict LRU policy until enough space
+  // to hold (usage_ + charge) is freed or the LRU list is empty.
+  // This function is not thread-safe; it needs to be executed while
+  // holding mutex_.
+  void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);
+
+ // Initialized before use.
+ size_t capacity_;
+
+ // Memory size for entries in high-pri pool.
+ size_t high_pri_pool_usage_;
+
+ // Whether to reject insertion if cache reaches its full capacity.
+ bool strict_capacity_limit_;
+
+ // Ratio of capacity reserved for high priority cache entries.
+ double high_pri_pool_ratio_;
+
+ // High-pri pool size, equals to capacity * high_pri_pool_ratio.
+ // Remember the value to avoid recomputing each time.
+ double high_pri_pool_capacity_;
+
+ // Dummy head of LRU list.
+ // lru.prev is newest entry, lru.next is oldest entry.
+  // LRU contains items which can be evicted, i.e. referenced only by the cache
+ LRUHandle lru_;
+
+ // Pointer to head of low-pri pool in LRU list.
+ LRUHandle* lru_low_pri_;
+
+ // ------------^^^^^^^^^^^^^-----------
+ // Not frequently modified data members
+ // ------------------------------------
+ //
+  // We separate data members that are updated frequently from the ones that
+  // are not frequently updated, so that they don't share the same cache line,
+  // which would lead to false sharing.
+ //
+ // ------------------------------------
+ // Frequently modified data members
+ // ------------vvvvvvvvvvvvv-----------
+ LRUHandleTable table_;
+
+ // Memory size for entries residing in the cache
+ size_t usage_;
+
+ // Memory size for entries residing only in the LRU list
+ size_t lru_usage_;
+
+ // mutex_ protects the following state.
+ // We don't count mutex_ as the cache's internal state so semantically we
+ // don't mind mutex_ invoking the non-const actions.
+ mutable port::Mutex mutex_;
+};
+
+class LRUCache : public ShardedCache {
+ public:
+ LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
+ double high_pri_pool_ratio);
+ virtual ~LRUCache();
+ virtual const char* Name() const override { return "LRUCache"; }
+ virtual CacheShard* GetShard(int shard) override;
+ virtual const CacheShard* GetShard(int shard) const override;
+ virtual void* Value(Handle* handle) override;
+ virtual size_t GetCharge(Handle* handle) const override;
+ virtual uint32_t GetHash(Handle* handle) const override;
+ virtual void DisownData() override;
+
+ // Retrieves number of elements in LRU, for unit test purpose only
+ size_t TEST_GetLRUSize();
+
+ private:
+ LRUCacheShard* shards_;
+ int num_shards_ = 0;
+};
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/cache/lru_cache_test.cc b/c-deps/rocksdb/cache/lru_cache_test.cc
new file mode 100644
index 0000000000..1b83033c36
--- /dev/null
+++ b/c-deps/rocksdb/cache/lru_cache_test.cc
@@ -0,0 +1,172 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include "cache/lru_cache.h"
+
+#include <string>
+#include <vector>
+#include "util/testharness.h"
+
+namespace rocksdb {
+
+class LRUCacheTest : public testing::Test {
+ public:
+ LRUCacheTest() {}
+ ~LRUCacheTest() {}
+
+ void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0) {
+ cache_.reset(
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4316) // We've validated the alignment with the new operators
+#endif
+ new LRUCacheShard()
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+ );
+ cache_->SetCapacity(capacity);
+ cache_->SetStrictCapacityLimit(false);
+ cache_->SetHighPriorityPoolRatio(high_pri_pool_ratio);
+ }
+
+ void Insert(const std::string& key,
+ Cache::Priority priority = Cache::Priority::LOW) {
+ cache_->Insert(key, 0 /*hash*/, nullptr /*value*/, 1 /*charge*/,
+ nullptr /*deleter*/, nullptr /*handle*/, priority);
+ }
+
+ void Insert(char key, Cache::Priority priority = Cache::Priority::LOW) {
+ Insert(std::string(1, key), priority);
+ }
+
+ bool Lookup(const std::string& key) {
+ auto handle = cache_->Lookup(key, 0 /*hash*/);
+ if (handle) {
+ cache_->Release(handle);
+ return true;
+ }
+ return false;
+ }
+
+ bool Lookup(char key) { return Lookup(std::string(1, key)); }
+
+ void Erase(const std::string& key) { cache_->Erase(key, 0 /*hash*/); }
+
+  void ValidateLRUList(std::vector<std::string> keys,
+ size_t num_high_pri_pool_keys = 0) {
+ LRUHandle* lru;
+ LRUHandle* lru_low_pri;
+ cache_->TEST_GetLRUList(&lru, &lru_low_pri);
+ LRUHandle* iter = lru;
+ bool in_high_pri_pool = false;
+ size_t high_pri_pool_keys = 0;
+ if (iter == lru_low_pri) {
+ in_high_pri_pool = true;
+ }
+ for (const auto& key : keys) {
+ iter = iter->next;
+ ASSERT_NE(lru, iter);
+ ASSERT_EQ(key, iter->key().ToString());
+ ASSERT_EQ(in_high_pri_pool, iter->InHighPriPool());
+ if (in_high_pri_pool) {
+ high_pri_pool_keys++;
+ }
+ if (iter == lru_low_pri) {
+ ASSERT_FALSE(in_high_pri_pool);
+ in_high_pri_pool = true;
+ }
+ }
+ ASSERT_EQ(lru, iter->next);
+ ASSERT_TRUE(in_high_pri_pool);
+ ASSERT_EQ(num_high_pri_pool_keys, high_pri_pool_keys);
+ }
+
+ private:
+  std::unique_ptr<LRUCacheShard> cache_;
+};
+
+TEST_F(LRUCacheTest, BasicLRU) {
+ NewCache(5);
+ for (char ch = 'a'; ch <= 'e'; ch++) {
+ Insert(ch);
+ }
+ ValidateLRUList({"a", "b", "c", "d", "e"});
+ for (char ch = 'x'; ch <= 'z'; ch++) {
+ Insert(ch);
+ }
+ ValidateLRUList({"d", "e", "x", "y", "z"});
+ ASSERT_FALSE(Lookup("b"));
+ ValidateLRUList({"d", "e", "x", "y", "z"});
+ ASSERT_TRUE(Lookup("e"));
+ ValidateLRUList({"d", "x", "y", "z", "e"});
+ ASSERT_TRUE(Lookup("z"));
+ ValidateLRUList({"d", "x", "y", "e", "z"});
+ Erase("x");
+ ValidateLRUList({"d", "y", "e", "z"});
+ ASSERT_TRUE(Lookup("d"));
+ ValidateLRUList({"y", "e", "z", "d"});
+ Insert("u");
+ ValidateLRUList({"y", "e", "z", "d", "u"});
+ Insert("v");
+ ValidateLRUList({"e", "z", "d", "u", "v"});
+}
+
+TEST_F(LRUCacheTest, MidPointInsertion) {
+ // Allocate 2 cache entries to high-pri pool.
+ NewCache(5, 0.45);
+
+ Insert("a", Cache::Priority::LOW);
+ Insert("b", Cache::Priority::LOW);
+ Insert("c", Cache::Priority::LOW);
+ ValidateLRUList({"a", "b", "c"}, 0);
+
+ // Low-pri entries can take high-pri pool capacity if available
+ Insert("u", Cache::Priority::LOW);
+ Insert("v", Cache::Priority::LOW);
+ ValidateLRUList({"a", "b", "c", "u", "v"}, 0);
+
+ Insert("X", Cache::Priority::HIGH);
+ Insert("Y", Cache::Priority::HIGH);
+ ValidateLRUList({"c", "u", "v", "X", "Y"}, 2);
+
+ // High-pri entries can overflow to low-pri pool.
+ Insert("Z", Cache::Priority::HIGH);
+ ValidateLRUList({"u", "v", "X", "Y", "Z"}, 2);
+
+ // Low-pri entries will be inserted to head of low-pri pool.
+ Insert("a", Cache::Priority::LOW);
+ ValidateLRUList({"v", "X", "a", "Y", "Z"}, 2);
+
+ // Low-pri entries will be inserted to head of low-pri pool after lookup.
+ ASSERT_TRUE(Lookup("v"));
+ ValidateLRUList({"X", "a", "v", "Y", "Z"}, 2);
+
+ // High-pri entries will be inserted to the head of the list after lookup.
+ ASSERT_TRUE(Lookup("X"));
+ ValidateLRUList({"a", "v", "Y", "Z", "X"}, 2);
+ ASSERT_TRUE(Lookup("Z"));
+ ValidateLRUList({"a", "v", "Y", "X", "Z"}, 2);
+
+ Erase("Y");
+ ValidateLRUList({"a", "v", "X", "Z"}, 2);
+ Erase("X");
+ ValidateLRUList({"a", "v", "Z"}, 1);
+ Insert("d", Cache::Priority::LOW);
+ Insert("e", Cache::Priority::LOW);
+ ValidateLRUList({"a", "v", "d", "e", "Z"}, 1);
+ Insert("f", Cache::Priority::LOW);
+ Insert("g", Cache::Priority::LOW);
+ ValidateLRUList({"d", "e", "f", "g", "Z"}, 1);
+ ASSERT_TRUE(Lookup("d"));
+ ValidateLRUList({"e", "f", "g", "d", "Z"}, 1);
+}
+
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/c-deps/rocksdb/cache/sharded_cache.cc b/c-deps/rocksdb/cache/sharded_cache.cc
new file mode 100644
index 0000000000..9bdea3a08e
--- /dev/null
+++ b/c-deps/rocksdb/cache/sharded_cache.cc
@@ -0,0 +1,161 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "cache/sharded_cache.h"
+
+#include <string>
+
+#include "util/mutexlock.h"
+
+namespace rocksdb {
+
+ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
+ bool strict_capacity_limit)
+ : num_shard_bits_(num_shard_bits),
+ capacity_(capacity),
+ strict_capacity_limit_(strict_capacity_limit),
+ last_id_(1) {}
+
+void ShardedCache::SetCapacity(size_t capacity) {
+ int num_shards = 1 << num_shard_bits_;
+ const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
+ MutexLock l(&capacity_mutex_);
+ for (int s = 0; s < num_shards; s++) {
+ GetShard(s)->SetCapacity(per_shard);
+ }
+ capacity_ = capacity;
+}
+
+void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
+ int num_shards = 1 << num_shard_bits_;
+ MutexLock l(&capacity_mutex_);
+ for (int s = 0; s < num_shards; s++) {
+ GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
+ }
+ strict_capacity_limit_ = strict_capacity_limit;
+}
+
+Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Handle** handle, Priority priority) {
+ uint32_t hash = HashSlice(key);
+ return GetShard(Shard(hash))
+ ->Insert(key, hash, value, charge, deleter, handle, priority);
+}
+
+Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
+ uint32_t hash = HashSlice(key);
+ return GetShard(Shard(hash))->Lookup(key, hash);
+}
+
+bool ShardedCache::Ref(Handle* handle) {
+ uint32_t hash = GetHash(handle);
+ return GetShard(Shard(hash))->Ref(handle);
+}
+
+bool ShardedCache::Release(Handle* handle, bool force_erase) {
+ uint32_t hash = GetHash(handle);
+ return GetShard(Shard(hash))->Release(handle, force_erase);
+}
+
+void ShardedCache::Erase(const Slice& key) {
+ uint32_t hash = HashSlice(key);
+ GetShard(Shard(hash))->Erase(key, hash);
+}
+
+uint64_t ShardedCache::NewId() {
+ return last_id_.fetch_add(1, std::memory_order_relaxed);
+}
+
+size_t ShardedCache::GetCapacity() const {
+ MutexLock l(&capacity_mutex_);
+ return capacity_;
+}
+
+bool ShardedCache::HasStrictCapacityLimit() const {
+ MutexLock l(&capacity_mutex_);
+ return strict_capacity_limit_;
+}
+
+size_t ShardedCache::GetUsage() const {
+ // We will not lock the cache when getting the usage from shards.
+ int num_shards = 1 << num_shard_bits_;
+ size_t usage = 0;
+ for (int s = 0; s < num_shards; s++) {
+ usage += GetShard(s)->GetUsage();
+ }
+ return usage;
+}
+
+size_t ShardedCache::GetUsage(Handle* handle) const {
+ return GetCharge(handle);
+}
+
+size_t ShardedCache::GetPinnedUsage() const {
+ // We will not lock the cache when getting the usage from shards.
+ int num_shards = 1 << num_shard_bits_;
+ size_t usage = 0;
+ for (int s = 0; s < num_shards; s++) {
+ usage += GetShard(s)->GetPinnedUsage();
+ }
+ return usage;
+}
+
+void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) {
+ int num_shards = 1 << num_shard_bits_;
+ for (int s = 0; s < num_shards; s++) {
+ GetShard(s)->ApplyToAllCacheEntries(callback, thread_safe);
+ }
+}
+
+void ShardedCache::EraseUnRefEntries() {
+ int num_shards = 1 << num_shard_bits_;
+ for (int s = 0; s < num_shards; s++) {
+ GetShard(s)->EraseUnRefEntries();
+ }
+}
+
+std::string ShardedCache::GetPrintableOptions() const {
+ std::string ret;
+ ret.reserve(20000);
+ const int kBufferSize = 200;
+ char buffer[kBufferSize];
+ {
+ MutexLock l(&capacity_mutex_);
+ snprintf(buffer, kBufferSize, " capacity : %" ROCKSDB_PRIszt "\n",
+ capacity_);
+ ret.append(buffer);
+ snprintf(buffer, kBufferSize, " num_shard_bits : %d\n", num_shard_bits_);
+ ret.append(buffer);
+ snprintf(buffer, kBufferSize, " strict_capacity_limit : %d\n",
+ strict_capacity_limit_);
+ ret.append(buffer);
+ }
+ ret.append(GetShard(0)->GetPrintableOptions());
+ return ret;
+}
+int GetDefaultCacheShardBits(size_t capacity) {
+ int num_shard_bits = 0;
+ size_t min_shard_size = 512L * 1024L; // Every shard is at least 512KB.
+ size_t num_shards = capacity / min_shard_size;
+ while (num_shards >>= 1) {
+ if (++num_shard_bits >= 6) {
+ // No more than 6.
+ return num_shard_bits;
+ }
+ }
+ return num_shard_bits;
+}
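+// Illustrative arithmetic (not from the upstream sources): a 16MB cache gives
+// 16MB / 512KB = 32 candidate shards, and the loop above settles on
+// num_shard_bits = 5 (2^5 = 32 shards); larger caches are capped at 6 bits,
+// i.e. at most 64 shards.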
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/cache/sharded_cache.h b/c-deps/rocksdb/cache/sharded_cache.h
new file mode 100644
index 0000000000..4f9dea2ad0
--- /dev/null
+++ b/c-deps/rocksdb/cache/sharded_cache.h
@@ -0,0 +1,102 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <atomic>
+#include <string>
+
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "util/hash.h"
+
+namespace rocksdb {
+
+// Single cache shard interface.
+class CacheShard {
+ public:
+ CacheShard() = default;
+ virtual ~CacheShard() = default;
+
+ virtual Status Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Cache::Handle** handle, Cache::Priority priority) = 0;
+ virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
+ virtual bool Ref(Cache::Handle* handle) = 0;
+ virtual bool Release(Cache::Handle* handle, bool force_erase = false) = 0;
+ virtual void Erase(const Slice& key, uint32_t hash) = 0;
+ virtual void SetCapacity(size_t capacity) = 0;
+ virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
+ virtual size_t GetUsage() const = 0;
+ virtual size_t GetPinnedUsage() const = 0;
+ virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) = 0;
+ virtual void EraseUnRefEntries() = 0;
+ virtual std::string GetPrintableOptions() const { return ""; }
+};
+
+// Generic cache interface which shards cache by hash of keys. 2^num_shard_bits
+// shards will be created, with capacity split evenly to each of the shards.
+// Keys are sharded by the highest num_shard_bits bits of hash value.
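+//
+// For example (illustrative): with num_shard_bits = 4 there are 2^4 = 16
+// shards, a 32MB capacity gives each shard 2MB, and a key whose 32-bit hash is
+// 0xABCD1234 is routed to shard 0xA (its top 4 bits) by Shard() below, i.e.
+// hash >> (32 - num_shard_bits).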
+class ShardedCache : public Cache {
+ public:
+ ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit);
+ virtual ~ShardedCache() = default;
+ virtual const char* Name() const override = 0;
+ virtual CacheShard* GetShard(int shard) = 0;
+ virtual const CacheShard* GetShard(int shard) const = 0;
+ virtual void* Value(Handle* handle) override = 0;
+ virtual size_t GetCharge(Handle* handle) const = 0;
+ virtual uint32_t GetHash(Handle* handle) const = 0;
+ virtual void DisownData() override = 0;
+
+ virtual void SetCapacity(size_t capacity) override;
+ virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
+
+ virtual Status Insert(const Slice& key, void* value, size_t charge,
+ void (*deleter)(const Slice& key, void* value),
+ Handle** handle, Priority priority) override;
+ virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
+ virtual bool Ref(Handle* handle) override;
+ virtual bool Release(Handle* handle, bool force_erase = false) override;
+ virtual void Erase(const Slice& key) override;
+ virtual uint64_t NewId() override;
+ virtual size_t GetCapacity() const override;
+ virtual bool HasStrictCapacityLimit() const override;
+ virtual size_t GetUsage() const override;
+ virtual size_t GetUsage(Handle* handle) const override;
+ virtual size_t GetPinnedUsage() const override;
+ virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
+ bool thread_safe) override;
+ virtual void EraseUnRefEntries() override;
+ virtual std::string GetPrintableOptions() const override;
+
+ int GetNumShardBits() const { return num_shard_bits_; }
+
+ private:
+ static inline uint32_t HashSlice(const Slice& s) {
+ return Hash(s.data(), s.size(), 0);
+ }
+
+ uint32_t Shard(uint32_t hash) {
+ // Note, hash >> 32 yields hash in gcc, not the zero we expect!
+ return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
+ }
+
+ int num_shard_bits_;
+ mutable port::Mutex capacity_mutex_;
+ size_t capacity_;
+ bool strict_capacity_limit_;
+ std::atomic<uint64_t> last_id_;
+};
+
+extern int GetDefaultCacheShardBits(size_t capacity);
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/cmake/RocksDBConfig.cmake.in b/c-deps/rocksdb/cmake/RocksDBConfig.cmake.in
new file mode 100644
index 0000000000..b3cb2b27ad
--- /dev/null
+++ b/c-deps/rocksdb/cmake/RocksDBConfig.cmake.in
@@ -0,0 +1,3 @@
+@PACKAGE_INIT@
+include("${CMAKE_CURRENT_LIST_DIR}/RocksDBTargets.cmake")
+check_required_components(RocksDB)
diff --git a/c-deps/rocksdb/coverage/coverage_test.sh b/c-deps/rocksdb/coverage/coverage_test.sh
new file mode 100755
index 0000000000..6d87ae9086
--- /dev/null
+++ b/c-deps/rocksdb/coverage/coverage_test.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# Exit on error.
+set -e
+
+if [ -n "$USE_CLANG" ]; then
+ echo "Error: Coverage test is supported only for gcc."
+ exit 1
+fi
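+
+# Usage sketch (an assumption based on ROOT=".." below, not spelled out here):
+# build with gcov instrumentation and run the tests first, then invoke this
+# script from the coverage/ directory, optionally with HTML=1 to also produce
+# an lcov HTML report under COVERAGE_REPORT.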
+
+ROOT=".."
+# Fetch right version of gcov
+if [ -d /mnt/gvfs/third-party -a -z "$CXX" ]; then
+ source $ROOT/build_tools/fbcode_config.sh
+ GCOV=$GCC_BASE/bin/gcov
+else
+ GCOV=$(which gcov)
+fi
+
+COVERAGE_DIR="$PWD/COVERAGE_REPORT"
+mkdir -p $COVERAGE_DIR
+
+# Find all gcno files to generate the coverage report
+
+GCNO_FILES=`find $ROOT -name "*.gcno"`
+$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
+ # Parse the raw gcov report into a more human-readable form.
+ python $ROOT/coverage/parse_gcov_output.py |
+ # Write the output to both stdout and report file.
+ tee $COVERAGE_DIR/coverage_report_all.txt &&
+echo -e "Generated coverage report for all files: $COVERAGE_DIR/coverage_report_all.txt\n"
+
+# TODO: we also need to get the files of the latest commits.
+# Get the most recently committed files.
+LATEST_FILES=`
+ git show --pretty="format:" --name-only HEAD |
+ grep -v "^$" |
+ paste -s -d,`
+RECENT_REPORT=$COVERAGE_DIR/coverage_report_recent.txt
+
+echo -e "Recently updated files: $LATEST_FILES\n" > $RECENT_REPORT
+$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
+ python $ROOT/coverage/parse_gcov_output.py -interested-files $LATEST_FILES |
+ tee -a $RECENT_REPORT &&
+echo -e "Generated coverage report for recently updated files: $RECENT_REPORT\n"
+
+# Unless otherwise specified, we'll not generate html report by default
+if [ -z "$HTML" ]; then
+ exit 0
+fi
+
+# Generate the html report. If we cannot find lcov on this machine, we'll
+# simply skip this step.
+echo "Generating the html coverage report..."
+
+LCOV=$(which lcov 2>/dev/null || true)
+if [ -z "$LCOV" ]
+then
+ echo "Skip: Cannot find lcov to generate the html report."
+ exit 0
+fi
+
+LCOV_VERSION=$(lcov -v | grep 1.1 || true)
+if [ -n "$LCOV_VERSION" ]
+then
+ echo "Not supported lcov version. Expect lcov 1.1."
+ exit 0
+fi
+
+(cd $ROOT; lcov --no-external \
+ --capture \
+ --directory $PWD \
+ --gcov-tool $GCOV \
+ --output-file $COVERAGE_DIR/coverage.info)
+
+genhtml $COVERAGE_DIR/coverage.info -o $COVERAGE_DIR
+
+echo "HTML Coverage report is generated in $COVERAGE_DIR"
diff --git a/c-deps/rocksdb/coverage/parse_gcov_output.py b/c-deps/rocksdb/coverage/parse_gcov_output.py
new file mode 100644
index 0000000000..72e8b07230
--- /dev/null
+++ b/c-deps/rocksdb/coverage/parse_gcov_output.py
@@ -0,0 +1,118 @@
+import optparse
+import re
+import sys
+
+from optparse import OptionParser
+
+# The gcov report follows a fixed pattern: each file produces two lines of
+# output, from which we can extract the file name, the total number of lines
+# and the coverage percentage.
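+#
+# An illustrative snippet of the gcov output this parser expects (the path and
+# the numbers are made up):
+#
+#   File 'db/builder.cc'
+#   Lines executed:83.33% of 180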
+def parse_gcov_report(gcov_input):
+ per_file_coverage = {}
+ total_coverage = None
+ current_file = None
+
+ for line in gcov_input:
+ line = line.strip()
+
+ # --First line of the coverage report (with file name in it)?
+ match_obj = re.match("^File '(.*)'$", line)
+ if match_obj:
+ # fetch the file name from the first line of the report.
+ current_file = match_obj.group(1)
+ continue
+
+ # -- Second line of the file report (with coverage percentage)
+ match_obj = re.match("^Lines executed:(.*)% of (.*)", line)
+
+ if match_obj:
+ coverage = float(match_obj.group(1))
+ lines = int(match_obj.group(2))
+
+ if current_file is not None:
+ per_file_coverage[current_file] = (coverage, lines)
+ current_file = None
+ else:
+ # If current_file is not set, we have reached the last line of the
+ # report, which contains the summarized coverage percentage.
+ total_coverage = (coverage, lines)
+ continue
+
+ # If the line doesn't match either pattern above, we can simply ignore
+ # it: it is either an empty line or a note that gcov found no
+ # executable lines for the given file.
+ current_file = None
+
+ return per_file_coverage, total_coverage
+
+def get_option_parser():
+ usage = "Parse the gcov output and generate more human-readable code " +\
+ "coverage report."
+ parser = OptionParser(usage)
+
+ parser.add_option(
+ "--interested-files", "-i",
+ dest="filenames",
+ help="Comma separated files names. if specified, we will display " +
+ "the coverage report only for interested source files. " +
+ "Otherwise we will display the coverage report for all " +
+ "source files."
+ )
+ return parser
+
+def display_file_coverage(per_file_coverage, total_coverage):
+ # To print auto-adjusted columns, we need to know the length of the
+ # longest file name.
+ max_file_name_length = max(
+ len(fname) for fname in per_file_coverage.keys()
+ )
+
+ # -- Print header
+ # size of separator is determined by 3 column sizes:
+ # file name, coverage percentage and lines.
+ header_template = \
+ "%" + str(max_file_name_length) + "s\t%s\t%s"
+ separator = "-" * (max_file_name_length + 10 + 20)
+ print header_template % ("Filename", "Coverage", "Lines")
+ print separator
+
+ # -- Print body
+ # template for printing coverage report for each file.
+ record_template = "%" + str(max_file_name_length) + "s\t%5.2f%%\t%10d"
+
+ for fname, coverage_info in per_file_coverage.items():
+ coverage, lines = coverage_info
+ print record_template % (fname, coverage, lines)
+
+ # -- Print footer
+ if total_coverage:
+ print separator
+ print record_template % ("Total", total_coverage[0], total_coverage[1])
+
+def report_coverage():
+ parser = get_option_parser()
+ (options, args) = parser.parse_args()
+
+ interested_files = set()
+ if options.filenames is not None:
+ interested_files = set(f.strip() for f in options.filenames.split(','))
+
+ # To make things simple, right now we only read gcov report from the input
+ per_file_coverage, total_coverage = parse_gcov_report(sys.stdin)
+
+ # Check if we need to display coverage info for interested files.
+ if len(interested_files):
+ per_file_coverage = dict(
+ (fname, per_file_coverage[fname]) for fname in interested_files
+ if fname in per_file_coverage
+ )
+ # If we are only interested in a few files, it makes no sense to report
+ # the total_coverage
+ total_coverage = None
+
+ if not len(per_file_coverage):
+ print >> sys.stderr, "Cannot find coverage info for the given files."
+ return
+ display_file_coverage(per_file_coverage, total_coverage)
+
+if __name__ == "__main__":
+ report_coverage()
diff --git a/c-deps/rocksdb/db/builder.cc b/c-deps/rocksdb/db/builder.cc
new file mode 100644
index 0000000000..11b8fc783e
--- /dev/null
+++ b/c-deps/rocksdb/db/builder.cc
@@ -0,0 +1,227 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/builder.h"
+
+#include <algorithm>
+#include <deque>
+#include <vector>
+
+#include "db/compaction_iterator.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/internal_stats.h"
+#include "db/merge_helper.h"
+#include "db/table_cache.h"
+#include "db/version_edit.h"
+#include "monitoring/iostats_context_imp.h"
+#include "monitoring/thread_status_util.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/table.h"
+#include "table/block_based_table_builder.h"
+#include "table/internal_iterator.h"
+#include "util/file_reader_writer.h"
+#include "util/filename.h"
+#include "util/stop_watch.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+class TableFactory;
+
+TableBuilder* NewTableBuilder(
+ const ImmutableCFOptions& ioptions,
+ const InternalKeyComparator& internal_comparator,
+ const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+ int_tbl_prop_collector_factories,
+ uint32_t column_family_id, const std::string& column_family_name,
+ WritableFileWriter* file, const CompressionType compression_type,
+ const CompressionOptions& compression_opts, int level,
+ const std::string* compression_dict, const bool skip_filters,
+ const uint64_t creation_time, const uint64_t oldest_key_time) {
+ assert((column_family_id ==
+ TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
+ column_family_name.empty());
+ return ioptions.table_factory->NewTableBuilder(
+ TableBuilderOptions(
+ ioptions, internal_comparator, int_tbl_prop_collector_factories,
+ compression_type, compression_opts, compression_dict, skip_filters,
+ column_family_name, level, creation_time, oldest_key_time),
+ column_family_id, file);
+}
+
+Status BuildTable(
+ const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
+ const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
+ TableCache* table_cache, InternalIterator* iter,
+ std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
+ const InternalKeyComparator& internal_comparator,
+ const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+ int_tbl_prop_collector_factories,
+ uint32_t column_family_id, const std::string& column_family_name,
+ std::vector<SequenceNumber> snapshots,
+ SequenceNumber earliest_write_conflict_snapshot,
+ SnapshotChecker* snapshot_checker, const CompressionType compression,
+ const CompressionOptions& compression_opts, bool paranoid_file_checks,
+ InternalStats* internal_stats, TableFileCreationReason reason,
+ EventLogger* event_logger, int job_id, const Env::IOPriority io_priority,
+ TableProperties* table_properties, int level, const uint64_t creation_time,
+ const uint64_t oldest_key_time) {
+ assert((column_family_id ==
+ TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
+ column_family_name.empty());
+ // Report IOStats for flush once for every kReportFlushIOStatsEvery bytes.
+ const size_t kReportFlushIOStatsEvery = 1048576;
+ Status s;
+ meta->fd.file_size = 0;
+ iter->SeekToFirst();
+ std::unique_ptr<RangeDelAggregator> range_del_agg(
+ new RangeDelAggregator(internal_comparator, snapshots));
+ s = range_del_agg->AddTombstones(std::move(range_del_iter));
+ if (!s.ok()) {
+ // may be non-ok if a range tombstone key is unparsable
+ return s;
+ }
+
+ std::string fname = TableFileName(ioptions.db_paths, meta->fd.GetNumber(),
+ meta->fd.GetPathId());
+#ifndef ROCKSDB_LITE
+ EventHelpers::NotifyTableFileCreationStarted(
+ ioptions.listeners, dbname, column_family_name, fname, job_id, reason);
+#endif // !ROCKSDB_LITE
+ TableProperties tp;
+
+ if (iter->Valid() || range_del_agg->ShouldAddTombstones()) {
+ TableBuilder* builder;
+ unique_ptr<WritableFileWriter> file_writer;
+ {
+ unique_ptr<WritableFile> file;
+#ifndef NDEBUG
+ bool use_direct_writes = env_options.use_direct_writes;
+ TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
+#endif // !NDEBUG
+ s = NewWritableFile(env, fname, &file, env_options);
+ if (!s.ok()) {
+ EventHelpers::LogAndNotifyTableFileCreationFinished(
+ event_logger, ioptions.listeners, dbname, column_family_name, fname,
+ job_id, meta->fd, tp, reason, s);
+ return s;
+ }
+ file->SetIOPriority(io_priority);
+
+ file_writer.reset(new WritableFileWriter(std::move(file), env_options,
+ ioptions.statistics));
+ builder = NewTableBuilder(
+ ioptions, internal_comparator, int_tbl_prop_collector_factories,
+ column_family_id, column_family_name, file_writer.get(), compression,
+ compression_opts, level, nullptr /* compression_dict */,
+ false /* skip_filters */, creation_time, oldest_key_time);
+ }
+
+ MergeHelper merge(env, internal_comparator.user_comparator(),
+ ioptions.merge_operator, nullptr, ioptions.info_log,
+ true /* internal key corruption is not ok */,
+ snapshots.empty() ? 0 : snapshots.back());
+
+ CompactionIterator c_iter(
+ iter, internal_comparator.user_comparator(), &merge, kMaxSequenceNumber,
+ &snapshots, earliest_write_conflict_snapshot, snapshot_checker, env,
+ true /* internal key corruption is not ok */, range_del_agg.get());
+ c_iter.SeekToFirst();
+ for (; c_iter.Valid(); c_iter.Next()) {
+ const Slice& key = c_iter.key();
+ const Slice& value = c_iter.value();
+ builder->Add(key, value);
+ meta->UpdateBoundaries(key, c_iter.ikey().sequence);
+
+ // TODO(noetzli): Update stats after flush, too.
+ if (io_priority == Env::IO_HIGH &&
+ IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
+ ThreadStatusUtil::SetThreadOperationProperty(
+ ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
+ }
+ }
+ // nullptr for table_{min,max} so all range tombstones will be flushed
+ range_del_agg->AddToBuilder(builder, nullptr /* lower_bound */,
+ nullptr /* upper_bound */, meta);
+
+ // Finish and check for builder errors
+ bool empty = builder->NumEntries() == 0;
+ s = c_iter.status();
+ if (!s.ok() || empty) {
+ builder->Abandon();
+ } else {
+ s = builder->Finish();
+ }
+
+ if (s.ok() && !empty) {
+ uint64_t file_size = builder->FileSize();
+ meta->fd.file_size = file_size;
+ meta->marked_for_compaction = builder->NeedCompact();
+ assert(meta->fd.GetFileSize() > 0);
+ tp = builder->GetTableProperties();
+ if (table_properties) {
+ *table_properties = tp;
+ }
+ }
+ delete builder;
+
+ // Finish and check for file errors
+ if (s.ok() && !empty) {
+ StopWatch sw(env, ioptions.statistics, TABLE_SYNC_MICROS);
+ s = file_writer->Sync(ioptions.use_fsync);
+ }
+ if (s.ok() && !empty) {
+ s = file_writer->Close();
+ }
+
+ if (s.ok() && !empty) {
+ // Verify that the table is usable
+ // We set for_compaction to false and don't OptimizeForCompactionTableRead
+ // here because this is a special case after we finish building the table.
+ // No matter whether use_direct_io_for_flush_and_compaction is true, we
+ // regard this verification as user reads, since the goal is to cache the
+ // table here for further user reads.
+ std::unique_ptr<InternalIterator> it(table_cache->NewIterator(
+ ReadOptions(), env_options, internal_comparator, meta->fd,
+ nullptr /* range_del_agg */, nullptr,
+ (internal_stats == nullptr) ? nullptr
+ : internal_stats->GetFileReadHist(0),
+ false /* for_compaction */, nullptr /* arena */,
+ false /* skip_filter */, level));
+ s = it->status();
+ if (s.ok() && paranoid_file_checks) {
+ for (it->SeekToFirst(); it->Valid(); it->Next()) {
+ }
+ s = it->status();
+ }
+ }
+ }
+
+ // Check for input iterator errors
+ if (!iter->status().ok()) {
+ s = iter->status();
+ }
+
+ if (!s.ok() || meta->fd.GetFileSize() == 0) {
+ env->DeleteFile(fname);
+ }
+
+ // Output to event logger and fire events.
+ EventHelpers::LogAndNotifyTableFileCreationFinished(
+ event_logger, ioptions.listeners, dbname, column_family_name, fname,
+ job_id, meta->fd, tp, reason, s);
+
+ return s;
+}
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/db/builder.h b/c-deps/rocksdb/db/builder.h
new file mode 100644
index 0000000000..fa96e12d2b
--- /dev/null
+++ b/c-deps/rocksdb/db/builder.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+#include <memory>
+#include <string>
+#include <vector>
+#include "db/table_properties_collector.h"
+#include "options/cf_options.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/listener.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table_properties.h"
+#include "rocksdb/types.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/event_logger.h"
+
+namespace rocksdb {
+
+struct Options;
+struct FileMetaData;
+
+class Env;
+struct EnvOptions;
+class Iterator;
+class SnapshotChecker;
+class TableCache;
+class VersionEdit;
+class TableBuilder;
+class WritableFileWriter;
+class InternalStats;
+class InternalIterator;
+
+// @param column_family_name Name of the column family that is also identified
+// by column_family_id, or empty string if unknown. It must outlive the
+// TableBuilder returned by this function.
+// @param compression_dict Data for presetting the compression library's
+// dictionary, or nullptr.
+TableBuilder* NewTableBuilder(
+ const ImmutableCFOptions& options,
+ const InternalKeyComparator& internal_comparator,
+ const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+ int_tbl_prop_collector_factories,
+ uint32_t column_family_id, const std::string& column_family_name,
+ WritableFileWriter* file, const CompressionType compression_type,
+ const CompressionOptions& compression_opts, int level,
+ const std::string* compression_dict = nullptr,
+ const bool skip_filters = false, const uint64_t creation_time = 0,
+ const uint64_t oldest_key_time = 0);
+
+// Build a Table file from the contents of *iter. The generated file
+// will be named according to the number specified in meta. On success, the rest of
+// *meta will be filled with metadata about the generated table.
+// If no data is present in *iter, meta->file_size will be set to
+// zero, and no Table file will be produced.
+//
+// @param column_family_name Name of the column family that is also identified
+// by column_family_id, or empty string if unknown.
+extern Status BuildTable(
+ const std::string& dbname, Env* env, const ImmutableCFOptions& options,
+ const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
+ TableCache* table_cache, InternalIterator* iter,
+ std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
+ const InternalKeyComparator& internal_comparator,
+ const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+ int_tbl_prop_collector_factories,
+ uint32_t column_family_id, const std::string& column_family_name,
+ std::vector<SequenceNumber> snapshots,
+ SequenceNumber earliest_write_conflict_snapshot,
+ SnapshotChecker* snapshot_checker, const CompressionType compression,
+ const CompressionOptions& compression_opts, bool paranoid_file_checks,
+ InternalStats* internal_stats, TableFileCreationReason reason,
+ EventLogger* event_logger = nullptr, int job_id = 0,
+ const Env::IOPriority io_priority = Env::IO_HIGH,
+ TableProperties* table_properties = nullptr, int level = -1,
+ const uint64_t creation_time = 0, const uint64_t oldest_key_time = 0);
+
+} // namespace rocksdb
diff --git a/c-deps/rocksdb/db/c.cc b/c-deps/rocksdb/db/c.cc
new file mode 100644
index 0000000000..2e8d0db3cd
--- /dev/null
+++ b/c-deps/rocksdb/db/c.cc
@@ -0,0 +1,3739 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef ROCKSDB_LITE
+
+#include "rocksdb/c.h"
+
+#include <stdlib.h>
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "rocksdb/universal_compaction.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksdb/utilities/checkpoint.h"
+#include "rocksdb/utilities/optimistic_transaction_db.h"
+#include "rocksdb/utilities/transaction.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "rocksdb/write_batch.h"
+#include "utilities/merge_operators.h"
+
+using rocksdb::BytewiseComparator;
+using rocksdb::Cache;
+using rocksdb::ColumnFamilyDescriptor;
+using rocksdb::ColumnFamilyHandle;
+using rocksdb::ColumnFamilyOptions;
+using rocksdb::CompactionFilter;
+using rocksdb::CompactionFilterFactory;
+using rocksdb::CompactionFilterContext;
+using rocksdb::CompactionOptionsFIFO;
+using rocksdb::Comparator;
+using rocksdb::CompressionType;
+using rocksdb::WALRecoveryMode;
+using rocksdb::DB;
+using rocksdb::DBOptions;
+using rocksdb::DbPath;
+using rocksdb::Env;
+using rocksdb::EnvOptions;
+using rocksdb::InfoLogLevel;
+using rocksdb::FileLock;
+using rocksdb::FilterPolicy;
+using rocksdb::FlushOptions;
+using rocksdb::IngestExternalFileOptions;
+using rocksdb::Iterator;
+using rocksdb::Logger;
+using rocksdb::MergeOperator;
+using rocksdb::MergeOperators;
+using rocksdb::NewBloomFilterPolicy;
+using rocksdb::NewLRUCache;
+using rocksdb::Options;
+using rocksdb::BlockBasedTableOptions;
+using rocksdb::CuckooTableOptions;
+using rocksdb::RandomAccessFile;
+using rocksdb::Range;
+using rocksdb::ReadOptions;
+using rocksdb::SequentialFile;
+using rocksdb::Slice;
+using rocksdb::SliceParts;
+using rocksdb::SliceTransform;
+using rocksdb::Snapshot;
+using rocksdb::SstFileWriter;
+using rocksdb::Status;
+using rocksdb::WritableFile;
+using rocksdb::WriteBatch;
+using rocksdb::WriteBatchWithIndex;
+using rocksdb::WriteOptions;
+using rocksdb::LiveFileMetaData;
+using rocksdb::BackupEngine;
+using rocksdb::BackupableDBOptions;
+using rocksdb::BackupInfo;
+using rocksdb::RestoreOptions;
+using rocksdb::CompactRangeOptions;
+using rocksdb::RateLimiter;
+using rocksdb::NewGenericRateLimiter;
+using rocksdb::PinnableSlice;
+using rocksdb::TransactionDBOptions;
+using rocksdb::TransactionDB;
+using rocksdb::TransactionOptions;
+using rocksdb::OptimisticTransactionDB;
+using rocksdb::OptimisticTransactionOptions;
+using rocksdb::Transaction;
+using rocksdb::Checkpoint;
+
+using std::shared_ptr;
+
+extern "C" {
+
+struct rocksdb_t { DB* rep; };
+struct rocksdb_backup_engine_t { BackupEngine* rep; };
+struct rocksdb_backup_engine_info_t { std::vector<BackupInfo> rep; };
+struct rocksdb_restore_options_t { RestoreOptions rep; };
+struct rocksdb_iterator_t { Iterator* rep; };
+struct rocksdb_writebatch_t { WriteBatch rep; };
+struct rocksdb_writebatch_wi_t { WriteBatchWithIndex* rep; };
+struct rocksdb_snapshot_t { const Snapshot* rep; };
+struct rocksdb_flushoptions_t { FlushOptions rep; };
+struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
+struct rocksdb_readoptions_t {
+ ReadOptions rep;
+ Slice upper_bound; // stack variable to set pointer to in ReadOptions
+};
+struct rocksdb_writeoptions_t { WriteOptions rep; };
+struct rocksdb_options_t { Options rep; };
+struct rocksdb_compactoptions_t {
+ CompactRangeOptions rep;
+};
+struct rocksdb_block_based_table_options_t { BlockBasedTableOptions rep; };
+struct rocksdb_cuckoo_table_options_t { CuckooTableOptions rep; };
+struct rocksdb_seqfile_t { SequentialFile* rep; };
+struct rocksdb_randomfile_t { RandomAccessFile* rep; };
+struct rocksdb_writablefile_t { WritableFile* rep; };
+struct rocksdb_filelock_t { FileLock* rep; };
+struct rocksdb_logger_t { shared_ptr<Logger> rep; };
+struct rocksdb_cache_t { shared_ptr<Cache> rep; };
+struct rocksdb_livefiles_t { std::vector<LiveFileMetaData> rep; };
+struct rocksdb_column_family_handle_t { ColumnFamilyHandle* rep; };
+struct rocksdb_envoptions_t { EnvOptions rep; };
+struct rocksdb_ingestexternalfileoptions_t { IngestExternalFileOptions rep; };
+struct rocksdb_sstfilewriter_t { SstFileWriter* rep; };
+struct rocksdb_ratelimiter_t { RateLimiter* rep; };
+struct rocksdb_pinnableslice_t {
+ PinnableSlice rep;
+};
+struct rocksdb_transactiondb_options_t {
+ TransactionDBOptions rep;
+};
+struct rocksdb_transactiondb_t {
+ TransactionDB* rep;
+};
+struct rocksdb_transaction_options_t {
+ TransactionOptions rep;
+};
+struct rocksdb_transaction_t {
+ Transaction* rep;
+};
+struct rocksdb_checkpoint_t {
+ Checkpoint* rep;
+};
+struct rocksdb_optimistictransactiondb_t {
+ OptimisticTransactionDB* rep;
+};
+struct rocksdb_optimistictransaction_options_t {
+ OptimisticTransactionOptions rep;
+};
+
+struct rocksdb_compactionfiltercontext_t {
+ CompactionFilter::Context rep;
+};
+
+struct rocksdb_compactionfilter_t : public CompactionFilter {
+ void* state_;
+ void (*destructor_)(void*);
+ unsigned char (*filter_)(
+ void*,
+ int level,
+ const char* key, size_t key_length,
+ const char* existing_value, size_t value_length,
+ char** new_value, size_t *new_value_length,
+ unsigned char* value_changed);
+ const char* (*name_)(void*);
+ unsigned char ignore_snapshots_;
+
+ virtual ~rocksdb_compactionfilter_t() {
+ (*destructor_)(state_);
+ }
+
+ virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
+ std::string* new_value,
+ bool* value_changed) const override {
+ char* c_new_value = nullptr;
+ size_t new_value_length = 0;
+ unsigned char c_value_changed = 0;
+ unsigned char result = (*filter_)(
+ state_,
+ level,
+ key.data(), key.size(),
+ existing_value.data(), existing_value.size(),
+ &c_new_value, &new_value_length, &c_value_changed);
+ if (c_value_changed) {
+ new_value->assign(c_new_value, new_value_length);
+ *value_changed = true;
+ }
+ return result;
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+
+ virtual bool IgnoreSnapshots() const override { return ignore_snapshots_; }
+};
+
+struct rocksdb_compactionfilterfactory_t : public CompactionFilterFactory {
+ void* state_;
+ void (*destructor_)(void*);
+ rocksdb_compactionfilter_t* (*create_compaction_filter_)(
+ void*, rocksdb_compactionfiltercontext_t* context);
+ const char* (*name_)(void*);
+
+ virtual ~rocksdb_compactionfilterfactory_t() { (*destructor_)(state_); }
+
+ virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+ const CompactionFilter::Context& context) override {
+ rocksdb_compactionfiltercontext_t ccontext;
+ ccontext.rep = context;
+ CompactionFilter* cf = (*create_compaction_filter_)(state_, &ccontext);
+ return std::unique_ptr<CompactionFilter>(cf);
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+};
+
+struct rocksdb_comparator_t : public Comparator {
+ void* state_;
+ void (*destructor_)(void*);
+ int (*compare_)(
+ void*,
+ const char* a, size_t alen,
+ const char* b, size_t blen);
+ const char* (*name_)(void*);
+
+ virtual ~rocksdb_comparator_t() {
+ (*destructor_)(state_);
+ }
+
+ virtual int Compare(const Slice& a, const Slice& b) const override {
+ return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+
+ // No-ops since the C binding does not support key shortening methods.
+ virtual void FindShortestSeparator(std::string*,
+ const Slice&) const override {}
+ virtual void FindShortSuccessor(std::string* key) const override {}
+};
+
+struct rocksdb_filterpolicy_t : public FilterPolicy {
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*create_)(
+ void*,
+ const char* const* key_array, const size_t* key_length_array,
+ int num_keys,
+ size_t* filter_length);
+ unsigned char (*key_match_)(
+ void*,
+ const char* key, size_t length,
+ const char* filter, size_t filter_length);
+ void (*delete_filter_)(
+ void*,
+ const char* filter, size_t filter_length);
+
+ virtual ~rocksdb_filterpolicy_t() {
+ (*destructor_)(state_);
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+
+ virtual void CreateFilter(const Slice* keys, int n,
+ std::string* dst) const override {
+ std::vector<const char*> key_pointers(n);
+ std::vector<size_t> key_sizes(n);
+ for (int i = 0; i < n; i++) {
+ key_pointers[i] = keys[i].data();
+ key_sizes[i] = keys[i].size();
+ }
+ size_t len;
+ char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
+ dst->append(filter, len);
+
+ if (delete_filter_ != nullptr) {
+ (*delete_filter_)(state_, filter, len);
+ } else {
+ free(filter);
+ }
+ }
+
+ virtual bool KeyMayMatch(const Slice& key,
+ const Slice& filter) const override {
+ return (*key_match_)(state_, key.data(), key.size(),
+ filter.data(), filter.size());
+ }
+};
+
+struct rocksdb_mergeoperator_t : public MergeOperator {
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*full_merge_)(
+ void*,
+ const char* key, size_t key_length,
+ const char* existing_value, size_t existing_value_length,
+ const char* const* operands_list, const size_t* operands_list_length,
+ int num_operands,
+ unsigned char* success, size_t* new_value_length);
+ char* (*partial_merge_)(void*, const char* key, size_t key_length,
+ const char* const* operands_list,
+ const size_t* operands_list_length, int num_operands,
+ unsigned char* success, size_t* new_value_length);
+ void (*delete_value_)(
+ void*,
+ const char* value, size_t value_length);
+
+ virtual ~rocksdb_mergeoperator_t() {
+ (*destructor_)(state_);
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+
+ virtual bool FullMergeV2(const MergeOperationInput& merge_in,
+ MergeOperationOutput* merge_out) const override {
+ size_t n = merge_in.operand_list.size();
+ std::vector<const char*> operand_pointers(n);
+ std::vector<size_t> operand_sizes(n);
+ for (size_t i = 0; i < n; i++) {
+ Slice operand(merge_in.operand_list[i]);
+ operand_pointers[i] = operand.data();
+ operand_sizes[i] = operand.size();
+ }
+
+ const char* existing_value_data = nullptr;
+ size_t existing_value_len = 0;
+ if (merge_in.existing_value != nullptr) {
+ existing_value_data = merge_in.existing_value->data();
+ existing_value_len = merge_in.existing_value->size();
+ }
+
+ unsigned char success;
+ size_t new_value_len;
+ char* tmp_new_value = (*full_merge_)(
+ state_, merge_in.key.data(), merge_in.key.size(), existing_value_data,
+ existing_value_len, &operand_pointers[0], &operand_sizes[0],
+ static_cast<int>(n), &success, &new_value_len);
+ merge_out->new_value.assign(tmp_new_value, new_value_len);
+
+ if (delete_value_ != nullptr) {
+ (*delete_value_)(state_, tmp_new_value, new_value_len);
+ } else {
+ free(tmp_new_value);
+ }
+
+ return success;
+ }
+
+ virtual bool PartialMergeMulti(const Slice& key,
+ const std::deque& operand_list,
+ std::string* new_value,
+ Logger* logger) const override {
+ size_t operand_count = operand_list.size();
+ std::vector<const char*> operand_pointers(operand_count);
+ std::vector<size_t> operand_sizes(operand_count);
+ for (size_t i = 0; i < operand_count; ++i) {
+ Slice operand(operand_list[i]);
+ operand_pointers[i] = operand.data();
+ operand_sizes[i] = operand.size();
+ }
+
+ unsigned char success;
+ size_t new_value_len;
+ char* tmp_new_value = (*partial_merge_)(
+ state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
+ static_cast<int>(operand_count), &success, &new_value_len);
+ new_value->assign(tmp_new_value, new_value_len);
+
+ if (delete_value_ != nullptr) {
+ (*delete_value_)(state_, tmp_new_value, new_value_len);
+ } else {
+ free(tmp_new_value);
+ }
+
+ return success;
+ }
+};
+
+struct rocksdb_dbpath_t {
+ DbPath rep;
+};
+
+struct rocksdb_env_t {
+ Env* rep;
+ bool is_default;
+};
+
+struct rocksdb_slicetransform_t : public SliceTransform {
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*transform_)(
+ void*,
+ const char* key, size_t length,
+ size_t* dst_length);
+ unsigned char (*in_domain_)(
+ void*,
+ const char* key, size_t length);
+ unsigned char (*in_range_)(
+ void*,
+ const char* key, size_t length);
+
+ virtual ~rocksdb_slicetransform_t() {
+ (*destructor_)(state_);
+ }
+
+ virtual const char* Name() const override { return (*name_)(state_); }
+
+ virtual Slice Transform(const Slice& src) const override {
+ size_t len;
+ char* dst = (*transform_)(state_, src.data(), src.size(), &len);
+ return Slice(dst, len);
+ }
+
+ virtual bool InDomain(const Slice& src) const override {
+ return (*in_domain_)(state_, src.data(), src.size());
+ }
+
+ virtual bool InRange(const Slice& src) const override {
+ return (*in_range_)(state_, src.data(), src.size());
+ }
+};
+
+struct rocksdb_universal_compaction_options_t {
+ rocksdb::CompactionOptionsUniversal *rep;
+};
+
+static bool SaveError(char** errptr, const Status& s) {
+ assert(errptr != nullptr);
+ if (s.ok()) {
+ return false;
+ } else if (*errptr == nullptr) {
+ *errptr = strdup(s.ToString().c_str());
+ } else {
+ // TODO(sanjay): Merge with existing error?
+ // This is a bug if *errptr is not created by malloc()
+ free(*errptr);
+ *errptr = strdup(s.ToString().c_str());
+ }
+ return true;
+}
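+
+// Illustrative use of the errptr convention from C (a sketch, not part of the
+// original sources): callers pass a char** initialized to NULL; on failure the
+// status message is strdup()'d into it and the caller must free() it.
+//
+//   char* err = NULL;
+//   rocksdb_put(db, woptions, "key", 3, "value", 5, &err);
+//   if (err != NULL) {
+//     fprintf(stderr, "put failed: %s\n", err);
+//     free(err);
+//   }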
+
+static char* CopyString(const std::string& str) {
+ char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
+ memcpy(result, str.data(), sizeof(char) * str.size());
+ return result;
+}
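+
+// Note: CopyString() returns a malloc()'d buffer that is not NUL-terminated;
+// callers such as rocksdb_get() report the length separately through an out
+// parameter, and the buffer must be released with free().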
+
+rocksdb_t* rocksdb_open(
+ const rocksdb_options_t* options,
+ const char* name,
+ char** errptr) {
+ DB* db;
+ if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
+ return nullptr;
+ }
+ rocksdb_t* result = new rocksdb_t;
+ result->rep = db;
+ return result;
+}
+
+rocksdb_t* rocksdb_open_for_read_only(
+ const rocksdb_options_t* options,
+ const char* name,
+ unsigned char error_if_log_file_exist,
+ char** errptr) {
+ DB* db;
+ if (SaveError(errptr, DB::OpenForReadOnly(options->rep, std::string(name), &db, error_if_log_file_exist))) {
+ return nullptr;
+ }
+ rocksdb_t* result = new rocksdb_t;
+ result->rep = db;
+ return result;
+}
+
+rocksdb_backup_engine_t* rocksdb_backup_engine_open(
+ const rocksdb_options_t* options, const char* path, char** errptr) {
+ BackupEngine* be;
+ if (SaveError(errptr, BackupEngine::Open(options->rep.env,
+ BackupableDBOptions(path,
+ nullptr,
+ true,
+ options->rep.info_log.get()),
+ &be))) {
+ return nullptr;
+ }
+ rocksdb_backup_engine_t* result = new rocksdb_backup_engine_t;
+ result->rep = be;
+ return result;
+}
+
+void rocksdb_backup_engine_create_new_backup(rocksdb_backup_engine_t* be,
+ rocksdb_t* db, char** errptr) {
+ SaveError(errptr, be->rep->CreateNewBackup(db->rep));
+}
+
+void rocksdb_backup_engine_purge_old_backups(rocksdb_backup_engine_t* be,
+ uint32_t num_backups_to_keep,
+ char** errptr) {
+ SaveError(errptr, be->rep->PurgeOldBackups(num_backups_to_keep));
+}
+
+rocksdb_restore_options_t* rocksdb_restore_options_create() {
+ return new rocksdb_restore_options_t;
+}
+
+void rocksdb_restore_options_destroy(rocksdb_restore_options_t* opt) {
+ delete opt;
+}
+
+void rocksdb_restore_options_set_keep_log_files(rocksdb_restore_options_t* opt,
+ int v) {
+ opt->rep.keep_log_files = v;
+}
+
+void rocksdb_backup_engine_restore_db_from_latest_backup(
+ rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir,
+ const rocksdb_restore_options_t* restore_options, char** errptr) {
+ SaveError(errptr, be->rep->RestoreDBFromLatestBackup(std::string(db_dir),
+ std::string(wal_dir),
+ restore_options->rep));
+}
+
+const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info(
+ rocksdb_backup_engine_t* be) {
+ rocksdb_backup_engine_info_t* result = new rocksdb_backup_engine_info_t;
+ be->rep->GetBackupInfo(&result->rep);
+ return result;
+}
+
+int rocksdb_backup_engine_info_count(const rocksdb_backup_engine_info_t* info) {
+ return static_cast<int>(info->rep.size());
+}
+
+int64_t rocksdb_backup_engine_info_timestamp(
+ const rocksdb_backup_engine_info_t* info, int index) {
+ return info->rep[index].timestamp;
+}
+
+uint32_t rocksdb_backup_engine_info_backup_id(
+ const rocksdb_backup_engine_info_t* info, int index) {
+ return info->rep[index].backup_id;
+}
+
+uint64_t rocksdb_backup_engine_info_size(
+ const rocksdb_backup_engine_info_t* info, int index) {
+ return info->rep[index].size;
+}
+
+uint32_t rocksdb_backup_engine_info_number_files(
+ const rocksdb_backup_engine_info_t* info, int index) {
+ return info->rep[index].number_files;
+}
+
+void rocksdb_backup_engine_info_destroy(
+ const rocksdb_backup_engine_info_t* info) {
+ delete info;
+}
+
+void rocksdb_backup_engine_close(rocksdb_backup_engine_t* be) {
+ delete be->rep;
+ delete be;
+}
+
+rocksdb_checkpoint_t* rocksdb_checkpoint_object_create(rocksdb_t* db,
+ char** errptr) {
+ Checkpoint* checkpoint;
+ if (SaveError(errptr, Checkpoint::Create(db->rep, &checkpoint))) {
+ return nullptr;
+ }
+ rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
+ result->rep = checkpoint;
+ return result;
+}
+
+void rocksdb_checkpoint_create(rocksdb_checkpoint_t* checkpoint,
+ const char* checkpoint_dir,
+ uint64_t log_size_for_flush, char** errptr) {
+ SaveError(errptr, checkpoint->rep->CreateCheckpoint(
+ std::string(checkpoint_dir), log_size_for_flush));
+}
+
+void rocksdb_checkpoint_object_destroy(rocksdb_checkpoint_t* checkpoint) {
+ delete checkpoint->rep;
+ delete checkpoint;
+}
+
+void rocksdb_close(rocksdb_t* db) {
+ delete db->rep;
+ delete db;
+}
+
+void rocksdb_options_set_uint64add_merge_operator(rocksdb_options_t* opt) {
+ opt->rep.merge_operator = rocksdb::MergeOperators::CreateUInt64AddOperator();
+}
+
+rocksdb_t* rocksdb_open_column_families(
+ const rocksdb_options_t* db_options,
+ const char* name,
+ int num_column_families,
+ const char** column_family_names,
+ const rocksdb_options_t** column_family_options,
+ rocksdb_column_family_handle_t** column_family_handles,
+ char** errptr) {
+ std::vector<ColumnFamilyDescriptor> column_families;
+ for (int i = 0; i < num_column_families; i++) {
+ column_families.push_back(ColumnFamilyDescriptor(
+ std::string(column_family_names[i]),
+ ColumnFamilyOptions(column_family_options[i]->rep)));
+ }
+
+ DB* db;
+ std::vector<ColumnFamilyHandle*> handles;
+ if (SaveError(errptr, DB::Open(DBOptions(db_options->rep),
+ std::string(name), column_families, &handles, &db))) {
+ return nullptr;
+ }
+
+ for (size_t i = 0; i < handles.size(); i++) {
+ rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
+ c_handle->rep = handles[i];
+ column_family_handles[i] = c_handle;
+ }
+ rocksdb_t* result = new rocksdb_t;
+ result->rep = db;
+ return result;
+}
+
+rocksdb_t* rocksdb_open_for_read_only_column_families(
+ const rocksdb_options_t* db_options,
+ const char* name,
+ int num_column_families,
+ const char** column_family_names,
+ const rocksdb_options_t** column_family_options,
+ rocksdb_column_family_handle_t** column_family_handles,
+ unsigned char error_if_log_file_exist,
+ char** errptr) {
+ std::vector<ColumnFamilyDescriptor> column_families;
+ for (int i = 0; i < num_column_families; i++) {
+ column_families.push_back(ColumnFamilyDescriptor(
+ std::string(column_family_names[i]),
+ ColumnFamilyOptions(column_family_options[i]->rep)));
+ }
+
+ DB* db;
+ std::vector<ColumnFamilyHandle*> handles;
+ if (SaveError(errptr, DB::OpenForReadOnly(DBOptions(db_options->rep),
+ std::string(name), column_families, &handles, &db, error_if_log_file_exist))) {
+ return nullptr;
+ }
+
+ for (size_t i = 0; i < handles.size(); i++) {
+ rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
+ c_handle->rep = handles[i];
+ column_family_handles[i] = c_handle;
+ }
+ rocksdb_t* result = new rocksdb_t;
+ result->rep = db;
+ return result;
+}
+
+char** rocksdb_list_column_families(
+ const rocksdb_options_t* options,
+ const char* name,
+ size_t* lencfs,
+ char** errptr) {
+ std::vector<std::string> fams;
+ SaveError(errptr,
+ DB::ListColumnFamilies(DBOptions(options->rep),
+ std::string(name), &fams));
+
+ *lencfs = fams.size();
+ char** column_families = static_cast<char**>(malloc(sizeof(char*) * fams.size()));
+ for (size_t i = 0; i < fams.size(); i++) {
+ column_families[i] = strdup(fams[i].c_str());
+ }
+ return column_families;
+}
+
+void rocksdb_list_column_families_destroy(char** list, size_t len) {
+ for (size_t i = 0; i < len; ++i) {
+ free(list[i]);
+ }
+ free(list);
+}
+
+rocksdb_column_family_handle_t* rocksdb_create_column_family(
+ rocksdb_t* db,
+ const rocksdb_options_t* column_family_options,
+ const char* column_family_name,
+ char** errptr) {
+ rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
+ SaveError(errptr,
+ db->rep->CreateColumnFamily(ColumnFamilyOptions(column_family_options->rep),
+ std::string(column_family_name), &(handle->rep)));
+ return handle;
+}
+
+void rocksdb_drop_column_family(
+ rocksdb_t* db,
+ rocksdb_column_family_handle_t* handle,
+ char** errptr) {
+ SaveError(errptr, db->rep->DropColumnFamily(handle->rep));
+}
+
+void rocksdb_column_family_handle_destroy(rocksdb_column_family_handle_t* handle) {
+ delete handle->rep;
+ delete handle;
+}
+
+void rocksdb_put(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ const char* val, size_t vallen,
+ char** errptr) {
+ SaveError(errptr,
+ db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
+}
+
+void rocksdb_put_cf(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t keylen,
+ const char* val, size_t vallen,
+ char** errptr) {
+ SaveError(errptr,
+ db->rep->Put(options->rep, column_family->rep,
+ Slice(key, keylen), Slice(val, vallen)));
+}
+
+void rocksdb_delete(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ char** errptr) {
+ SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
+}
+
+void rocksdb_delete_cf(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t keylen,
+ char** errptr) {
+ SaveError(errptr, db->rep->Delete(options->rep, column_family->rep,
+ Slice(key, keylen)));
+}
+
+void rocksdb_merge(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ const char* val, size_t vallen,
+ char** errptr) {
+ SaveError(errptr,
+ db->rep->Merge(options->rep, Slice(key, keylen), Slice(val, vallen)));
+}
+
+void rocksdb_merge_cf(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t keylen,
+ const char* val, size_t vallen,
+ char** errptr) {
+ SaveError(errptr,
+ db->rep->Merge(options->rep, column_family->rep,
+ Slice(key, keylen), Slice(val, vallen)));
+}
+
+void rocksdb_write(
+ rocksdb_t* db,
+ const rocksdb_writeoptions_t* options,
+ rocksdb_writebatch_t* batch,
+ char** errptr) {
+ SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
+}
+
+char* rocksdb_get(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options,
+ const char* key, size_t keylen,
+ size_t* vallen,
+ char** errptr) {
+ char* result = nullptr;
+ std::string tmp;
+ Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
+ if (s.ok()) {
+ *vallen = tmp.size();
+ result = CopyString(tmp);
+ } else {
+ *vallen = 0;
+ if (!s.IsNotFound()) {
+ SaveError(errptr, s);
+ }
+ }
+ return result;
+}
+
+char* rocksdb_get_cf(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t keylen,
+ size_t* vallen,
+ char** errptr) {
+ char* result = nullptr;
+ std::string tmp;
+ Status s = db->rep->Get(options->rep, column_family->rep,
+ Slice(key, keylen), &tmp);
+ if (s.ok()) {
+ *vallen = tmp.size();
+ result = CopyString(tmp);
+ } else {
+ *vallen = 0;
+ if (!s.IsNotFound()) {
+ SaveError(errptr, s);
+ }
+ }
+ return result;
+}
+
+void rocksdb_multi_get(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options,
+ size_t num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ char** values_list, size_t* values_list_sizes,
+ char** errs) {
+ std::vector<Slice> keys(num_keys);
+ for (size_t i = 0; i < num_keys; i++) {
+ keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ std::vector<std::string> values(num_keys);
+ std::vector<Status> statuses = db->rep->MultiGet(options->rep, keys, &values);
+ for (size_t i = 0; i < num_keys; i++) {
+ if (statuses[i].ok()) {
+ values_list[i] = CopyString(values[i]);
+ values_list_sizes[i] = values[i].size();
+ errs[i] = nullptr;
+ } else {
+ values_list[i] = nullptr;
+ values_list_sizes[i] = 0;
+ if (!statuses[i].IsNotFound()) {
+ errs[i] = strdup(statuses[i].ToString().c_str());
+ } else {
+ errs[i] = nullptr;
+ }
+ }
+ }
+}
+
+void rocksdb_multi_get_cf(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options,
+ const rocksdb_column_family_handle_t* const* column_families,
+ size_t num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ char** values_list, size_t* values_list_sizes,
+ char** errs) {
+ std::vector<Slice> keys(num_keys);
+ std::vector<ColumnFamilyHandle*> cfs(num_keys);
+ for (size_t i = 0; i < num_keys; i++) {
+ keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ cfs[i] = column_families[i]->rep;
+ }
+ std::vector<std::string> values(num_keys);
+ std::vector<Status> statuses = db->rep->MultiGet(options->rep, cfs, keys, &values);
+ for (size_t i = 0; i < num_keys; i++) {
+ if (statuses[i].ok()) {
+ values_list[i] = CopyString(values[i]);
+ values_list_sizes[i] = values[i].size();
+ errs[i] = nullptr;
+ } else {
+ values_list[i] = nullptr;
+ values_list_sizes[i] = 0;
+ if (!statuses[i].IsNotFound()) {
+ errs[i] = strdup(statuses[i].ToString().c_str());
+ } else {
+ errs[i] = nullptr;
+ }
+ }
+ }
+}
+
+rocksdb_iterator_t* rocksdb_create_iterator(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options) {
+ rocksdb_iterator_t* result = new rocksdb_iterator_t;
+ result->rep = db->rep->NewIterator(options->rep);
+ return result;
+}
+
+rocksdb_iterator_t* rocksdb_create_iterator_cf(
+ rocksdb_t* db,
+ const rocksdb_readoptions_t* options,
+ rocksdb_column_family_handle_t* column_family) {
+ rocksdb_iterator_t* result = new rocksdb_iterator_t;
+ result->rep = db->rep->NewIterator(options->rep, column_family->rep);
+ return result;
+}
+
+void rocksdb_create_iterators(
+ rocksdb_t *db,
+ rocksdb_readoptions_t* opts,
+ rocksdb_column_family_handle_t** column_families,
+ rocksdb_iterator_t** iterators,
+ size_t size,
+ char** errptr) {
+ std::vector<ColumnFamilyHandle*> column_families_vec;
+ for (size_t i = 0; i < size; i++) {
+ column_families_vec.push_back(column_families[i]->rep);
+ }
+
+ std::vector<Iterator*> res;
+ Status status = db->rep->NewIterators(opts->rep, column_families_vec, &res);
+ assert(res.size() == size);
+ if (SaveError(errptr, status)) {
+ return;
+ }
+
+ for (size_t i = 0; i < size; i++) {
+ iterators[i] = new rocksdb_iterator_t;
+ iterators[i]->rep = res[i];
+ }
+}
+
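+// Snapshots come in pairs: rocksdb_create_snapshot() wraps GetSnapshot(), and
+// rocksdb_release_snapshot() both hands the snapshot back to the DB and deletes
+// the wrapper, so each snapshot must be released exactly once.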
+const rocksdb_snapshot_t* rocksdb_create_snapshot(
+ rocksdb_t* db) {
+ rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
+ result->rep = db->rep->GetSnapshot();
+ return result;
+}
+
+void rocksdb_release_snapshot(
+ rocksdb_t* db,
+ const rocksdb_snapshot_t* snapshot) {
+ db->rep->ReleaseSnapshot(snapshot->rep);
+ delete snapshot;
+}
+
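+// String properties are returned as strdup()'d text that the caller frees (or
+// nullptr for an unknown property); rocksdb_property_int signals success with 0
+// and failure with -1 instead of an error string.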
+char* rocksdb_property_value(
+ rocksdb_t* db,
+ const char* propname) {
+ std::string tmp;
+ if (db->rep->GetProperty(Slice(propname), &tmp)) {
+ // We use strdup() since we expect human readable output.
+ return strdup(tmp.c_str());
+ } else {
+ return nullptr;
+ }
+}
+
+int rocksdb_property_int(
+ rocksdb_t* db,
+ const char* propname,
+ uint64_t *out_val) {
+ if (db->rep->GetIntProperty(Slice(propname), out_val)) {
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+char* rocksdb_property_value_cf(
+ rocksdb_t* db,
+ rocksdb_column_family_handle_t* column_family,
+ const char* propname) {
+ std::string tmp;
+ if (db->rep->GetProperty(column_family->rep, Slice(propname), &tmp)) {
+ // We use strdup() since we expect human readable output.
+ return strdup(tmp.c_str());
+ } else {
+ return nullptr;
+ }
+}
+
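+// GetApproximateSizes fills sizes[i] with an estimate of the on-disk bytes used
+// by keys in the half-open range [range_start_key[i], range_limit_key[i]).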
+void rocksdb_approximate_sizes(
+ rocksdb_t* db,
+ int num_ranges,
+ const char* const* range_start_key, const size_t* range_start_key_len,
+ const char* const* range_limit_key, const size_t* range_limit_key_len,
+ uint64_t* sizes) {
+ Range* ranges = new Range[num_ranges];
+ for (int i = 0; i < num_ranges; i++) {
+ ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
+ ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
+ }
+ db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+ delete[] ranges;
+}
+
+void rocksdb_approximate_sizes_cf(
+ rocksdb_t* db,
+ rocksdb_column_family_handle_t* column_family,
+ int num_ranges,
+ const char* const* range_start_key, const size_t* range_start_key_len,
+ const char* const* range_limit_key, const size_t* range_limit_key_len,
+ uint64_t* sizes) {
+ Range* ranges = new Range[num_ranges];
+ for (int i = 0; i < num_ranges; i++) {
+ ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
+ ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
+ }
+ db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes);
+ delete[] ranges;
+}
+
+void rocksdb_delete_file(
+ rocksdb_t* db,
+ const char* name) {
+ db->rep->DeleteFile(name);
+}
+
+const rocksdb_livefiles_t* rocksdb_livefiles(
+ rocksdb_t* db) {
+ rocksdb_livefiles_t* result = new rocksdb_livefiles_t;
+ db->rep->GetLiveFilesMetaData(&result->rep);
+ return result;
+}
+
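+// For the CompactRange wrappers a nullptr start or limit key means the range is
+// open on that side, so passing nullptr for both compacts the entire key space.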
+void rocksdb_compact_range(
+ rocksdb_t* db,
+ const char* start_key, size_t start_key_len,
+ const char* limit_key, size_t limit_key_len) {
+ Slice a, b;
+ db->rep->CompactRange(
+ CompactRangeOptions(),
+ // Pass nullptr Slice if corresponding "const char*" is nullptr
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
+}
+
+void rocksdb_compact_range_cf(
+ rocksdb_t* db,
+ rocksdb_column_family_handle_t* column_family,
+ const char* start_key, size_t start_key_len,
+ const char* limit_key, size_t limit_key_len) {
+ Slice a, b;
+ db->rep->CompactRange(
+ CompactRangeOptions(), column_family->rep,
+ // Pass nullptr Slice if corresponding "const char*" is nullptr
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
+}
+
+void rocksdb_compact_range_opt(rocksdb_t* db, rocksdb_compactoptions_t* opt,
+ const char* start_key, size_t start_key_len,
+ const char* limit_key, size_t limit_key_len) {
+ Slice a, b;
+ db->rep->CompactRange(
+ opt->rep,
+ // Pass nullptr Slice if corresponding "const char*" is nullptr
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
+}
+
+void rocksdb_compact_range_cf_opt(rocksdb_t* db,
+ rocksdb_column_family_handle_t* column_family,
+ rocksdb_compactoptions_t* opt,
+ const char* start_key, size_t start_key_len,
+ const char* limit_key, size_t limit_key_len) {
+ Slice a, b;
+ db->rep->CompactRange(
+ opt->rep, column_family->rep,
+ // Pass nullptr Slice if corresponding "const char*" is nullptr
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
+}
+
+void rocksdb_flush(
+ rocksdb_t* db,
+ const rocksdb_flushoptions_t* options,
+ char** errptr) {
+ SaveError(errptr, db->rep->Flush(options->rep));
+}
+
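+// Disable/EnableFileDeletions typically bracket backup-style file copies; a
+// non-zero force flag re-enables deletions unconditionally even if
+// DisableFileDeletions was called more than once.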
+void rocksdb_disable_file_deletions(
+ rocksdb_t* db,
+ char** errptr) {
+ SaveError(errptr, db->rep->DisableFileDeletions());
+}
+
+void rocksdb_enable_file_deletions(
+ rocksdb_t* db,
+ unsigned char force,
+ char** errptr) {
+ SaveError(errptr, db->rep->EnableFileDeletions(force));
+}
+
+void rocksdb_destroy_db(
+ const rocksdb_options_t* options,
+ const char* name,
+ char** errptr) {
+ SaveError(errptr, DestroyDB(name, options->rep));
+}
+
+void rocksdb_repair_db(
+ const rocksdb_options_t* options,
+ const char* name,
+ char** errptr) {
+ SaveError(errptr, RepairDB(name, options->rep));
+}
+
+void rocksdb_iter_destroy(rocksdb_iterator_t* iter) {
+ delete iter->rep;
+ delete iter;
+}
+
+unsigned char rocksdb_iter_valid(const rocksdb_iterator_t* iter) {
+ return iter->rep->Valid();
+}
+
+void rocksdb_iter_seek_to_first(rocksdb_iterator_t* iter) {
+ iter->rep->SeekToFirst();
+}
+
+void rocksdb_iter_seek_to_last(rocksdb_iterator_t* iter) {
+ iter->rep->SeekToLast();
+}
+
+void rocksdb_iter_seek(rocksdb_iterator_t* iter, const char* k, size_t klen) {
+ iter->rep->Seek(Slice(k, klen));
+}
+
+void rocksdb_iter_seek_for_prev(rocksdb_iterator_t* iter, const char* k,
+ size_t klen) {
+ iter->rep->SeekForPrev(Slice(k, klen));
+}
+
+void rocksdb_iter_next(rocksdb_iterator_t* iter) {
+ iter->rep->Next();
+}
+
+void rocksdb_iter_prev(rocksdb_iterator_t* iter) {
+ iter->rep->Prev();
+}
+
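+// The key/value pointers returned below reference the iterator's current entry
+// and remain valid only until the iterator is moved or destroyed; callers that
+// need the bytes longer must copy them.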
+const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen) {
+ Slice s = iter->rep->key();
+ *klen = s.size();
+ return s.data();
+}
+
+const char* rocksdb_iter_value(const rocksdb_iterator_t* iter, size_t* vlen) {
+ Slice s = iter->rep->value();
+ *vlen = s.size();
+ return s.data();
+}
+
+void rocksdb_iter_get_error(const rocksdb_iterator_t* iter, char** errptr) {
+ SaveError(errptr, iter->rep->status());
+}
+
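+// A write batch can be built up call by call or reconstructed from a previously
+// serialized representation: rocksdb_writebatch_create_from() passes the raw
+// bytes straight to WriteBatch's rep-string constructor.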
+rocksdb_writebatch_t* rocksdb_writebatch_create() {
+ return new rocksdb_writebatch_t;
+}
+
+rocksdb_writebatch_t* rocksdb_writebatch_create_from(const char* rep,
+ size_t size) {
+ rocksdb_writebatch_t* b = new rocksdb_writebatch_t;
+ b->rep = WriteBatch(std::string(rep, size));
+ return b;
+}
+
+void rocksdb_writebatch_destroy(rocksdb_writebatch_t* b) {
+ delete b;
+}
+
+void rocksdb_writebatch_clear(rocksdb_writebatch_t* b) {
+ b->rep.Clear();
+}
+
+int rocksdb_writebatch_count(rocksdb_writebatch_t* b) {
+ return b->rep.Count();
+}
+
+void rocksdb_writebatch_put(
+ rocksdb_writebatch_t* b,
+ const char* key, size_t klen,
+ const char* val, size_t vlen) {
+ b->rep.Put(Slice(key, klen), Slice(val, vlen));
+}
+
+void rocksdb_writebatch_put_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t klen,
+ const char* val, size_t vlen) {
+ b->rep.Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
+}
+
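+// The *v ("vectored") variants assemble SliceParts from the fragment arrays, so
+// the stored key and value are the in-order concatenation of the supplied
+// fragments.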
+void rocksdb_writebatch_putv(
+ rocksdb_writebatch_t* b,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ int num_values, const char* const* values_list,
+ const size_t* values_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ std::vector<Slice> value_slices(num_values);
+ for (int i = 0; i < num_values; i++) {
+ value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
+ }
+ b->rep.Put(SliceParts(key_slices.data(), num_keys),
+ SliceParts(value_slices.data(), num_values));
+}
+
+void rocksdb_writebatch_putv_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ int num_values, const char* const* values_list,
+ const size_t* values_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ std::vector<Slice> value_slices(num_values);
+ for (int i = 0; i < num_values; i++) {
+ value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
+ }
+ b->rep.Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
+ SliceParts(value_slices.data(), num_values));
+}
+
+void rocksdb_writebatch_merge(
+ rocksdb_writebatch_t* b,
+ const char* key, size_t klen,
+ const char* val, size_t vlen) {
+ b->rep.Merge(Slice(key, klen), Slice(val, vlen));
+}
+
+void rocksdb_writebatch_merge_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t klen,
+ const char* val, size_t vlen) {
+ b->rep.Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
+}
+
+void rocksdb_writebatch_mergev(
+ rocksdb_writebatch_t* b,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ int num_values, const char* const* values_list,
+ const size_t* values_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ std::vector<Slice> value_slices(num_values);
+ for (int i = 0; i < num_values; i++) {
+ value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
+ }
+ b->rep.Merge(SliceParts(key_slices.data(), num_keys),
+ SliceParts(value_slices.data(), num_values));
+}
+
+void rocksdb_writebatch_mergev_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes,
+ int num_values, const char* const* values_list,
+ const size_t* values_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ std::vector<Slice> value_slices(num_values);
+ for (int i = 0; i < num_values; i++) {
+ value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
+ }
+ b->rep.Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
+ SliceParts(value_slices.data(), num_values));
+}
+
+void rocksdb_writebatch_delete(
+ rocksdb_writebatch_t* b,
+ const char* key, size_t klen) {
+ b->rep.Delete(Slice(key, klen));
+}
+
+void rocksdb_writebatch_delete_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ const char* key, size_t klen) {
+ b->rep.Delete(column_family->rep, Slice(key, klen));
+}
+
+void rocksdb_writebatch_deletev(
+ rocksdb_writebatch_t* b,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ b->rep.Delete(SliceParts(key_slices.data(), num_keys));
+}
+
+void rocksdb_writebatch_deletev_cf(
+ rocksdb_writebatch_t* b,
+ rocksdb_column_family_handle_t* column_family,
+ int num_keys, const char* const* keys_list,
+ const size_t* keys_list_sizes) {
+ std::vector<Slice> key_slices(num_keys);
+ for (int i = 0; i < num_keys; i++) {
+ key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
+ }
+ b->rep.Delete(column_family->rep, SliceParts(key_slices.data(), num_keys));
+}
+
+void rocksdb_writebatch_delete_range(rocksdb_writebatch_t* b,
+ const char* start_key,
+ size_t start_key_len, const char* end_key,
+ size_t end_key_len) {
+ b->rep.DeleteRange(Slice(start_key, start_key_len),
+ Slice(end_key, end_key_len));
+}
+
+void rocksdb_writebatch_delete_range_cf(
+ rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
+ const char* start_key, size_t start_key_len, const char* end_key,
+ size_t end_key_len) {
+ b->rep.DeleteRange(column_family->rep, Slice(start_key, start_key_len),
+ Slice(end_key, end_key_len));
+}
+
+void rocksdb_writebatch_delete_rangev(rocksdb_writebatch_t* b, int num_keys,
+ const char* const* start_keys_list,
+ const size_t* start_keys_list_sizes,
+ const char* const* end_keys_list,
+ const size_t* end_keys_list_sizes) {
+ std::vector