Mirror of https://github.com/google/leveldb.git, synced 2025-03-28 12:10:08 +08:00

Commit 1d94fe2f4d: Merge branch 'master' into patch-2
.appveyor.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
# Build matrix / environment variables are explained on:
# https://www.appveyor.com/docs/appveyor-yml/
# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml

version: "{build}"

environment:
  matrix:
    # AppVeyor currently has no custom job name feature.
    # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
    - JOB: Visual Studio 2017
      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
      CMAKE_GENERATOR: Visual Studio 15 2017

platform:
  - x86
  - x64

configuration:
  - RelWithDebInfo
  - Debug

build_script:
  - git submodule update --init --recursive
  - mkdir build
  - cd build
  - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
  - cmake --version
  - cmake .. -G "%CMAKE_GENERATOR%"
      -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
  - cmake --build . --config "%CONFIGURATION%"
  - cd ..

test_script:
  - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
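The matrix above produces both 32-bit (x86) and 64-bit (x64) Windows binaries by appending " Win64" to the CMake generator. As a rough, hedged illustration only (this program is not part of the commit), the word size of whichever binary a given job built can be sanity-checked from C++ like this:

```cpp
#include <cstdio>

int main() {
  // Prints 4 for the x86 (Win32) configuration, 8 for the x64 (Win64) one.
  std::printf("pointer size: %zu bytes\n", sizeof(void*));
  return 0;
}
```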
.clang-format (new file, 18 lines)
@@ -0,0 +1,18 @@
# Run manually to reformat a file:
#   clang-format -i --style=file <file>
# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
BasedOnStyle: Google
DerivePointerAlignment: false

# Public headers are in a different location in the internal Google repository.
# Order them so that when imported to the authoritative repository they will be
# in correct alphabetical order.
IncludeCategories:
  - Regex: '^(<|"(benchmarks|db|helpers)/)'
    Priority: 1
  - Regex: '^"(leveldb)/'
    Priority: 2
  - Regex: '^(<|"(issues|port|table|third_party|util)/)'
    Priority: 3
  - Regex: '.*'
    Priority: 4
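The IncludeCategories rules above sort leveldb's in-tree implementation headers ahead of the public leveldb/ headers, which in turn come before port/, table/, util/ and everything else. As a hedged illustration (the header names are real leveldb files, but the snippet itself is not from the commit), clang-format would settle an include block into this order:

```cpp
// Priority 1: in-tree implementation headers (benchmarks/, db/, helpers/).
#include "db/db_impl.h"
#include "db/version_set.h"
// Priority 2: public API headers under leveldb/.
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
// Priority 3: port/, table/, util/ and other support directories.
#include "port/port.h"
#include "util/crc32c.h"
```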
.gitignore (vendored)
@@ -1,9 +1,8 @@
-build_config.mk
-*.a
-*.o
-*.dylib*
-*.so
-*.so.*
-*_test
-db_bench
-leveldbutil
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/
.travis.yml
@@ -1,13 +1,79 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
 language: cpp
+dist: xenial
+osx_image: xcode10.2
+
 compiler:
-- clang
 - gcc
+- clang
 os:
 - linux
 - osx
-sudo: false
-before_install:
-- echo $LANG
-- echo $LC_ALL
+
+env:
+- BUILD_TYPE=Debug
+- BUILD_TYPE=RelWithDebInfo
+
+addons:
+  apt:
+    sources:
+    - llvm-toolchain-xenial-8
+    - ubuntu-toolchain-r-test
+    packages:
+    - clang-8
+    - cmake
+    - gcc-8
+    - g++-8
+    - libgoogle-perftools-dev
+    - libkyotocabinet-dev
+    - libsnappy-dev
+    - libsqlite3-dev
+    - ninja-build
+  homebrew:
+    packages:
+    - cmake
+    - crc32c
+    - gcc@8
+    - gperftools
+    - kyotocabinet
+    - llvm@8
+    - ninja
+    - snappy
+    - sqlite3
+    update: true
+
+install:
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+    export PATH="$(brew --prefix llvm)/bin:$PATH";
+  fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+    if [ "$CXX" = "clang++" ]; then export CXX="clang++-8" CC="clang-8"; fi;
+  fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+- cmake --build .
+- cd ..
+
 script:
-- make -j 4 check
+- cd build && ctest --verbose && cd ..
+- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
+- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
+- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
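The new env matrix runs every compiler/OS combination twice, once with BUILD_TYPE=Debug and once with BUILD_TYPE=RelWithDebInfo. The practical difference those CMake build types make to the compiled code is roughly what this hedged C++ sketch shows (not part of the commit): RelWithDebInfo and Release define NDEBUG, so assert() checks disappear while optimization and debug info remain.

```cpp
#include <cassert>
#include <cstdio>

int main() {
  int open_files = 0;
  // Evaluated in a Debug build; compiled out entirely when NDEBUG is defined,
  // as it is for the RelWithDebInfo and Release build types by default.
  assert(open_files == 0);

#ifdef NDEBUG
  std::puts("optimized build: asserts disabled");
#else
  std::puts("debug build: asserts enabled");
#endif
  return 0;
}
```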
CMakeLists.txt (new file, 454 lines)
@@ -0,0 +1,454 @@
# Copyright 2017 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

cmake_minimum_required(VERSION 3.9)
# Keep the version below in sync with the one in db.h
project(leveldb VERSION 1.22.0 LANGUAGES C CXX)

# This project can use C11, but will gracefully decay down to C89.
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED OFF)
set(CMAKE_C_EXTENSIONS OFF)

# This project requires C++11.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

if (WIN32)
  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
  # TODO(cmumford): Make UNICODE configurable for Windows.
  add_definitions(-D_UNICODE -DUNICODE)
else (WIN32)
  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
endif (WIN32)

option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)

include(TestBigEndian)
test_big_endian(LEVELDB_IS_BIG_ENDIAN)

include(CheckIncludeFile)
check_include_file("unistd.h" HAVE_UNISTD_H)

include(CheckLibraryExists)
check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)

include(CheckCXXSymbolExists)
# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
# we're including the header from C++, and feature detection should use the same
# compiler language that the project will use later. Principles aside, some
# versions of do not expose fdatasync() in <unistd.h> in standard C mode
# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)

include(CheckCXXSourceCompiles)

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")

# Test whether -Wthread-safety is available. See
# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
# -Werror is necessary because unknown attributes only generate warnings.
set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
list(APPEND CMAKE_REQUIRED_FLAGS -Werror -Wthread-safety)
check_cxx_source_compiles("
  struct __attribute__((lockable)) Lock {
    void Acquire() __attribute__((exclusive_lock_function()));
    void Release() __attribute__((unlock_function()));
  };
  struct ThreadSafeType {
    Lock lock_;
    int data_ __attribute__((guarded_by(lock_)));
  };
  int main() { return 0; }
" HAVE_CLANG_THREAD_SAFETY)
set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})

# Test whether C++17 __has_include is available.
check_cxx_source_compiles("
#if defined(__has_include) && __has_include(<string>)
#include <string>
#endif
int main() { std::string str; return 0; }
" HAVE_CXX17_HAS_INCLUDE)

set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
set(LEVELDB_PORT_CONFIG_DIR "include/port")

configure_file(
  "${PROJECT_SOURCE_DIR}/port/port_config.h.in"
  "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
)

include_directories(
  "${PROJECT_BINARY_DIR}/include"
  "${PROJECT_SOURCE_DIR}"
)

if(BUILD_SHARED_LIBS)
  # Only export LEVELDB_EXPORT symbols from the shared library.
  add_compile_options(-fvisibility=hidden)
endif(BUILD_SHARED_LIBS)

add_library(leveldb "")
target_sources(leveldb
  PRIVATE
    "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
    "${PROJECT_SOURCE_DIR}/db/builder.cc"
    "${PROJECT_SOURCE_DIR}/db/builder.h"
    "${PROJECT_SOURCE_DIR}/db/c.cc"
    "${PROJECT_SOURCE_DIR}/db/db_impl.cc"
    "${PROJECT_SOURCE_DIR}/db/db_impl.h"
    "${PROJECT_SOURCE_DIR}/db/db_iter.cc"
    "${PROJECT_SOURCE_DIR}/db/db_iter.h"
    "${PROJECT_SOURCE_DIR}/db/dbformat.cc"
    "${PROJECT_SOURCE_DIR}/db/dbformat.h"
    "${PROJECT_SOURCE_DIR}/db/dumpfile.cc"
    "${PROJECT_SOURCE_DIR}/db/filename.cc"
    "${PROJECT_SOURCE_DIR}/db/filename.h"
    "${PROJECT_SOURCE_DIR}/db/log_format.h"
    "${PROJECT_SOURCE_DIR}/db/log_reader.cc"
    "${PROJECT_SOURCE_DIR}/db/log_reader.h"
    "${PROJECT_SOURCE_DIR}/db/log_writer.cc"
    "${PROJECT_SOURCE_DIR}/db/log_writer.h"
    "${PROJECT_SOURCE_DIR}/db/memtable.cc"
    "${PROJECT_SOURCE_DIR}/db/memtable.h"
    "${PROJECT_SOURCE_DIR}/db/repair.cc"
    "${PROJECT_SOURCE_DIR}/db/skiplist.h"
    "${PROJECT_SOURCE_DIR}/db/snapshot.h"
    "${PROJECT_SOURCE_DIR}/db/table_cache.cc"
    "${PROJECT_SOURCE_DIR}/db/table_cache.h"
    "${PROJECT_SOURCE_DIR}/db/version_edit.cc"
    "${PROJECT_SOURCE_DIR}/db/version_edit.h"
    "${PROJECT_SOURCE_DIR}/db/version_set.cc"
    "${PROJECT_SOURCE_DIR}/db/version_set.h"
    "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
    "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
    "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
    "${PROJECT_SOURCE_DIR}/port/port.h"
    "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
    "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
    "${PROJECT_SOURCE_DIR}/table/block_builder.h"
    "${PROJECT_SOURCE_DIR}/table/block.cc"
    "${PROJECT_SOURCE_DIR}/table/block.h"
    "${PROJECT_SOURCE_DIR}/table/filter_block.cc"
    "${PROJECT_SOURCE_DIR}/table/filter_block.h"
    "${PROJECT_SOURCE_DIR}/table/format.cc"
    "${PROJECT_SOURCE_DIR}/table/format.h"
    "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h"
    "${PROJECT_SOURCE_DIR}/table/iterator.cc"
    "${PROJECT_SOURCE_DIR}/table/merger.cc"
    "${PROJECT_SOURCE_DIR}/table/merger.h"
    "${PROJECT_SOURCE_DIR}/table/table_builder.cc"
    "${PROJECT_SOURCE_DIR}/table/table.cc"
    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc"
    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h"
    "${PROJECT_SOURCE_DIR}/util/arena.cc"
    "${PROJECT_SOURCE_DIR}/util/arena.h"
    "${PROJECT_SOURCE_DIR}/util/bloom.cc"
    "${PROJECT_SOURCE_DIR}/util/cache.cc"
    "${PROJECT_SOURCE_DIR}/util/coding.cc"
    "${PROJECT_SOURCE_DIR}/util/coding.h"
    "${PROJECT_SOURCE_DIR}/util/comparator.cc"
    "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
    "${PROJECT_SOURCE_DIR}/util/crc32c.h"
    "${PROJECT_SOURCE_DIR}/util/env.cc"
    "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
    "${PROJECT_SOURCE_DIR}/util/hash.cc"
    "${PROJECT_SOURCE_DIR}/util/hash.h"
    "${PROJECT_SOURCE_DIR}/util/logging.cc"
    "${PROJECT_SOURCE_DIR}/util/logging.h"
    "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
    "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
    "${PROJECT_SOURCE_DIR}/util/options.cc"
    "${PROJECT_SOURCE_DIR}/util/random.h"
    "${PROJECT_SOURCE_DIR}/util/status.cc"

  # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
  $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
)

if (WIN32)
  target_sources(leveldb
    PRIVATE
      "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
      "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
  )
else (WIN32)
  target_sources(leveldb
    PRIVATE
      "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
      "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
  )
endif (WIN32)

# MemEnv is not part of the interface and could be pulled to a separate library.
target_sources(leveldb
  PRIVATE
    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
)

target_include_directories(leveldb
  PUBLIC
    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)

set_target_properties(leveldb
  PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})

target_compile_definitions(leveldb
  PRIVATE
    # Used by include/export.h when building shared libraries.
    LEVELDB_COMPILE_LIBRARY
    # Used by port/port.h.
    ${LEVELDB_PLATFORM_NAME}=1
)
if (NOT HAVE_CXX17_HAS_INCLUDE)
  target_compile_definitions(leveldb
    PRIVATE
      LEVELDB_HAS_PORT_CONFIG_H=1
  )
endif(NOT HAVE_CXX17_HAS_INCLUDE)

if(BUILD_SHARED_LIBS)
  target_compile_definitions(leveldb
    PUBLIC
      # Used by include/export.h.
      LEVELDB_SHARED_LIBRARY
  )
endif(BUILD_SHARED_LIBS)

if(HAVE_CLANG_THREAD_SAFETY)
  target_compile_options(leveldb
    PUBLIC
      -Werror -Wthread-safety)
endif(HAVE_CLANG_THREAD_SAFETY)

if(HAVE_CRC32C)
  target_link_libraries(leveldb crc32c)
endif(HAVE_CRC32C)
if(HAVE_SNAPPY)
  target_link_libraries(leveldb snappy)
endif(HAVE_SNAPPY)
if(HAVE_TCMALLOC)
  target_link_libraries(leveldb tcmalloc)
endif(HAVE_TCMALLOC)

# Needed by port_stdcxx.h
find_package(Threads REQUIRED)
target_link_libraries(leveldb Threads::Threads)

add_executable(leveldbutil
  "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
)
target_link_libraries(leveldbutil leveldb)

if(LEVELDB_BUILD_TESTS)
  enable_testing()

  function(leveldb_test test_file)
    get_filename_component(test_target_name "${test_file}" NAME_WE)

    add_executable("${test_target_name}" "")
    target_sources("${test_target_name}"
      PRIVATE
        "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
        "${PROJECT_SOURCE_DIR}/util/testharness.h"
        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
        "${PROJECT_SOURCE_DIR}/util/testutil.h"

        "${test_file}"
    )
    target_link_libraries("${test_target_name}" leveldb)
    target_compile_definitions("${test_target_name}"
      PRIVATE
        ${LEVELDB_PLATFORM_NAME}=1
    )
    if (NOT HAVE_CXX17_HAS_INCLUDE)
      target_compile_definitions("${test_target_name}"
        PRIVATE
          LEVELDB_HAS_PORT_CONFIG_H=1
      )
    endif(NOT HAVE_CXX17_HAS_INCLUDE)

    add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
  endfunction(leveldb_test)

  leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c")
  leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc")

  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")

  leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")

  if(NOT BUILD_SHARED_LIBS)
    leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")

    # TODO(costan): This test also uses
    #               "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
    if (WIN32)
      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
    else (WIN32)
      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
    endif (WIN32)
  endif(NOT BUILD_SHARED_LIBS)
endif(LEVELDB_BUILD_TESTS)

if(LEVELDB_BUILD_BENCHMARKS)
  function(leveldb_benchmark bench_file)
    get_filename_component(bench_target_name "${bench_file}" NAME_WE)

    add_executable("${bench_target_name}" "")
    target_sources("${bench_target_name}"
      PRIVATE
        "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
        "${PROJECT_SOURCE_DIR}/util/histogram.cc"
        "${PROJECT_SOURCE_DIR}/util/histogram.h"
        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
        "${PROJECT_SOURCE_DIR}/util/testharness.h"
        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
        "${PROJECT_SOURCE_DIR}/util/testutil.h"

        "${bench_file}"
    )
    target_link_libraries("${bench_target_name}" leveldb)
    target_compile_definitions("${bench_target_name}"
      PRIVATE
        ${LEVELDB_PLATFORM_NAME}=1
    )
    if (NOT HAVE_CXX17_HAS_INCLUDE)
      target_compile_definitions("${bench_target_name}"
        PRIVATE
          LEVELDB_HAS_PORT_CONFIG_H=1
      )
    endif(NOT HAVE_CXX17_HAS_INCLUDE)
  endfunction(leveldb_benchmark)

  if(NOT BUILD_SHARED_LIBS)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc")
  endif(NOT BUILD_SHARED_LIBS)

  check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
  if(HAVE_SQLITE3)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc")
    target_link_libraries(db_bench_sqlite3 sqlite3)
  endif(HAVE_SQLITE3)

  # check_library_exists is insufficient here because the library names have
  # different manglings when compiled with clang or gcc, at least when installed
  # with Homebrew on Mac.
  set(OLD_CMAKE_REQURED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
  list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
  check_cxx_source_compiles("
#include <kcpolydb.h>

int main() {
  kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
  delete db;
  return 0;
}
  " HAVE_KYOTOCABINET)
  set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
  if(HAVE_KYOTOCABINET)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc")
    target_link_libraries(db_bench_tree_db kyotocabinet)
  endif(HAVE_KYOTOCABINET)
endif(LEVELDB_BUILD_BENCHMARKS)

if(LEVELDB_INSTALL)
  include(GNUInstallDirs)
  install(TARGETS leveldb
    EXPORT leveldbTargets
    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
  )
  install(
    FILES
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
  )

  include(CMakePackageConfigHelpers)
  write_basic_package_version_file(
    "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
    COMPATIBILITY SameMajorVersion
  )
  install(
    EXPORT leveldbTargets
    NAMESPACE leveldb::
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
  )
  install(
    FILES
      "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake"
      "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
  )
endif(LEVELDB_INSTALL)
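With this file in place, the build produces a `leveldb` library target that exports the public headers listed above. As a hedged sketch of what a consumer of the built library looks like (this program and the /tmp path are illustrative, not part of the commit), a minimal round-trip through the public API:

```cpp
#include <cassert>
#include <iostream>
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;  // Create the database on first run.

  leveldb::DB* db = nullptr;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/leveldb_demo", &db);
  assert(status.ok());

  // Keys and values are arbitrary byte arrays (leveldb::Slice).
  status = db->Put(leveldb::WriteOptions(), "key1", "value1");
  assert(status.ok());

  std::string value;
  status = db->Get(leveldb::ReadOptions(), "key1", &value);
  assert(status.ok());
  std::cout << "key1 -> " << value << std::endl;

  delete db;  // Closes the database.
  return 0;
}
```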
CONTRIBUTING.md
@@ -31,6 +31,6 @@ the CLA.

 ## Writing Code ##

 If your contribution contains code, please make sure that it follows
-[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+[the style guide](http://google.github.io/styleguide/cppguide.html).
 Otherwise we will have to ask you to make changes, and that's no fun for anyone.
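Combined with the .clang-format file added earlier in this commit, "follows the style guide" in practice means Google C++ style with left-attached pointers and references (DerivePointerAlignment: false). A hedged illustration of what conforming code looks like after `clang-format -i --style=file` (the function is invented for this example; the leveldb::DB API it calls is real):

```cpp
#include <string>

#include "leveldb/db.h"

// Two-space indent, attached braces, and '*'/'&' bound to the type are the
// formatting the repository's .clang-format settles on.
leveldb::Status OpenDatabase(const std::string& name, leveldb::DB** db) {
  leveldb::Options options;
  options.create_if_missing = true;
  return leveldb::DB::Open(options, name, db);
}
```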
Makefile (deleted, 424 lines)
@@ -1,424 +0,0 @@
# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

#-----------------------------------------------
# Uncomment exactly one of the lines labelled (A), (B), and (C) below
# to switch between compilation modes.

# (A) Production use (optimized mode)
OPT ?= -O2 -DNDEBUG
# (B) Debug mode, w/ full line-level debugging symbols
# OPT ?= -g2
# (C) Profiling mode: opt, but w/debugging symbols
# OPT ?= -O2 -g2 -DNDEBUG
#-----------------------------------------------

# detect what platform we're building on
$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
    ./build_detect_platform build_config.mk ./)
# this file is generated by the previous line to set build flags and sources
include build_config.mk

TESTS = \
	db/autocompact_test \
	db/c_test \
	db/corruption_test \
	db/db_test \
	db/dbformat_test \
	db/fault_injection_test \
	db/filename_test \
	db/log_test \
	db/recovery_test \
	db/skiplist_test \
	db/version_edit_test \
	db/version_set_test \
	db/write_batch_test \
	helpers/memenv/memenv_test \
	issues/issue178_test \
	issues/issue200_test \
	table/filter_block_test \
	table/table_test \
	util/arena_test \
	util/bloom_test \
	util/cache_test \
	util/coding_test \
	util/crc32c_test \
	util/env_posix_test \
	util/env_test \
	util/hash_test

UTILS = \
	db/db_bench \
	db/leveldbutil

# Put the object files in a subdirectory, but the application at the top of the object dir.
PROGNAMES := $(notdir $(TESTS) $(UTILS))

# On Linux may need libkyotocabinet-dev for dependency.
BENCHMARKS = \
	doc/bench/db_bench_sqlite3 \
	doc/bench/db_bench_tree_db

CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)

LDFLAGS += $(PLATFORM_LDFLAGS)
LIBS += $(PLATFORM_LIBS)

SIMULATOR_OUTDIR=out-ios-x86
DEVICE_OUTDIR=out-ios-arm

ifeq ($(PLATFORM), IOS)
# Note: iOS should probably be using libtool, not ar.
AR=xcrun ar
SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
STATIC_OUTDIR=out-ios-universal
else
STATIC_OUTDIR=out-static
SHARED_OUTDIR=out-shared
STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
endif

STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))

DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))

SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))

SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))

TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)

STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)

default: all

# Should we build shared libraries?
ifneq ($(PLATFORM_SHARED_EXT),)

# Many leveldb test apps use non-exported API's. Only build a subset for testing.
SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)

ifneq ($(PLATFORM_SHARED_VERSIONED),true)
SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED_LIB2 = $(SHARED_LIB1)
SHARED_LIB3 = $(SHARED_LIB1)
SHARED_LIBS = $(SHARED_LIB1)
SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
else
# Update db.h if you change these.
SHARED_VERSION_MAJOR = 1
SHARED_VERSION_MINOR = 20
SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
	ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
	ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
endif

$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
	$(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)

endif  # PLATFORM_SHARED_EXT

all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)

check: $(STATIC_PROGRAMS)
	for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done

clean:
	-rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
	-rm -f build_config.mk
	-rm -rf ios-x86 ios-arm

$(STATIC_OUTDIR):
	mkdir $@

$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
	mkdir $@

$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
	mkdir -p $@

$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
	mkdir $@

$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
	mkdir $@

$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
	mkdir $@

.PHONY: STATIC_OBJDIRS
STATIC_OBJDIRS: \
	$(STATIC_OUTDIR)/db \
	$(STATIC_OUTDIR)/port \
	$(STATIC_OUTDIR)/table \
	$(STATIC_OUTDIR)/util \
	$(STATIC_OUTDIR)/helpers/memenv

$(SHARED_OUTDIR):
	mkdir $@

$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
	mkdir $@

$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
	mkdir -p $@

$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
	mkdir $@

$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
	mkdir $@

$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
	mkdir $@

.PHONY: SHARED_OBJDIRS
SHARED_OBJDIRS: \
	$(SHARED_OUTDIR)/db \
	$(SHARED_OUTDIR)/port \
	$(SHARED_OUTDIR)/table \
	$(SHARED_OUTDIR)/util \
	$(SHARED_OUTDIR)/helpers/memenv

$(DEVICE_OUTDIR):
	mkdir $@

$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
	mkdir $@

$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
	mkdir -p $@

$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
	mkdir $@

$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
	mkdir $@

$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
	mkdir $@

.PHONY: DEVICE_OBJDIRS
DEVICE_OBJDIRS: \
	$(DEVICE_OUTDIR)/db \
	$(DEVICE_OUTDIR)/port \
	$(DEVICE_OUTDIR)/table \
	$(DEVICE_OUTDIR)/util \
	$(DEVICE_OUTDIR)/helpers/memenv

$(SIMULATOR_OUTDIR):
	mkdir $@

$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
	mkdir $@

$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
	mkdir -p $@

$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
	mkdir $@

$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
	mkdir $@

$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
	mkdir $@

.PHONY: SIMULATOR_OBJDIRS
SIMULATOR_OBJDIRS: \
	$(SIMULATOR_OUTDIR)/db \
	$(SIMULATOR_OUTDIR)/port \
	$(SIMULATOR_OUTDIR)/table \
	$(SIMULATOR_OUTDIR)/util \
	$(SIMULATOR_OUTDIR)/helpers/memenv

$(STATIC_ALLOBJS): | STATIC_OBJDIRS
$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
$(SHARED_ALLOBJS): | SHARED_OBJDIRS

ifeq ($(PLATFORM), IOS)
$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(DEVICE_LIBOBJECTS)

$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)

$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)

$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)

# For iOS, create universal object libraries to be used on both the simulator and
# a device.
$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
	lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@

$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
	lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
else
$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(STATIC_LIBOBJECTS)

$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
endif

$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(SHARED_MEMENVOBJECTS)

$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)

$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)

$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)

$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
	$(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)

$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
	$(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)

.PHONY: run-shared
run-shared: $(SHARED_OUTDIR)/db_bench
	LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench

$(SIMULATOR_OUTDIR)/%.o: %.cc
	xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@

$(DEVICE_OUTDIR)/%.o: %.cc
	xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@

$(SIMULATOR_OUTDIR)/%.o: %.c
	xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@

$(DEVICE_OUTDIR)/%.o: %.c
	xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@

$(STATIC_OUTDIR)/%.o: %.cc
	$(CXX) $(CXXFLAGS) -c $< -o $@

$(STATIC_OUTDIR)/%.o: %.c
	$(CC) $(CFLAGS) -c $< -o $@

$(SHARED_OUTDIR)/%.o: %.cc
	$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@

$(SHARED_OUTDIR)/%.o: %.c
	$(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@

$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
	$(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@

$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
	$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
README.md
@@ -1,10 +1,12 @@
 **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**

 [](https://travis-ci.org/google/leveldb)
+[](https://ci.appveyor.com/project/pwnall/leveldb)

 Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)

 # Features

   * Keys and values are arbitrary byte arrays.
   * Data is stored sorted by key.
   * Callers can provide a custom comparison function to override the sort order.
@@ -16,15 +18,55 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
   * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.

 # Documentation

   [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.

 # Limitations

   * This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
   * Only a single process (possibly multi-threaded) can access a particular database at a time.
   * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.

+# Building
+
+This project supports [CMake](https://cmake.org/) out of the box.
+
+### Build for POSIX
+
+Quick start:
+
+```bash
+mkdir -p build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
+```
+
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```cmd
+mkdir build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default will build for x86. For 64-bit run:
+
+```cmd
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```cmd
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
+Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
+
 # Contributing to the leveldb Project

 The leveldb project welcomes contributions. leveldb's primary goal is to be
 a reliable and fast key/value store. Changes that are in line with the
 features/limitations outlined above, and meet the requirements below,
@@ -32,10 +74,10 @@ will be considered.

 Contribution requirements:

-1. **POSIX only**. We _generally_ will only accept changes that are both
-   compiled, and tested on a POSIX platform - usually Linux. Very small
-   changes will sometimes be accepted, but consider that more of an
-   exception than the rule.
+1. **Tested platforms only**. We _generally_ will only accept changes for
+   platforms that are compiled and tested. This means POSIX (for Linux and
+   macOS) or Windows. Very small changes will sometimes be accepted, but
+   consider that more of an exception than the rule.

 2. **Stable API**. We strive very hard to maintain a stable API. Changes that
    require changes for projects using leveldb _might_ be rejected without
@@ -44,7 +86,16 @@ Contribution requirements:
 3. **Tests**: All changes must be accompanied by a new (or changed) test, or
    a sufficient explanation as to why a new (or changed) test is not required.

+4. **Consistent Style**: This project conforms to the
+   [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+   To ensure your changes are properly formatted please run:
+
+   ```
+   clang-format -i --style=file <file>
+   ```
+
 ## Submitting a Pull Request

 Before any pull request will be accepted the author must first sign a
 Contributor License Agreement (CLA) at https://cla.developers.google.com/.
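The README's feature list notes that callers can override the sort order with a custom comparison function. As a hedged sketch of that hook (the comparator class, key encoding, and path are invented for illustration; the leveldb::Comparator interface and Options::comparator field are real), see below:

```cpp
#include <cstring>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/slice.h"

// Orders 8-byte keys as big-endian unsigned integers; any total order over
// byte arrays would work here.
class UInt64Comparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    return std::memcmp(a.data(), b.data(), 8);  // big-endian keys compare bytewise
  }
  const char* Name() const override { return "demo.UInt64Comparator"; }
  // No-op implementations are legal; they only affect index key sizes.
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const override {}
  void FindShortSuccessor(std::string*) const override {}
};

int main() {
  UInt64Comparator cmp;
  leveldb::Options options;
  options.create_if_missing = true;
  options.comparator = &cmp;  // Must outlive the DB and never change afterwards.

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb_custom_order", &db);
  if (s.ok()) delete db;
  return s.ok() ? 0 : 1;
}
```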
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "db/db_impl.h"
+#include <sys/types.h>
-#include "db/version_set.h"
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
 #include "leveldb/write_batch.h"
 #include "port/port.h"
 #include "util/crc32c.h"
@@ -35,7 +35,6 @@
 // seekrandom -- N random seeks
 // open -- cost of opening a DB
 // crc32c -- repeated crc32c of 4K of data
-// acquireload -- load N*1000 times
 // Meta operations:
 // compact -- Compact the entire DB
 // stats -- Print DB stats
@@ -57,9 +56,7 @@ static const char* FLAGS_benchmarks =
 "fill100K,"
 "crc32c,"
 "snappycomp,"
-"snappyuncomp,"
+"snappyuncomp,";
-"acquireload,"
-;
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -112,12 +109,12 @@ static bool FLAGS_use_existing_db = false;
 static bool FLAGS_reuse_logs = false;
 
 // Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
 
 namespace leveldb {
 
 namespace {
-leveldb::Env* g_env = NULL;
+leveldb::Env* g_env = nullptr;
 
 // Helper for quickly generating random data.
 class RandomGenerator {
@@ -158,7 +155,7 @@ static Slice TrimSpace(Slice s) {
 start++;
 }
 size_t limit = s.size();
-while (limit > start && isspace(s[limit-1])) {
+while (limit > start && isspace(s[limit - 1])) {
 limit--;
 }
 return Slice(s.data() + start, limit - start);
@@ -190,14 +187,12 @@ class Stats {
 
 void Start() {
 next_report_ = 100;
-last_op_finish_ = start_;
 hist_.Clear();
 done_ = 0;
 bytes_ = 0;
 seconds_ = 0;
-start_ = g_env->NowMicros();
-finish_ = start_;
 message_.clear();
+start_ = finish_ = last_op_finish_ = g_env->NowMicros();
 }
 
 void Merge(const Stats& other) {
@@ -217,9 +212,7 @@ class Stats {
 seconds_ = (finish_ - start_) * 1e-6;
 }
 
-void AddMessage(Slice msg) {
+void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
-AppendWithSpace(&message_, msg);
-}
 
 void FinishedSingleOp() {
 if (FLAGS_histogram) {
@@ -235,21 +228,26 @@ class Stats {
 
 done_++;
 if (done_ >= next_report_) {
-if (next_report_ < 1000) next_report_ += 100;
+if (next_report_ < 1000)
-else if (next_report_ < 5000) next_report_ += 500;
+next_report_ += 100;
-else if (next_report_ < 10000) next_report_ += 1000;
+else if (next_report_ < 5000)
-else if (next_report_ < 50000) next_report_ += 5000;
+next_report_ += 500;
-else if (next_report_ < 100000) next_report_ += 10000;
+else if (next_report_ < 10000)
-else if (next_report_ < 500000) next_report_ += 50000;
+next_report_ += 1000;
-else next_report_ += 100000;
+else if (next_report_ < 50000)
+next_report_ += 5000;
+else if (next_report_ < 100000)
+next_report_ += 10000;
+else if (next_report_ < 500000)
+next_report_ += 50000;
+else
+next_report_ += 100000;
 fprintf(stderr, "... finished %d ops%30s\r", done_, "");
 fflush(stderr);
 }
 }
 
-void AddBytes(int64_t n) {
+void AddBytes(int64_t n) { bytes_ += n; }
-bytes_ += n;
-}
 
 void Report(const Slice& name) {
 // Pretend at least one op was done in case we are running a benchmark
@@ -268,11 +266,8 @@ class Stats {
 }
 AppendWithSpace(&extra, message_);
 
-fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-name.ToString().c_str(),
+seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
-seconds_ * 1e6 / done_,
-(extra.empty() ? "" : " "),
-extra.c_str());
 if (FLAGS_histogram) {
 fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
 }
@@ -283,8 +278,8 @@ class Stats {
 // State shared by all concurrent executions of the same benchmark.
 struct SharedState {
 port::Mutex mu;
-port::CondVar cv;
+port::CondVar cv GUARDED_BY(mu);
-int total;
+int total GUARDED_BY(mu);
 
 // Each thread goes through the following states:
 // (1) initializing
@@ -292,24 +287,22 @@ struct SharedState {
 // (3) running
 // (4) done
 
-int num_initialized;
+int num_initialized GUARDED_BY(mu);
-int num_done;
+int num_done GUARDED_BY(mu);
-bool start;
+bool start GUARDED_BY(mu);
 
-SharedState() : cv(&mu) { }
+SharedState(int total)
+: cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
 };
 
 // Per-thread state for concurrent executions of the same benchmark.
 struct ThreadState {
 int tid; // 0..n-1 when running in n threads
 Random rand; // Has different seeds for different threads
 Stats stats;
 SharedState* shared;
 
-ThreadState(int index)
+ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
-: tid(index),
-rand(1000 + index) {
-}
 };
 
 } // namespace
@@ -335,20 +328,20 @@ class Benchmark {
 static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
 fprintf(stdout, "Entries: %d\n", num_);
 fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
-((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
+((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-/ 1048576.0));
+1048576.0));
 fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
-(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
+(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
-/ 1048576.0));
+1048576.0));
 PrintWarnings();
 fprintf(stdout, "------------------------------------------------\n");
 }
 
 void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-fprintf(stdout,
+fprintf(
-"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
+stdout,
-);
+"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
 fprintf(stdout,
@@ -366,22 +359,22 @@ class Benchmark {
 }
 
 void PrintEnvironment() {
-fprintf(stderr, "LevelDB: version %d.%d\n",
+fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
-kMajorVersion, kMinorVersion);
+kMinorVersion);
 
 #if defined(__linux)
-time_t now = time(NULL);
+time_t now = time(nullptr);
 fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
 
 FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
-if (cpuinfo != NULL) {
+if (cpuinfo != nullptr) {
 char line[1000];
 int num_cpus = 0;
 std::string cpu_type;
 std::string cache_size;
-while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
 const char* sep = strchr(line, ':');
-if (sep == NULL) {
+if (sep == nullptr) {
 continue;
 }
 Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -402,16 +395,16 @@ class Benchmark {
 
 public:
 Benchmark()
-: cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
+: cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
 filter_policy_(FLAGS_bloom_bits >= 0
 ? NewBloomFilterPolicy(FLAGS_bloom_bits)
-: NULL),
+: nullptr),
-db_(NULL),
+db_(nullptr),
 num_(FLAGS_num),
 value_size_(FLAGS_value_size),
 entries_per_batch_(1),
 reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
 heap_counter_(0) {
 std::vector<std::string> files;
 g_env->GetChildren(FLAGS_db, &files);
 for (size_t i = 0; i < files.size(); i++) {
@@ -435,12 +428,12 @@ class Benchmark {
 Open();
 
 const char* benchmarks = FLAGS_benchmarks;
-while (benchmarks != NULL) {
+while (benchmarks != nullptr) {
 const char* sep = strchr(benchmarks, ',');
 Slice name;
-if (sep == NULL) {
+if (sep == nullptr) {
 name = benchmarks;
-benchmarks = NULL;
+benchmarks = nullptr;
 } else {
 name = Slice(benchmarks, sep - benchmarks);
 benchmarks = sep + 1;
@@ -453,7 +446,7 @@ class Benchmark {
 entries_per_batch_ = 1;
 write_options_ = WriteOptions();
 
-void (Benchmark::*method)(ThreadState*) = NULL;
+void (Benchmark::*method)(ThreadState*) = nullptr;
 bool fresh_db = false;
 int num_threads = FLAGS_threads;
 
@@ -510,8 +503,6 @@ class Benchmark {
 method = &Benchmark::Compact;
 } else if (name == Slice("crc32c")) {
 method = &Benchmark::Crc32c;
-} else if (name == Slice("acquireload")) {
-method = &Benchmark::AcquireLoad;
 } else if (name == Slice("snappycomp")) {
 method = &Benchmark::SnappyCompress;
 } else if (name == Slice("snappyuncomp")) {
@@ -523,7 +514,7 @@ class Benchmark {
 } else if (name == Slice("sstables")) {
 PrintStats("leveldb.sstables");
 } else {
-if (name != Slice()) { // No error message for empty name
+if (!name.empty()) { // No error message for empty name
 fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
 }
 }
@@ -532,16 +523,16 @@ class Benchmark {
 if (FLAGS_use_existing_db) {
 fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
 name.ToString().c_str());
-method = NULL;
+method = nullptr;
 } else {
 delete db_;
-db_ = NULL;
+db_ = nullptr;
 DestroyDB(FLAGS_db, Options());
 Open();
 }
 }
 
-if (method != NULL) {
+if (method != nullptr) {
 RunBenchmark(num_threads, name, method);
 }
 }
@@ -585,11 +576,7 @@ class Benchmark {
 
 void RunBenchmark(int n, Slice name,
 void (Benchmark::*method)(ThreadState*)) {
-SharedState shared;
+SharedState shared(n);
-shared.total = n;
-shared.num_initialized = 0;
-shared.num_done = 0;
-shared.start = false;
 
 ThreadArg* arg = new ThreadArg[n];
 for (int i = 0; i < n; i++) {
@@ -643,22 +630,6 @@ class Benchmark {
 thread->stats.AddMessage(label);
 }
 
-void AcquireLoad(ThreadState* thread) {
-int dummy;
-port::AtomicPointer ap(&dummy);
-int count = 0;
-void *ptr = NULL;
-thread->stats.AddMessage("(each op is 1000 loads)");
-while (count < 100000) {
-for (int i = 0; i < 1000; i++) {
-ptr = ap.Acquire_Load();
-}
-count++;
-thread->stats.FinishedSingleOp();
-}
-if (ptr == NULL) exit(1); // Disable unused variable warning.
-}
-
 void SnappyCompress(ThreadState* thread) {
 RandomGenerator gen;
 Slice input = gen.Generate(Options().block_size);
@@ -692,8 +663,8 @@ class Benchmark {
 int64_t bytes = 0;
 char* uncompressed = new char[input.size()];
 while (ok && bytes < 1024 * 1048576) { // Compress 1G
 ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
 uncompressed);
 bytes += input.size();
 thread->stats.FinishedSingleOp();
 }
@@ -707,7 +678,7 @@ class Benchmark {
 }
 
 void Open() {
-assert(db_ == NULL);
+assert(db_ == nullptr);
 Options options;
 options.env = g_env;
 options.create_if_missing = !FLAGS_use_existing_db;
@@ -733,13 +704,9 @@ class Benchmark {
 }
 }
 
-void WriteSeq(ThreadState* thread) {
+void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
-DoWrite(thread, true);
-}
 
-void WriteRandom(ThreadState* thread) {
+void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
-DoWrite(thread, false);
-}
 
 void DoWrite(ThreadState* thread, bool seq) {
 if (num_ != FLAGS_num) {
@@ -755,7 +722,7 @@ class Benchmark {
 for (int i = 0; i < num_; i += entries_per_batch_) {
 batch.Clear();
 for (int j = 0; j < entries_per_batch_; j++) {
-const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
 char key[100];
 snprintf(key, sizeof(key), "%016d", k);
 batch.Put(key, gen.Generate(value_size_));
@@ -865,7 +832,7 @@ class Benchmark {
 for (int i = 0; i < num_; i += entries_per_batch_) {
 batch.Clear();
 for (int j = 0; j < entries_per_batch_; j++) {
-const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
 char key[100];
 snprintf(key, sizeof(key), "%016d", k);
 batch.Delete(key);
@@ -879,13 +846,9 @@ class Benchmark {
 }
 }
 
-void DeleteSeq(ThreadState* thread) {
+void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
-DoDelete(thread, true);
-}
 
-void DeleteRandom(ThreadState* thread) {
+void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
-DoDelete(thread, false);
-}
 
 void ReadWhileWriting(ThreadState* thread) {
 if (thread->tid > 0) {
@@ -917,9 +880,7 @@ class Benchmark {
 }
 }
 
-void Compact(ThreadState* thread) {
+void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
-db_->CompactRange(NULL, NULL);
-}
 
 void PrintStats(const char* key) {
 std::string stats;
@@ -1008,10 +969,10 @@ int main(int argc, char** argv) {
 leveldb::g_env = leveldb::Env::Default();
 
 // Choose a location for the test database if none given with --db=<path>
-if (FLAGS_db == NULL) {
+if (FLAGS_db == nullptr) {
 leveldb::g_env->GetTestDirectory(&default_db_path);
 default_db_path += "/dbbench";
 FLAGS_db = default_db_path.c_str();
 }
 
 leveldb::Benchmark benchmark;
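
The hunks above appear to belong to LevelDB's own benchmark driver (db_bench.cc upstream). Beyond the clang-format reflow, the substantive changes are the switch from NULL to nullptr, the removal of the acquireload benchmark together with port::AtomicPointer, constructor-based initialization of SharedState, and new GUARDED_BY annotations on the fields protected by the mutex. As a rough illustration of what those annotations buy, here is a minimal, self-contained sketch that is ours rather than the commit's: it defines a stand-in GUARDED_BY macro over Clang's thread-safety attribute and uses std::mutex instead of leveldb's port::Mutex, and the SharedCounters type is invented for the example.

// Sketch only: stand-in for leveldb's GUARDED_BY macro.
#include <mutex>

#if defined(__clang__)
#define GUARDED_BY(m) __attribute__((guarded_by(m)))
#else
#define GUARDED_BY(m)  // no-op on compilers without the attribute
#endif

struct SharedCounters {
  std::mutex mu;
  int num_initialized GUARDED_BY(mu) = 0;  // should only be touched while mu is held
  int num_done GUARDED_BY(mu) = 0;

  void MarkDone() {
    std::lock_guard<std::mutex> lock(mu);  // satisfies the annotation
    ++num_done;
  }
};

int main() {
  SharedCounters counters;
  counters.MarkDone();
  return 0;
}

Compiled with clang and -Wthread-safety (and a mutex type annotated as a capability, as leveldb's port::Mutex is), the analyzer warns whenever a guarded field is read or written without holding mu, which is the point of annotating SharedState here.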
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <sqlite3.h>
+
 #include <stdio.h>
 #include <stdlib.h>
-#include <sqlite3.h>
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
 "fillrand100K,"
 "fillseq100K,"
 "readseq,"
-"readrand100K,"
+"readrand100K,";
-;
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -76,10 +76,9 @@ static bool FLAGS_transaction = true;
 static bool FLAGS_WAL_enabled = true;
 
 // Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
 
-inline
+inline static void ExecErrorCheck(int status, char* err_msg) {
-static void ExecErrorCheck(int status, char *err_msg) {
 if (status != SQLITE_OK) {
 fprintf(stderr, "SQL error: %s\n", err_msg);
 sqlite3_free(err_msg);
@@ -87,27 +86,25 @@ static void ExecErrorCheck(int status, char *err_msg) {
 }
 }
 
-inline
+inline static void StepErrorCheck(int status) {
-static void StepErrorCheck(int status) {
 if (status != SQLITE_DONE) {
 fprintf(stderr, "SQL step error: status = %d\n", status);
 exit(1);
 }
 }
 
-inline
+inline static void ErrorCheck(int status) {
-static void ErrorCheck(int status) {
 if (status != SQLITE_OK) {
 fprintf(stderr, "sqlite3 error: status = %d\n", status);
 exit(1);
 }
 }
 
-inline
+inline static void WalCheckpoint(sqlite3* db_) {
-static void WalCheckpoint(sqlite3* db_) {
 // Flush all writes to disk
 if (FLAGS_WAL_enabled) {
-sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
+sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
+nullptr);
 }
 }
 
@@ -152,7 +149,7 @@ static Slice TrimSpace(Slice s) {
 start++;
 }
 int limit = s.size();
-while (limit > start && isspace(s[limit-1])) {
+while (limit > start && isspace(s[limit - 1])) {
 limit--;
 }
 return Slice(s.data() + start, limit - start);
@@ -176,7 +173,7 @@ class Benchmark {
 
 // State kept for progress messages
 int done_;
 int next_report_; // When to report next
 
 void PrintHeader() {
 const int kKeySize = 16;
@@ -185,17 +182,17 @@ class Benchmark {
 fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
 fprintf(stdout, "Entries: %d\n", num_);
 fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
-((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
+((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-/ 1048576.0));
+1048576.0));
 PrintWarnings();
 fprintf(stdout, "------------------------------------------------\n");
 }
 
 void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-fprintf(stdout,
+fprintf(
-"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
+stdout,
-);
+"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
 fprintf(stdout,
@@ -207,18 +204,18 @@ class Benchmark {
 fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
 
 #if defined(__linux)
-time_t now = time(NULL);
+time_t now = time(nullptr);
 fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
 
 FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
-if (cpuinfo != NULL) {
+if (cpuinfo != nullptr) {
 char line[1000];
 int num_cpus = 0;
 std::string cpu_type;
 std::string cache_size;
-while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
 const char* sep = strchr(line, ':');
-if (sep == NULL) {
+if (sep == nullptr) {
 continue;
 }
 Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -261,13 +258,20 @@ class Benchmark {
 
 done_++;
 if (done_ >= next_report_) {
-if (next_report_ < 1000) next_report_ += 100;
+if (next_report_ < 1000)
-else if (next_report_ < 5000) next_report_ += 500;
+next_report_ += 100;
-else if (next_report_ < 10000) next_report_ += 1000;
+else if (next_report_ < 5000)
-else if (next_report_ < 50000) next_report_ += 5000;
+next_report_ += 500;
-else if (next_report_ < 100000) next_report_ += 10000;
+else if (next_report_ < 10000)
-else if (next_report_ < 500000) next_report_ += 50000;
+next_report_ += 1000;
-else next_report_ += 100000;
+else if (next_report_ < 50000)
+next_report_ += 5000;
+else if (next_report_ < 100000)
+next_report_ += 10000;
+else if (next_report_ < 500000)
+next_report_ += 50000;
+else
+next_report_ += 100000;
 fprintf(stderr, "... finished %d ops%30s\r", done_, "");
 fflush(stderr);
 }
@@ -285,16 +289,14 @@ class Benchmark {
 snprintf(rate, sizeof(rate), "%6.1f MB/s",
 (bytes_ / 1048576.0) / (finish - start_));
 if (!message_.empty()) {
 message_ = std::string(rate) + " " + message_;
 } else {
 message_ = rate;
 }
 }
 
-fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-name.ToString().c_str(),
+(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
-(finish - start_) * 1e6 / done_,
-(message_.empty() ? "" : " "),
 message_.c_str());
 if (FLAGS_histogram) {
 fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -303,22 +305,16 @@ class Benchmark {
 }
 
 public:
-enum Order {
+enum Order { SEQUENTIAL, RANDOM };
-SEQUENTIAL,
+enum DBState { FRESH, EXISTING };
-RANDOM
-};
-enum DBState {
-FRESH,
-EXISTING
-};
 
 Benchmark()
-: db_(NULL),
+: db_(nullptr),
 db_num_(0),
 num_(FLAGS_num),
 reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
 bytes_(0),
 rand_(301) {
 std::vector<std::string> files;
 std::string test_dir;
 Env::Default()->GetTestDirectory(&test_dir);
@@ -345,12 +341,12 @@ class Benchmark {
 Open();
 
 const char* benchmarks = FLAGS_benchmarks;
-while (benchmarks != NULL) {
+while (benchmarks != nullptr) {
 const char* sep = strchr(benchmarks, ',');
 Slice name;
-if (sep == NULL) {
+if (sep == nullptr) {
 name = benchmarks;
-benchmarks = NULL;
+benchmarks = nullptr;
 } else {
 name = Slice(benchmarks, sep - benchmarks);
 benchmarks = sep + 1;
@@ -415,20 +411,18 @@ class Benchmark {
 }
 
 void Open() {
-assert(db_ == NULL);
+assert(db_ == nullptr);
 
 int status;
 char file_name[100];
-char* err_msg = NULL;
+char* err_msg = nullptr;
 db_num_++;
 
 // Open database
 std::string tmp_dir;
 Env::Default()->GetTestDirectory(&tmp_dir);
-snprintf(file_name, sizeof(file_name),
+snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
-"%s/dbbench_sqlite3-%d.db",
+tmp_dir.c_str(), db_num_);
-tmp_dir.c_str(),
-db_num_);
 status = sqlite3_open(file_name, &db_);
 if (status) {
 fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
@@ -439,7 +433,7 @@ class Benchmark {
 char cache_size[100];
 snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
 FLAGS_num_pages);
-status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg);
+status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
 
 // FLAGS_page_size is defaulted to 1024
@@ -447,7 +441,7 @@ class Benchmark {
 char page_size[100];
 snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
 FLAGS_page_size);
-status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg);
+status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
 }
 
@@ -457,26 +451,28 @@ class Benchmark {
 
 // LevelDB's default cache size is a combined 4 MB
 std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
-status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg);
+status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
-status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg);
+status =
+sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
 }
 
 // Change locking mode to exclusive and create tables/index for database
 std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
 std::string create_stmt =
 "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
-std::string stmt_array[] = { locking_stmt, create_stmt };
+std::string stmt_array[] = {locking_stmt, create_stmt};
 int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
 for (int i = 0; i < stmt_array_length; i++) {
-status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg);
+status =
+sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
 }
 }
 
-void Write(bool write_sync, Order order, DBState state,
+void Write(bool write_sync, Order order, DBState state, int num_entries,
-int num_entries, int value_size, int entries_per_batch) {
+int value_size, int entries_per_batch) {
 // Create new database if state == FRESH
 if (state == FRESH) {
 if (FLAGS_use_existing_db) {
@@ -484,7 +480,7 @@ class Benchmark {
 return;
 }
 sqlite3_close(db_);
-db_ = NULL;
+db_ = nullptr;
 Open();
 Start();
 }
@@ -495,7 +491,7 @@ class Benchmark {
 message_ = msg;
 }
 
-char* err_msg = NULL;
+char* err_msg = nullptr;
 int status;
 
 sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
@@ -504,20 +500,20 @@ class Benchmark {
 std::string end_trans_str = "END TRANSACTION;";
 
 // Check for synchronous flag in options
-std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
+std::string sync_stmt =
-"PRAGMA synchronous = OFF";
+(write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
-status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg);
+status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
 ExecErrorCheck(status, err_msg);
 
 // Preparing sqlite3 statements
-status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
+status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
-&replace_stmt, NULL);
+nullptr);
 ErrorCheck(status);
 status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
-&begin_trans_stmt, NULL);
+&begin_trans_stmt, nullptr);
 ErrorCheck(status);
-status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
+status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
-&end_trans_stmt, NULL);
+nullptr);
 ErrorCheck(status);
 
 bool transaction = (entries_per_batch > 1);
@@ -535,16 +531,16 @@ class Benchmark {
 const char* value = gen_.Generate(value_size).data();
 
 // Create values for key-value pair
-const int k = (order == SEQUENTIAL) ? i + j :
+const int k =
-(rand_.Next() % num_entries);
+(order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
 char key[100];
 snprintf(key, sizeof(key), "%016d", k);
 
 // Bind KV values into replace_stmt
 status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
 ErrorCheck(status);
-status = sqlite3_bind_blob(replace_stmt, 2, value,
+status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
-value_size, SQLITE_STATIC);
+SQLITE_STATIC);
 ErrorCheck(status);
 
 // Execute replace_stmt
@@ -588,12 +584,12 @@ class Benchmark {
 
 // Preparing sqlite3 statements
 status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
-&begin_trans_stmt, NULL);
+&begin_trans_stmt, nullptr);
 ErrorCheck(status);
-status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
+status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
-&end_trans_stmt, NULL);
+nullptr);
 ErrorCheck(status);
-status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL);
+status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
 ErrorCheck(status);
 
 bool transaction = (entries_per_batch > 1);
@@ -618,7 +614,8 @@ class Benchmark {
 ErrorCheck(status);
 
 // Execute read statement
-while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+}
 StepErrorCheck(status);
 
 // Reset SQLite statement for another use
@@ -648,10 +645,10 @@ class Benchmark {
 
 void ReadSequential() {
 int status;
-sqlite3_stmt *pStmt;
+sqlite3_stmt* pStmt;
 std::string read_str = "SELECT * FROM test ORDER BY key";
 
-status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL);
+status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
 ErrorCheck(status);
 for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
 bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2);
@@ -661,7 +658,6 @@ class Benchmark {
 status = sqlite3_finalize(pStmt);
 ErrorCheck(status);
 }
-
 };
 
 } // namespace leveldb
@@ -706,10 +702,10 @@ int main(int argc, char** argv) {
 }
 
 // Choose a location for the test database if none given with --db=<path>
-if (FLAGS_db == NULL) {
+if (FLAGS_db == nullptr) {
 leveldb::Env::Default()->GetTestDirectory(&default_db_path);
 default_db_path += "/dbbench";
 FLAGS_db = default_db_path.c_str();
 }
 
 leveldb::Benchmark benchmark;
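
The second group of hunks appears to come from the SQLite flavor of the benchmark (db_bench_sqlite3.cc upstream); most of the edits are the same nullptr switch plus argument reflowing around the prepared-statement calls. The sketch below is ours, not the commit's: it condenses the prepare/bind/step/reset cycle that the benchmark's Write() path wraps, using the nullptr arguments the diff introduces. The table schema matches the CREATE TABLE statement visible above, while the exact REPLACE statement text is an assumption.

// Hedged, self-contained sketch of the SQLite prepare/bind/step/reset cycle.
#include <sqlite3.h>
#include <cstdio>

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) {
    std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db));
    return 1;
  }
  sqlite3_exec(db, "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))",
               nullptr, nullptr, nullptr);

  sqlite3_stmt* replace_stmt = nullptr;
  if (sqlite3_prepare_v2(db, "REPLACE INTO test (key, value) VALUES (?, ?)", -1,
                         &replace_stmt, nullptr) != SQLITE_OK) {
    std::fprintf(stderr, "prepare error: %s\n", sqlite3_errmsg(db));
    return 1;
  }

  const char key[] = "0000000000000001";  // 16-byte key, like the benchmark's %016d keys
  const char value[] = "hello";
  sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
  sqlite3_bind_blob(replace_stmt, 2, value, sizeof(value), SQLITE_STATIC);
  int status = sqlite3_step(replace_stmt);  // executes the write; SQLITE_DONE on success
  sqlite3_reset(replace_stmt);              // statement is ready for the next bind/step
  std::printf("step status: %d\n", status);

  sqlite3_finalize(replace_stmt);
  sqlite3_close(db);
  return 0;
}

Reusing one prepared statement with bind/step/reset, as the benchmark does, avoids re-parsing the SQL on every write, which matters when the loop runs FLAGS_num times.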
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <kcpolydb.h>
+
 #include <stdio.h>
 #include <stdlib.h>
-#include <kcpolydb.h>
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
 "fillrand100K,"
 "fillseq100K,"
 "readseq100K,"
-"readrand100K,"
+"readrand100K,";
-;
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -69,11 +69,9 @@ static bool FLAGS_use_existing_db = false;
 static bool FLAGS_compression = true;
 
 // Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
 
-inline
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
 // Synchronize will flush writes to disk
 if (!db_->synchronize()) {
 fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
 start++;
 }
 int limit = s.size();
-while (limit > start && isspace(s[limit-1])) {
+while (limit > start && isspace(s[limit - 1])) {
 limit--;
 }
 return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
 
 // State kept for progress messages
 int done_;
 int next_report_; // When to report next
 
 void PrintHeader() {
 const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
 static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
 fprintf(stdout, "Entries: %d\n", num_);
 fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
-((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
+((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-/ 1048576.0));
+1048576.0));
 fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
-(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
+(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
-/ 1048576.0));
+1048576.0));
 PrintWarnings();
 fprintf(stdout, "------------------------------------------------\n");
 }
 
 void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-fprintf(stdout,
+fprintf(
-"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
+stdout,
-);
+"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
 fprintf(stdout,
@@ -183,18 +181,18 @@ class Benchmark {
 kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
 
 #if defined(__linux)
-time_t now = time(NULL);
+time_t now = time(nullptr);
 fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
 
 FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
-if (cpuinfo != NULL) {
+if (cpuinfo != nullptr) {
 char line[1000];
 int num_cpus = 0;
 std::string cpu_type;
 std::string cache_size;
-while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
 const char* sep = strchr(line, ':');
-if (sep == NULL) {
+if (sep == nullptr) {
 continue;
 }
 Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -237,13 +235,20 @@ class Benchmark {
 
 done_++;
 if (done_ >= next_report_) {
-if (next_report_ < 1000) next_report_ += 100;
+if (next_report_ < 1000)
-else if (next_report_ < 5000) next_report_ += 500;
+next_report_ += 100;
-else if (next_report_ < 10000) next_report_ += 1000;
+else if (next_report_ < 5000)
-else if (next_report_ < 50000) next_report_ += 5000;
+next_report_ += 500;
-else if (next_report_ < 100000) next_report_ += 10000;
+else if (next_report_ < 10000)
-else if (next_report_ < 500000) next_report_ += 50000;
+next_report_ += 1000;
-else next_report_ += 100000;
+else if (next_report_ < 50000)
+next_report_ += 5000;
+else if (next_report_ < 100000)
+next_report_ += 10000;
+else if (next_report_ < 500000)
+next_report_ += 50000;
+else
+next_report_ += 100000;
 fprintf(stderr, "... finished %d ops%30s\r", done_, "");
 fflush(stderr);
 }
@@ -261,16 +266,14 @@ class Benchmark {
 snprintf(rate, sizeof(rate), "%6.1f MB/s",
 (bytes_ / 1048576.0) / (finish - start_));
 if (!message_.empty()) {
 message_ = std::string(rate) + " " + message_;
 } else {
 message_ = rate;
 }
 }
 
-fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-name.ToString().c_str(),
+(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
-(finish - start_) * 1e6 / done_,
-(message_.empty() ? "" : " "),
 message_.c_str());
 if (FLAGS_histogram) {
 fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
 }
 
 public:
-enum Order {
+enum Order { SEQUENTIAL, RANDOM };
-SEQUENTIAL,
+enum DBState { FRESH, EXISTING };
-RANDOM
-};
-enum DBState {
-FRESH,
-EXISTING
-};
 
 Benchmark()
-: db_(NULL),
+: db_(nullptr),
 num_(FLAGS_num),
 reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
 bytes_(0),
 rand_(301) {
 std::vector<std::string> files;
 std::string test_dir;
 Env::Default()->GetTestDirectory(&test_dir);
@@ -321,12 +318,12 @@ class Benchmark {
 Open(false);
 
 const char* benchmarks = FLAGS_benchmarks;
-while (benchmarks != NULL) {
+while (benchmarks != nullptr) {
 const char* sep = strchr(benchmarks, ',');
 Slice name;
-if (sep == NULL) {
+if (sep == nullptr) {
 name = benchmarks;
-benchmarks = NULL;
+benchmarks = nullptr;
 } else {
 name = Slice(benchmarks, sep - benchmarks);
 benchmarks = sep + 1;
@@ -386,8 +383,8 @@ class Benchmark {
 }
 
 private:
 void Open(bool sync) {
-assert(db_ == NULL);
+assert(db_ == nullptr);
 
 // Initialize db_
 db_ = new kyotocabinet::TreeDB();
@@ -395,16 +392,14 @@ class Benchmark {
 db_num_++;
 std::string test_dir;
 Env::Default()->GetTestDirectory(&test_dir);
-snprintf(file_name, sizeof(file_name),
+snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
-"%s/dbbench_polyDB-%d.kct",
+test_dir.c_str(), db_num_);
-test_dir.c_str(),
-db_num_);
 
 // Create tuning options and open the database
-int open_options = kyotocabinet::PolyDB::OWRITER |
+int open_options =
-kyotocabinet::PolyDB::OCREATE;
+kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
-int tune_options = kyotocabinet::TreeDB::TSMALL |
+int tune_options =
-kyotocabinet::TreeDB::TLINEAR;
+kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
 if (FLAGS_compression) {
 tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
 db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
 db_->tune_options(tune_options);
 db_->tune_page_cache(FLAGS_cache_size);
 db_->tune_page(FLAGS_page_size);
-db_->tune_map(256LL<<20);
+db_->tune_map(256LL << 20);
 if (sync) {
 open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
 }
@@ -421,8 +416,8 @@ class Benchmark {
 }
 }
 
-void Write(bool sync, Order order, DBState state,
+void Write(bool sync, Order order, DBState state, int num_entries,
-int num_entries, int value_size, int entries_per_batch) {
+int value_size, int entries_per_batch) {
 // Create new database if state == FRESH
 if (state == FRESH) {
 if (FLAGS_use_existing_db) {
@@ -430,7 +425,7 @@ class Benchmark {
 return;
 }
 delete db_;
-db_ = NULL;
+db_ = nullptr;
 Open(sync);
 Start(); // Do not count time taken to destroy/open
 }
@@ -442,8 +437,7 @@ class Benchmark {
 }
 
 // Write to database
-for (int i = 0; i < num_entries; i++)
+for (int i = 0; i < num_entries; i++) {
-{
 const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
 char key[100];
 snprintf(key, sizeof(key), "%016d", k);
@@ -516,10 +510,10 @@ int main(int argc, char** argv) {
 }
 
 // Choose a location for the test database if none given with --db=<path>
-if (FLAGS_db == NULL) {
+if (FLAGS_db == nullptr) {
 leveldb::Env::Default()->GetTestDirectory(&default_db_path);
 default_db_path += "/dbbench";
 FLAGS_db = default_db_path.c_str();
 }
 
 leveldb::Benchmark benchmark;
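
The third file is the Kyoto Cabinet (TreeDB) flavor of the benchmark, where the diff mostly reflows the tuning and open sequence. The sketch below is our reconstruction rather than the commit's code: it strings together the tune-then-open calls visible in the hunks above, with made-up stand-ins for the file name and the FLAGS_cache_size / FLAGS_page_size values, and it omits the compressor tuning.

// Hypothetical stand-alone sketch of the TreeDB setup the benchmark performs.
#include <kcpolydb.h>
#include <cstdio>

int main() {
  kyotocabinet::TreeDB db;

  // Tuning mirrors the calls in the diff; the numeric values are placeholders.
  int open_options = kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
  int tune_options = kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
  db.tune_options(tune_options);
  db.tune_page_cache(4 << 20);   // stand-in for FLAGS_cache_size (4 MB)
  db.tune_page(1024);            // stand-in for FLAGS_page_size
  db.tune_map(256LL << 20);      // 256 MB memory map, as in the diff

  if (!db.open("dbbench_sketch.kct", open_options)) {
    std::fprintf(stderr, "open error: %s\n", db.error().name());
    return 1;
  }
  db.set("0000000000000001", "value");  // one write, analogous to the fill benchmarks
  if (!db.synchronize()) {              // flush to disk, as DBSynchronize() does
    std::fprintf(stderr, "synchronize error: %s\n", db.error().name());
  }
  db.close();
  return 0;
}

Note that all tuning calls must happen before open(); that ordering is why the benchmark's Open(bool sync) builds open_options and tune_options first and only then opens the file.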
@ -1,256 +0,0 @@
#!/bin/sh
#
# Detects OS we're compiling on and outputs a file specified by the first
# argument, which in turn gets read while processing Makefile.
#
# The output will set the following variables:
#   CC                          C Compiler path
#   CXX                         C++ Compiler path
#   PLATFORM_LDFLAGS            Linker flags
#   PLATFORM_LIBS               Libraries flags
#   PLATFORM_SHARED_EXT         Extension for shared libraries
#   PLATFORM_SHARED_LDFLAGS     Flags for building shared library
#                               This flag is embedded just before the name
#                               of the shared library without intervening spaces
#   PLATFORM_SHARED_CFLAGS      Flags for compiling objects for shared library
#   PLATFORM_CCFLAGS            C compiler flags
#   PLATFORM_CXXFLAGS           C++ compiler flags.  Will contain:
#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
#                               shared libraries, empty otherwise.
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
#       -DLEVELDB_ATOMIC_PRESENT     if <atomic> is present
#       -DLEVELDB_PLATFORM_POSIX     for Posix-based platforms
#       -DSNAPPY                     if the Snappy library is present
#

OUTPUT=$1
PREFIX=$2
if test -z "$OUTPUT" || test -z "$PREFIX"; then
  echo "usage: $0 <output-filename> <directory_prefix>" >&2
  exit 1
fi

# Delete existing output, if it exists
rm -f $OUTPUT
touch $OUTPUT

if test -z "$CC"; then
    CC=cc
fi

if test -z "$CXX"; then
    CXX=g++
fi

if test -z "$TMPDIR"; then
    TMPDIR=/tmp
fi

# Detect OS
if test -z "$TARGET_OS"; then
    TARGET_OS=`uname -s`
fi

COMMON_FLAGS=
CROSS_COMPILE=
PLATFORM_CCFLAGS=
PLATFORM_CXXFLAGS=
PLATFORM_LDFLAGS=
PLATFORM_LIBS=
PLATFORM_SHARED_EXT="so"
PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
PLATFORM_SHARED_CFLAGS="-fPIC"
PLATFORM_SHARED_VERSIONED=true
PLATFORM_SSEFLAGS=

MEMCMP_FLAG=
if [ "$CXX" = "g++" ]; then
    # Use libc's memcmp instead of GCC's memcmp.  This results in ~40%
    # performance improvement on readrandom under gcc 4.4.3 on Linux/x86.
    MEMCMP_FLAG="-fno-builtin-memcmp"
fi

case "$TARGET_OS" in
    CYGWIN_*)
        PLATFORM=OS_LINUX
        COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
        PLATFORM_LDFLAGS="-lpthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    Darwin)
        PLATFORM=OS_MACOSX
        COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
        PLATFORM_SHARED_EXT=dylib
        [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    Linux)
        PLATFORM=OS_LINUX
        COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    SunOS)
        PLATFORM=OS_SOLARIS
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
        PLATFORM_LIBS="-lpthread -lrt"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    FreeBSD)
        PLATFORM=OS_FREEBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
        PLATFORM_LIBS="-lpthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    NetBSD)
        PLATFORM=OS_NETBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
        PLATFORM_LIBS="-lpthread -lgcc_s"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    OpenBSD)
        PLATFORM=OS_OPENBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    DragonFly)
        PLATFORM=OS_DRAGONFLYBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
        PLATFORM_LIBS="-lpthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        ;;
    OS_ANDROID_CROSSCOMPILE)
        PLATFORM=OS_ANDROID
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
        PLATFORM_LDFLAGS=""  # All pthread features are in the Android C library
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        CROSS_COMPILE=true
        ;;
    HP-UX)
        PLATFORM=OS_HPUX
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        # man ld: +h internal_name
        PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
        ;;
    IOS)
        PLATFORM=IOS
        COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
        [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
        PORT_FILE=port/port_posix.cc
        PORT_SSE_FILE=port/port_posix_sse.cc
        PLATFORM_SHARED_EXT=
        PLATFORM_SHARED_LDFLAGS=
        PLATFORM_SHARED_CFLAGS=
        PLATFORM_SHARED_VERSIONED=
        ;;
    *)
        echo "Unknown platform!" >&2
        exit 1
esac

# We want to make a list of all cc files within util, db, table, and helpers
# except for the test and benchmark files. By default, find will output a list
# of all files matching either rule, so we need to append -print to make the
# prune take effect.
DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"

set -f # temporarily disable globbing so that our patterns aren't expanded
PRUNE_TEST="-name *test*.cc -prune"
PRUNE_BENCH="-name *_bench.cc -prune"
PRUNE_TOOL="-name leveldbutil.cc -prune"
PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`

set +f # re-enable globbing

# The sources consist of the portable files, plus the platform-specific port
# file.
echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT
echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT

if [ "$CROSS_COMPILE" = "true" ]; then
    # Cross-compiling; do not try any compilation tests.
    true
else
    CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"

    # If -std=c++0x works, use <atomic> as fallback for when memory barriers
    # are not available.
    $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
      #include <atomic>
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
        PLATFORM_CXXFLAGS="-std=c++0x"
    else
        COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
    fi

    # Test whether Snappy library is installed
    # http://code.google.com/p/snappy/
    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
      #include <snappy.h>
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
        PLATFORM_LIBS="$PLATFORM_LIBS -lsnappy"
    fi

    # Test whether tcmalloc is available
    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc"
    fi

    rm -f $CXXOUTPUT 2>/dev/null

    # Test if gcc SSE 4.2 is supported
    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        PLATFORM_SSEFLAGS="-msse4.2"
    fi

    rm -f $CXXOUTPUT 2>/dev/null
fi

# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them.
if [ -n "$PLATFORM_SSEFLAGS" ]; then
    PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE"
fi

PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"

echo "CC=$CC" >> $OUTPUT
echo "CXX=$CXX" >> $OUTPUT
echo "PLATFORM=$PLATFORM" >> $OUTPUT
echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT
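The deleted script fed the old Makefile: it appended one VAR=value line per variable listed in its header comment to the file named by $OUTPUT. Purely for illustration (not the output of a real run, and with the SOURCES and MEMENV_SOURCES lines omitted), a build_config.mk produced on a Linux host with Snappy present would look roughly like:

CC=cc
CXX=g++
PLATFORM=OS_LINUX
PLATFORM_LDFLAGS=-pthread
PLATFORM_LIBS= -lsnappy
PLATFORM_CCFLAGS= -fno-builtin-memcmp -pthread -DOS_LINUX -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT -DSNAPPY
PLATFORM_CXXFLAGS=-std=c++0x -fno-builtin-memcmp -pthread -DOS_LINUX -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT -DSNAPPY
PLATFORM_SSEFLAGS=-msse4.2 -DLEVELDB_PLATFORM_POSIX_SSE
PLATFORM_SHARED_CFLAGS=-fPIC
PLATFORM_SHARED_EXT=so
PLATFORM_SHARED_LDFLAGS=-shared -Wl,-soname -Wl,
PLATFORM_SHARED_VERSIONED=true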
1
cmake/leveldbConfig.cmake
Normal file
@ -0,0 +1 @@
include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
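The new leveldbConfig.cmake is the package entry point that find_package() loads; it simply pulls in the exported targets file. A minimal downstream CMakeLists.txt sketch, assuming leveldb has been installed to a prefix on CMAKE_PREFIX_PATH and that the exported target is named leveldb (the actual target name is defined by leveldbTargets.cmake, which is not shown in this diff):

cmake_minimum_required(VERSION 3.9)
project(leveldb_consumer CXX)

# Locates leveldbConfig.cmake, which in turn includes leveldbTargets.cmake.
find_package(leveldb REQUIRED)

add_executable(consumer main.cc)
target_link_libraries(consumer leveldb)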
@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "util/testharness.h"
 #include "util/testutil.h"

@ -12,11 +12,6 @@ namespace leveldb {

 class AutoCompactTest {
  public:
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
-
   AutoCompactTest() {
     dbname_ = test::TmpDir() + "/autocompact_test";
     tiny_cache_ = NewLRUCache(100);
@ -47,6 +42,12 @@ class AutoCompactTest {
   }

   void DoReads(int n);
+
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
+  Options options_;
+  DB* db_;
 };

 static const int kValueSize = 200 * 1024;
@ -81,17 +82,16 @@ void AutoCompactTest::DoReads(int n) {
     ASSERT_LT(read, 100) << "Taking too long to compact";
     Iterator* iter = db_->NewIterator(ReadOptions());
     for (iter->SeekToFirst();
-         iter->Valid() && iter->key().ToString() < limit_key;
-         iter->Next()) {
+         iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
       // Drop data
     }
     delete iter;
     // Wait a little bit to allow any triggered compactions to complete.
     Env::Default()->SleepForMicroseconds(1000000);
     uint64_t size = Size(Key(0), Key(n));
-    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
-            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
-    if (size <= initial_size/10) {
+    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+            size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+    if (size <= initial_size / 10) {
       break;
     }
   }
@ -100,19 +100,13 @@ void AutoCompactTest::DoReads(int n) {
   // is pretty much unchanged.
   const int64_t final_other_size = Size(Key(n), Key(kCount));
   ASSERT_LE(final_other_size, initial_other_size + 1048576);
-  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+  ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
 }

-TEST(AutoCompactTest, ReadAll) {
-  DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }

-TEST(AutoCompactTest, ReadHalf) {
-  DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@ -4,8 +4,8 @@

 #include "db/builder.h"

-#include "db/filename.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "db/table_cache.h"
 #include "db/version_edit.h"
 #include "leveldb/db.h"
@ -14,12 +14,8 @@

 namespace leveldb {

-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
   Status s;
   meta->file_size = 0;
   iter->SeekToFirst();
@ -41,14 +37,10 @@ Status BuildTable(const std::string& dbname,
     }

     // Finish and check for builder errors
+    s = builder->Finish();
     if (s.ok()) {
-      s = builder->Finish();
-      if (s.ok()) {
-        meta->file_size = builder->FileSize();
-        assert(meta->file_size > 0);
-      }
-    } else {
-      builder->Abandon();
+      meta->file_size = builder->FileSize();
+      assert(meta->file_size > 0);
     }
     delete builder;

@ -60,12 +52,11 @@ Status BuildTable(const std::string& dbname,
       s = file->Close();
     }
     delete file;
-    file = NULL;
+    file = nullptr;

     if (s.ok()) {
       // Verify that the table is usable
-      Iterator* it = table_cache->NewIterator(ReadOptions(),
-                                              meta->number,
+      Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
                                               meta->file_size);
       s = it->status();
       delete it;
@ -22,12 +22,8 @@ class VersionEdit;
 // *meta will be filled with metadata about the generated table.
 // If no data is present in *iter, meta->file_size will be set to
 // zero, and no Table file will be produced.
-extern Status BuildTable(const std::string& dbname,
-                         Env* env,
-                         const Options& options,
-                         TableCache* table_cache,
-                         Iterator* iter,
-                         FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta);

 }  // namespace leveldb

381
db/c.cc
@ -5,7 +5,7 @@
 #include "leveldb/c.h"

 #include <stdlib.h>
-#include <unistd.h>
 #include "leveldb/cache.h"
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
@ -43,67 +43,70 @@ using leveldb::WriteOptions;

 extern "C" {

-struct leveldb_t { DB* rep; };
-struct leveldb_iterator_t { Iterator* rep; };
-struct leveldb_writebatch_t { WriteBatch rep; };
-struct leveldb_snapshot_t { const Snapshot* rep; };
-struct leveldb_readoptions_t { ReadOptions rep; };
-struct leveldb_writeoptions_t { WriteOptions rep; };
-struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
-struct leveldb_seqfile_t { SequentialFile* rep; };
-struct leveldb_randomfile_t { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
-struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_t {
+  DB* rep;
+};
+struct leveldb_iterator_t {
+  Iterator* rep;
+};
+struct leveldb_writebatch_t {
+  WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+  const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+  ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+  WriteOptions rep;
+};
+struct leveldb_options_t {
+  Options rep;
+};
+struct leveldb_cache_t {
+  Cache* rep;
+};
+struct leveldb_seqfile_t {
+  SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+  RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+  WritableFile* rep;
+};
+struct leveldb_logger_t {
+  Logger* rep;
+};
+struct leveldb_filelock_t {
+  FileLock* rep;
+};

 struct leveldb_comparator_t : public Comparator {
-  void* state_;
-  void (*destructor_)(void*);
-  int (*compare_)(
-      void*,
-      const char* a, size_t alen,
-      const char* b, size_t blen);
-  const char* (*name_)(void*);
-
-  virtual ~leveldb_comparator_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_comparator_t() { (*destructor_)(state_); }

   virtual int Compare(const Slice& a, const Slice& b) const {
     return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
   }

-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }

   // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*, const Slice&) const { }
-  virtual void FindShortSuccessor(std::string* key) const { }
+  virtual void FindShortestSeparator(std::string*, const Slice&) const {}
+  virtual void FindShortSuccessor(std::string* key) const {}
+
+  void* state_;
+  void (*destructor_)(void*);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
+  const char* (*name_)(void*);
 };

 struct leveldb_filterpolicy_t : public FilterPolicy {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*create_)(
-      void*,
-      const char* const* key_array, const size_t* key_length_array,
-      int num_keys,
-      size_t* filter_length);
-  unsigned char (*key_match_)(
-      void*,
-      const char* key, size_t length,
-      const char* filter, size_t filter_length);
-
-  virtual ~leveldb_filterpolicy_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
+
+  virtual const char* Name() const { return (*name_)(state_); }

   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     std::vector<const char*> key_pointers(n);
@ -119,9 +122,18 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
   }

   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
-    return (*key_match_)(state_, key.data(), key.size(),
-                         filter.data(), filter.size());
+    return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+                         filter.size());
   }
+
+  void* state_;
+  void (*destructor_)(void*);
+  const char* (*name_)(void*);
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 };

 struct leveldb_env_t {
@ -130,10 +142,10 @@ struct leveldb_env_t {
 };

 static bool SaveError(char** errptr, const Status& s) {
-  assert(errptr != NULL);
+  assert(errptr != nullptr);
   if (s.ok()) {
     return false;
-  } else if (*errptr == NULL) {
+  } else if (*errptr == nullptr) {
     *errptr = strdup(s.ToString().c_str());
   } else {
     // TODO(sanjay): Merge with existing error?
@ -149,13 +161,11 @@ static char* CopyString(const std::string& str) {
   return result;
 }

-leveldb_t* leveldb_open(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   DB* db;
   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
-    return NULL;
+    return nullptr;
   }
   leveldb_t* result = new leveldb_t;
   result->rep = db;
@ -167,40 +177,27 @@ void leveldb_close(leveldb_t* db) {
   delete db;
 }

-void leveldb_put(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+                 const char* key, size_t keylen, const char* val, size_t vallen,
+                 char** errptr) {
   SaveError(errptr,
             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
 }

-void leveldb_delete(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+                    const char* key, size_t keylen, char** errptr) {
   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
 }

-void leveldb_write(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    leveldb_writebatch_t* batch,
-    char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+                   leveldb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }

-char* leveldb_get(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = NULL;
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+                  const char* key, size_t keylen, size_t* vallen,
+                  char** errptr) {
+  char* result = nullptr;
   std::string tmp;
   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
   if (s.ok()) {
@ -216,45 +213,40 @@ char* leveldb_get(
 }

 leveldb_iterator_t* leveldb_create_iterator(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options) {
+    leveldb_t* db, const leveldb_readoptions_t* options) {
   leveldb_iterator_t* result = new leveldb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep);
   return result;
 }

-const leveldb_snapshot_t* leveldb_create_snapshot(
-    leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
   leveldb_snapshot_t* result = new leveldb_snapshot_t;
   result->rep = db->rep->GetSnapshot();
   return result;
 }

-void leveldb_release_snapshot(
-    leveldb_t* db,
-    const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+                              const leveldb_snapshot_t* snapshot) {
   db->rep->ReleaseSnapshot(snapshot->rep);
   delete snapshot;
 }

-char* leveldb_property_value(
-    leveldb_t* db,
-    const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
     return strdup(tmp.c_str());
   } else {
-    return NULL;
+    return nullptr;
   }
 }

-void leveldb_approximate_sizes(
-    leveldb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
                                uint64_t* sizes) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@ -264,28 +256,23 @@ void leveldb_approximate_sizes(
   delete[] ranges;
 }

-void leveldb_compact_range(
-    leveldb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+                           size_t start_key_len, const char* limit_key,
+                           size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
-      // Pass NULL Slice if corresponding "const char*" is NULL
-      (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL),
-      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL));
+      // Pass null Slice if corresponding "const char*" is null
+      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }

-void leveldb_destroy_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   SaveError(errptr, DestroyDB(name, options->rep));
 }

-void leveldb_repair_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+                       char** errptr) {
   SaveError(errptr, RepairDB(name, options->rep));
 }

@ -310,13 +297,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
   iter->rep->Seek(Slice(k, klen));
 }

-void leveldb_iter_next(leveldb_iterator_t* iter) {
-  iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }

-void leveldb_iter_prev(leveldb_iterator_t* iter) {
-  iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }

 const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
   Slice s = iter->rep->key();
@ -338,32 +321,25 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
   return new leveldb_writebatch_t;
 }

-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
-  delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }

-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
-  b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }

-void leveldb_writebatch_put(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+                            size_t klen, const char* val, size_t vlen) {
   b->rep.Put(Slice(key, klen), Slice(val, vlen));
 }

-void leveldb_writebatch_delete(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+                               size_t klen) {
   b->rep.Delete(Slice(key, klen));
 }

-void leveldb_writebatch_iterate(
-    leveldb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+                                void (*put)(void*, const char* k, size_t klen,
+                                            const char* v, size_t vlen),
+                                void (*deleted)(void*, const char* k,
+                                                size_t klen)) {
   class H : public WriteBatch::Handler {
    public:
     void* state_;
@ -383,47 +359,46 @@ void leveldb_writebatch_iterate(
   b->rep.Iterate(&handler);
 }

-leveldb_options_t* leveldb_options_create() {
-  return new leveldb_options_t;
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+                               const leveldb_writebatch_t* source) {
+  destination->rep.Append(source->rep);
 }

-void leveldb_options_destroy(leveldb_options_t* options) {
-  delete options;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }

-void leveldb_options_set_comparator(
-    leveldb_options_t* opt,
-    leveldb_comparator_t* cmp) {
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
+
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+                                    leveldb_comparator_t* cmp) {
   opt->rep.comparator = cmp;
 }

-void leveldb_options_set_filter_policy(
-    leveldb_options_t* opt,
-    leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+                                       leveldb_filterpolicy_t* policy) {
   opt->rep.filter_policy = policy;
 }

-void leveldb_options_set_create_if_missing(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.create_if_missing = v;
 }

-void leveldb_options_set_error_if_exists(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.error_if_exists = v;
 }

-void leveldb_options_set_paranoid_checks(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.paranoid_checks = v;
 }

 void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
-  opt->rep.env = (env ? env->rep : NULL);
+  opt->rep.env = (env ? env->rep : nullptr);
 }

 void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
-  opt->rep.info_log = (l ? l->rep : NULL);
+  opt->rep.info_log = (l ? l->rep : nullptr);
 }

 void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
@ -446,17 +421,18 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
   opt->rep.block_restart_interval = n;
 }

+void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
+  opt->rep.max_file_size = s;
+}
+
 void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
   opt->rep.compression = static_cast<CompressionType>(t);
 }

 leveldb_comparator_t* leveldb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+                   size_t blen),
     const char* (*name)(void*)) {
   leveldb_comparator_t* result = new leveldb_comparator_t;
   result->state_ = state;
@ -466,22 +442,15 @@ leveldb_comparator_t* leveldb_comparator_create(
   return result;
 }

-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
-  delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }

 leveldb_filterpolicy_t* leveldb_filterpolicy_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*create_filter)(
-        void*,
-        const char* const* key_array, const size_t* key_length_array,
-        int num_keys,
-        size_t* filter_length),
-    unsigned char (*key_may_match)(
-        void*,
-        const char* key, size_t length,
-        const char* filter, size_t filter_length),
+    void* state, void (*destructor)(void*),
+    char* (*create_filter)(void*, const char* const* key_array,
+                           const size_t* key_length_array, int num_keys,
+                           size_t* filter_length),
+    unsigned char (*key_may_match)(void*, const char* key, size_t length,
+                                   const char* filter, size_t filter_length),
    const char* (*name)(void*)) {
   leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
   result->state_ = state;
@ -501,7 +470,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
   // they delegate to a NewBloomFilterPolicy() instead of user
   // supplied C functions.
   struct Wrapper : public leveldb_filterpolicy_t {
-    const FilterPolicy* rep_;
+    static void DoNothing(void*) {}
+
     ~Wrapper() { delete rep_; }
     const char* Name() const { return rep_->Name(); }
     void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@ -510,11 +480,12 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) { }
+
+    const FilterPolicy* rep_;
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
-  wrapper->state_ = NULL;
+  wrapper->state_ = nullptr;
   wrapper->destructor_ = &Wrapper::DoNothing;
   return wrapper;
 }
@ -523,37 +494,31 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
   return new leveldb_readoptions_t;
 }

-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
-  delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }

-void leveldb_readoptions_set_verify_checksums(
-    leveldb_readoptions_t* opt,
-    unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+                                              unsigned char v) {
   opt->rep.verify_checksums = v;
 }

-void leveldb_readoptions_set_fill_cache(
-    leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
+                                        unsigned char v) {
   opt->rep.fill_cache = v;
 }

-void leveldb_readoptions_set_snapshot(
-    leveldb_readoptions_t* opt,
-    const leveldb_snapshot_t* snap) {
-  opt->rep.snapshot = (snap ? snap->rep : NULL);
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+                                      const leveldb_snapshot_t* snap) {
+  opt->rep.snapshot = (snap ? snap->rep : nullptr);
 }

 leveldb_writeoptions_t* leveldb_writeoptions_create() {
   return new leveldb_writeoptions_t;
 }

-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
-  delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }

-void leveldb_writeoptions_set_sync(
-    leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.sync = v;
 }

@ -580,16 +545,22 @@ void leveldb_env_destroy(leveldb_env_t* env) {
   delete env;
 }

-void leveldb_free(void* ptr) {
-  free(ptr);
+char* leveldb_env_get_test_directory(leveldb_env_t* env) {
+  std::string result;
+  if (!env->rep->GetTestDirectory(&result).ok()) {
+    return nullptr;
+  }
+
+  char* buffer = static_cast<char*>(malloc(result.size() + 1));
+  memcpy(buffer, result.data(), result.size());
+  buffer[result.size()] = '\0';
+  return buffer;
 }

-int leveldb_major_version() {
-  return kMajorVersion;
-}
+void leveldb_free(void* ptr) { free(ptr); }

-int leveldb_minor_version() {
-  return kMinorVersion;
-}
+int leveldb_major_version() { return kMajorVersion; }
+
+int leveldb_minor_version() { return kMinorVersion; }

 }  // end extern "C"
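The C binding changes above add three entry points: leveldb_options_set_max_file_size(), leveldb_writebatch_append(), and leveldb_env_get_test_directory(). A small C sketch of how they combine, based only on the signatures visible in this diff (error handling trimmed):

#include <stdio.h>
#include <stdlib.h>

#include "leveldb/c.h"

int main(void) {
  leveldb_env_t* env = leveldb_create_default_env();
  char* dbname = leveldb_env_get_test_directory(env);  /* malloc'd; may be NULL */
  if (dbname == NULL) return 1;

  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);
  leveldb_options_set_max_file_size(options, 3 << 20);  /* cap table files at 3 MiB */

  char* err = NULL;
  leveldb_t* db = leveldb_open(options, dbname, &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);
    return 1;
  }

  /* Splice one batch into another, then apply the combined batch. */
  leveldb_writebatch_t* wb = leveldb_writebatch_create();
  leveldb_writebatch_put(wb, "foo", 3, "hello", 5);
  leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
  leveldb_writebatch_delete(wb2, "foo", 3);
  leveldb_writebatch_append(wb, wb2);  /* wb now holds the put and the delete */
  leveldb_writebatch_destroy(wb2);

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_write(db, woptions, wb, &err);
  if (err != NULL) {
    fprintf(stderr, "write failed: %s\n", err);
    leveldb_free(err);
  }

  leveldb_writebatch_destroy(wb);
  leveldb_writeoptions_destroy(woptions);
  leveldb_close(db);
  leveldb_options_destroy(options);
  leveldb_free(dbname);  /* the returned buffer comes from malloc() */
  leveldb_env_destroy(env);
  return 0;
}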
28
db/c_test.c
@ -8,24 +8,14 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/types.h>
-#include <unistd.h>

 const char* phase = "";
-static char dbname[200];

 static void StartPhase(const char* name) {
   fprintf(stderr, "=== Test %s\n", name);
   phase = name;
 }

-static const char* GetTempDir(void) {
-  const char* ret = getenv("TEST_TMPDIR");
-  if (ret == NULL || ret[0] == '\0')
-    ret = "/tmp";
-  return ret;
-}
-
 #define CheckNoError(err) \
   if ((err) != NULL) { \
     fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
@ -162,21 +152,19 @@ int main(int argc, char** argv) {
   leveldb_options_t* options;
   leveldb_readoptions_t* roptions;
   leveldb_writeoptions_t* woptions;
+  char* dbname;
   char* err = NULL;
   int run = -1;

   CheckCondition(leveldb_major_version() >= 1);
   CheckCondition(leveldb_minor_version() >= 1);

-  snprintf(dbname, sizeof(dbname),
-           "%s/leveldb_c_test-%d",
-           GetTempDir(),
-           ((int) geteuid()));
-
   StartPhase("create_objects");
   cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
   env = leveldb_create_default_env();
   cache = leveldb_cache_create_lru(100000);
+  dbname = leveldb_env_get_test_directory(env);
+  CheckCondition(dbname != NULL);

   options = leveldb_options_create();
   leveldb_options_set_comparator(options, cmp);
@ -189,6 +177,7 @@ int main(int argc, char** argv) {
   leveldb_options_set_max_open_files(options, 10);
   leveldb_options_set_block_size(options, 1024);
   leveldb_options_set_block_restart_interval(options, 8);
+  leveldb_options_set_max_file_size(options, 3 << 20);
   leveldb_options_set_compression(options, leveldb_no_compression);

   roptions = leveldb_readoptions_create();
@ -239,12 +228,18 @@ int main(int argc, char** argv) {
     leveldb_writebatch_clear(wb);
     leveldb_writebatch_put(wb, "bar", 3, "b", 1);
     leveldb_writebatch_put(wb, "box", 3, "c", 1);
-    leveldb_writebatch_delete(wb, "bar", 3);
+
+    leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+    leveldb_writebatch_delete(wb2, "bar", 3);
+    leveldb_writebatch_append(wb, wb2);
+    leveldb_writebatch_destroy(wb2);
+
     leveldb_write(db, woptions, wb, &err);
     CheckNoError(err);
     CheckGet(db, roptions, "foo", "hello");
     CheckGet(db, roptions, "bar", NULL);
     CheckGet(db, roptions, "box", "c");
+
     int pos = 0;
     leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
     CheckCondition(pos == 3);
@ -381,6 +376,7 @@ int main(int argc, char** argv) {
   leveldb_options_destroy(options);
   leveldb_readoptions_destroy(roptions);
   leveldb_writeoptions_destroy(woptions);
+  leveldb_free(dbname);
   leveldb_cache_destroy(cache);
   leveldb_comparator_destroy(cmp);
   leveldb_env_destroy(env);
@ -2,20 +2,16 @@
|
|||||||
// Use of this source code is governed by a BSD-style license that can be
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
#include "leveldb/db.h"
|
|
||||||
|
|
||||||
#include <errno.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include "leveldb/cache.h"
|
|
||||||
#include "leveldb/env.h"
|
|
||||||
#include "leveldb/table.h"
|
|
||||||
#include "leveldb/write_batch.h"
|
|
||||||
#include "db/db_impl.h"
|
#include "db/db_impl.h"
|
||||||
#include "db/filename.h"
|
#include "db/filename.h"
|
||||||
#include "db/log_format.h"
|
#include "db/log_format.h"
|
||||||
#include "db/version_set.h"
|
#include "db/version_set.h"
|
||||||
|
#include "leveldb/cache.h"
|
||||||
|
#include "leveldb/db.h"
|
||||||
|
#include "leveldb/table.h"
|
||||||
|
#include "leveldb/write_batch.h"
|
||||||
#include "util/logging.h"
|
#include "util/logging.h"
|
||||||
#include "util/testharness.h"
|
#include "util/testharness.h"
|
||||||
#include "util/testutil.h"
|
#include "util/testutil.h"
|
||||||
@ -26,44 +22,35 @@ static const int kValueSize = 1000;
|
|||||||
|
|
||||||
class CorruptionTest {
|
class CorruptionTest {
|
||||||
public:
|
public:
|
||||||
test::ErrorEnv env_;
|
CorruptionTest()
|
||||||
std::string dbname_;
|
: db_(nullptr),
|
||||||
Cache* tiny_cache_;
|
dbname_("/memenv/corruption_test"),
|
||||||
Options options_;
|
tiny_cache_(NewLRUCache(100)) {
|
||||||
DB* db_;
|
|
||||||
|
|
||||||
CorruptionTest() {
|
|
||||||
tiny_cache_ = NewLRUCache(100);
|
|
||||||
options_.env = &env_;
|
options_.env = &env_;
|
||||||
options_.block_cache = tiny_cache_;
|
options_.block_cache = tiny_cache_;
|
||||||
dbname_ = test::TmpDir() + "/corruption_test";
|
|
||||||
DestroyDB(dbname_, options_);
|
DestroyDB(dbname_, options_);
|
||||||
|
|
||||||
db_ = NULL;
|
|
||||||
options_.create_if_missing = true;
|
options_.create_if_missing = true;
|
||||||
Reopen();
|
Reopen();
|
||||||
options_.create_if_missing = false;
|
options_.create_if_missing = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
~CorruptionTest() {
|
~CorruptionTest() {
|
||||||
delete db_;
|
delete db_;
|
||||||
DestroyDB(dbname_, Options());
|
delete tiny_cache_;
|
||||||
delete tiny_cache_;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status TryReopen() {
|
Status TryReopen() {
|
||||||
delete db_;
|
delete db_;
|
||||||
db_ = NULL;
|
db_ = nullptr;
|
||||||
return DB::Open(options_, dbname_, &db_);
|
return DB::Open(options_, dbname_, &db_);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Reopen() {
|
void Reopen() { ASSERT_OK(TryReopen()); }
|
||||||
ASSERT_OK(TryReopen());
|
|
||||||
}
|
|
||||||
|
|
||||||
void RepairDB() {
|
void RepairDB() {
|
||||||
delete db_;
|
delete db_;
|
||||||
db_ = NULL;
|
db_ = nullptr;
|
||||||
ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
|
ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -71,7 +58,7 @@ class CorruptionTest {
|
|||||||
std::string key_space, value_space;
|
std::string key_space, value_space;
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
for (int i = 0; i < n; i++) {
|
for (int i = 0; i < n; i++) {
|
||||||
//if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
|
// if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
|
||||||
Slice key = Key(i, &key_space);
|
Slice key = Key(i, &key_space);
|
||||||
batch.Clear();
|
batch.Clear();
|
||||||
batch.Put(key, Value(i, &value_space));
|
batch.Put(key, Value(i, &value_space));
|
||||||
@ -100,8 +87,7 @@ class CorruptionTest {
|
|||||||
// Ignore boundary keys.
|
// Ignore boundary keys.
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (!ConsumeDecimalNumber(&in, &key) ||
|
if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
|
||||||
!in.empty() ||
|
|
||||||
key < next_expected) {
|
key < next_expected) {
|
||||||
bad_keys++;
|
bad_keys++;
|
||||||
continue;
|
continue;
|
||||||
@ -126,14 +112,13 @@ class CorruptionTest {
|
|||||||
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
|
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
|
||||||
// Pick file to corrupt
|
// Pick file to corrupt
|
||||||
std::vector<std::string> filenames;
|
std::vector<std::string> filenames;
|
||||||
ASSERT_OK(env_.GetChildren(dbname_, &filenames));
|
ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
|
||||||
uint64_t number;
|
uint64_t number;
|
||||||
FileType type;
|
FileType type;
|
||||||
std::string fname;
|
std::string fname;
|
||||||
int picked_number = -1;
|
int picked_number = -1;
|
||||||
for (size_t i = 0; i < filenames.size(); i++) {
|
for (size_t i = 0; i < filenames.size(); i++) {
|
||||||
if (ParseFileName(filenames[i], &number, &type) &&
|
if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
|
||||||
type == filetype &&
|
|
||||||
int(number) > picked_number) { // Pick latest file
|
int(number) > picked_number) { // Pick latest file
|
||||||
fname = dbname_ + "/" + filenames[i];
|
fname = dbname_ + "/" + filenames[i];
|
||||||
picked_number = number;
|
picked_number = number;
|
||||||
@ -141,35 +126,32 @@ class CorruptionTest {
|
|||||||
}
|
}
|
||||||
ASSERT_TRUE(!fname.empty()) << filetype;
|
ASSERT_TRUE(!fname.empty()) << filetype;
|
||||||
|
|
||||||
struct stat sbuf;
|
uint64_t file_size;
|
||||||
if (stat(fname.c_str(), &sbuf) != 0) {
|
ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
|
||||||
const char* msg = strerror(errno);
|
|
||||||
ASSERT_TRUE(false) << fname << ": " << msg;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (offset < 0) {
|
if (offset < 0) {
|
||||||
// Relative to end of file; make it absolute
|
// Relative to end of file; make it absolute
|
||||||
if (-offset > sbuf.st_size) {
|
if (-offset > file_size) {
|
||||||
offset = 0;
|
offset = 0;
|
||||||
} else {
|
} else {
|
||||||
offset = sbuf.st_size + offset;
|
offset = file_size + offset;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
-    if (offset > sbuf.st_size) {
-      offset = sbuf.st_size;
+    if (offset > file_size) {
+      offset = file_size;
     }
-    if (offset + bytes_to_corrupt > sbuf.st_size) {
-      bytes_to_corrupt = sbuf.st_size - offset;
+    if (offset + bytes_to_corrupt > file_size) {
+      bytes_to_corrupt = file_size - offset;
     }

     // Do it
     std::string contents;
-    Status s = ReadFileToString(Env::Default(), fname, &contents);
+    Status s = ReadFileToString(env_.target(), fname, &contents);
     ASSERT_TRUE(s.ok()) << s.ToString();
     for (int i = 0; i < bytes_to_corrupt; i++) {
       contents[i + offset] ^= 0x80;
     }
-    s = WriteStringToFile(Env::Default(), contents, fname);
+    s = WriteStringToFile(env_.target(), contents, fname);
     ASSERT_TRUE(s.ok()) << s.ToString();
   }

@@ -197,12 +179,20 @@ class CorruptionTest {
     Random r(k);
     return test::RandomString(&r, kValueSize, storage);
   }

+  test::ErrorEnv env_;
+  Options options_;
+  DB* db_;
+
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
 };

 TEST(CorruptionTest, Recovery) {
   Build(100);
   Check(100, 100);
   Corrupt(kLogFile, 19, 1);      // WriteBatch tag for first record
   Corrupt(kLogFile, log::kBlockSize + 1000, 1);  // Somewhere in second block
   Reopen();

@@ -237,8 +227,8 @@ TEST(CorruptionTest, TableFile) {
   Build(100);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
-  dbi->TEST_CompactRange(0, NULL, NULL);
-  dbi->TEST_CompactRange(1, NULL, NULL);
+  dbi->TEST_CompactRange(0, nullptr, nullptr);
+  dbi->TEST_CompactRange(1, nullptr, nullptr);

   Corrupt(kTableFile, 100, 1);
   Check(90, 99);
@@ -251,8 +241,8 @@ TEST(CorruptionTest, TableFileRepair) {
   Build(100);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
-  dbi->TEST_CompactRange(0, NULL, NULL);
-  dbi->TEST_CompactRange(1, NULL, NULL);
+  dbi->TEST_CompactRange(0, nullptr, nullptr);
+  dbi->TEST_CompactRange(1, nullptr, nullptr);

   Corrupt(kTableFile, 100, 1);
   RepairDB();
@@ -302,7 +292,7 @@ TEST(CorruptionTest, CorruptedDescriptor) {
   ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
-  dbi->TEST_CompactRange(0, NULL, NULL);
+  dbi->TEST_CompactRange(0, nullptr, nullptr);

   Corrupt(kDescriptorFile, 0, 1000);
   Status s = TryReopen();
@@ -343,7 +333,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
     Corrupt(kTableFile, 100, 1);
     env_.SleepForMicroseconds(100000);
   }
-  dbi->CompactRange(NULL, NULL);
+  dbi->CompactRange(nullptr, nullptr);

   // Write must fail because of corrupted table
   std::string tmp1, tmp2;
@@ -369,6 +359,4 @@ TEST(CorruptionTest, UnrelatedKeys) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
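Aside: the corruption step above works by XOR-ing each byte in the chosen range with 0x80, which flips its high bit and is therefore guaranteed to break the checksum of whatever log record or table block contains it. A minimal standalone sketch of that idea (FlipBytes is a hypothetical helper name, not part of leveldb):

#include <cstddef>
#include <string>

// Flip the high bit of n bytes starting at offset, clamping the range to the
// buffer size the same way the test's Corrupt() helper does.
void FlipBytes(std::string* contents, size_t offset, size_t n) {
  if (offset > contents->size()) offset = contents->size();
  if (offset + n > contents->size()) n = contents->size() - offset;
  for (size_t i = 0; i < n; i++) {
    (*contents)[offset + i] ^= 0x80;  // any single-bit change defeats the CRC
  }
}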
db/db_impl.cc (502 changed lines): file diff suppressed because it is too large.

db/db_impl.h (123 changed lines):
@@ -5,8 +5,11 @@
 #ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
 #define STORAGE_LEVELDB_DB_DB_IMPL_H_

+#include <atomic>
 #include <deque>
 #include <set>
+#include <string>

 #include "db/dbformat.h"
 #include "db/log_writer.h"
 #include "db/snapshot.h"
@@ -26,14 +29,17 @@ class VersionSet;
 class DBImpl : public DB {
  public:
   DBImpl(const Options& options, const std::string& dbname);

+  DBImpl(const DBImpl&) = delete;
+  DBImpl& operator=(const DBImpl&) = delete;
+
   virtual ~DBImpl();

   // Implementations of the DB interface
   virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
   virtual Status Delete(const WriteOptions&, const Slice& key);
   virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key,
+  virtual Status Get(const ReadOptions& options, const Slice& key,
                      std::string* value);
   virtual Iterator* NewIterator(const ReadOptions&);
   virtual const Snapshot* GetSnapshot();
@@ -69,6 +75,31 @@ class DBImpl : public DB {
   struct CompactionState;
   struct Writer;

+  // Information for a manual compaction
+  struct ManualCompaction {
+    int level;
+    bool done;
+    const InternalKey* begin;  // null means beginning of key range
+    const InternalKey* end;    // null means end of key range
+    InternalKey tmp_storage;   // Used to keep track of compaction progress
+  };
+
+  // Per level compaction stats.  stats_[level] stores the stats for
+  // compactions that produced data for the specified "level".
+  struct CompactionStats {
+    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+
+    void Add(const CompactionStats& c) {
+      this->micros += c.micros;
+      this->bytes_read += c.bytes_read;
+      this->bytes_written += c.bytes_written;
+    }
+
+    int64_t micros;
+    int64_t bytes_read;
+    int64_t bytes_written;
+  };
+
   Iterator* NewInternalIterator(const ReadOptions&,
                                 SequenceNumber* latest_snapshot,
                                 uint32_t* seed);
@@ -84,7 +115,7 @@ class DBImpl : public DB {
   void MaybeIgnoreError(Status* s) const;

   // Delete any unneeded files and stale in-memory entries.
-  void DeleteObsoleteFiles();
+  void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

   // Compact the in-memory write buffer to disk.  Switches to a new
   // log-file/memtable and writes a new descriptor iff successful.
@@ -100,14 +131,15 @@ class DBImpl : public DB {

   Status MakeRoomForWrite(bool force /* compact even if there is room? */)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  WriteBatch* BuildBatchGroup(Writer** last_writer);
+  WriteBatch* BuildBatchGroup(Writer** last_writer)
+      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

   void RecordBackgroundError(const Status& s);

   void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
   static void BGWork(void* db);
   void BackgroundCall();
   void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
   void CleanupCompaction(CompactionState* compact)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_);
   Status DoCompactionWork(CompactionState* compact)
@@ -118,93 +150,66 @@ class DBImpl : public DB {
   Status InstallCompactionResults(CompactionState* compact)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_);

+  const Comparator* user_comparator() const {
+    return internal_comparator_.user_comparator();
+  }
+
   // Constant after construction
   Env* const env_;
   const InternalKeyComparator internal_comparator_;
   const InternalFilterPolicy internal_filter_policy_;
   const Options options_;  // options_.comparator == &internal_comparator_
-  bool owns_info_log_;
-  bool owns_cache_;
+  const bool owns_info_log_;
+  const bool owns_cache_;
   const std::string dbname_;

   // table_cache_ provides its own synchronization
-  TableCache* table_cache_;
+  TableCache* const table_cache_;

-  // Lock over the persistent DB state.  Non-NULL iff successfully acquired.
+  // Lock over the persistent DB state.  Non-null iff successfully acquired.
   FileLock* db_lock_;

   // State below is protected by mutex_
   port::Mutex mutex_;
-  port::AtomicPointer shutting_down_;
-  port::CondVar bg_cv_;          // Signalled when background work finishes
+  std::atomic<bool> shutting_down_;
+  port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
   MemTable* mem_;
-  MemTable* imm_;                // Memtable being compacted
-  port::AtomicPointer has_imm_;  // So bg thread can detect non-NULL imm_
+  MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
+  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
   WritableFile* logfile_;
-  uint64_t logfile_number_;
+  uint64_t logfile_number_ GUARDED_BY(mutex_);
   log::Writer* log_;
-  uint32_t seed_;                // For sampling.
+  uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.

   // Queue of writers.
-  std::deque<Writer*> writers_;
-  WriteBatch* tmp_batch_;
+  std::deque<Writer*> writers_ GUARDED_BY(mutex_);
+  WriteBatch* tmp_batch_ GUARDED_BY(mutex_);

-  SnapshotList snapshots_;
+  SnapshotList snapshots_ GUARDED_BY(mutex_);

   // Set of table files to protect from deletion because they are
   // part of ongoing compactions.
-  std::set<uint64_t> pending_outputs_;
+  std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);

   // Has a background compaction been scheduled or is running?
-  bool bg_compaction_scheduled_;
+  bool background_compaction_scheduled_ GUARDED_BY(mutex_);

-  // Information for a manual compaction
-  struct ManualCompaction {
-    int level;
-    bool done;
-    const InternalKey* begin;   // NULL means beginning of key range
-    const InternalKey* end;     // NULL means end of key range
-    InternalKey tmp_storage;    // Used to keep track of compaction progress
-  };
-  ManualCompaction* manual_compaction_;
+  ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

-  VersionSet* versions_;
+  VersionSet* const versions_;

   // Have we encountered a background error in paranoid mode?
-  Status bg_error_;
+  Status bg_error_ GUARDED_BY(mutex_);

-  // Per level compaction stats.  stats_[level] stores the stats for
-  // compactions that produced data for the specified "level".
-  struct CompactionStats {
-    int64_t micros;
-    int64_t bytes_read;
-    int64_t bytes_written;
-
-    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
-
-    void Add(const CompactionStats& c) {
-      this->micros += c.micros;
-      this->bytes_read += c.bytes_read;
-      this->bytes_written += c.bytes_written;
-    }
-  };
-  CompactionStats stats_[config::kNumLevels];
-
-  // No copying allowed
-  DBImpl(const DBImpl&);
-  void operator=(const DBImpl&);
-
-  const Comparator* user_comparator() const {
-    return internal_comparator_.user_comparator();
-  }
+  CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
 };

 // Sanitize db options.  The caller should delete result.info_log if
 // it is not equal to src.info_log.
-extern Options SanitizeOptions(const std::string& db,
+Options SanitizeOptions(const std::string& db,
                         const InternalKeyComparator* icmp,
                         const InternalFilterPolicy* ipolicy,
                         const Options& src);

 }  // namespace leveldb
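The GUARDED_BY and EXCLUSIVE_LOCKS_REQUIRED markers added above are Clang thread-safety annotations (leveldb wires them up in port/thread_annotations.h); compiling with clang -Wthread-safety then warns whenever an annotated member is touched without the named mutex held. A minimal sketch of the pattern under assumed macro and class names, not leveldb's exact definitions:

#include <mutex>

#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)  // no-op for other compilers
#endif
#define CAPABILITY(x) THREAD_ANNOTATION(capability(x))
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION(exclusive_locks_required(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))

// A tiny lockable wrapper so the analysis knows mu_ is a capability.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class Counter {
 public:
  void Increment() {
    mu_.Lock();
    IncrementLocked();  // fine: mu_ is held on this path
    mu_.Unlock();
  }

 private:
  // clang -Wthread-safety flags any caller that does not hold mu_.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { value_++; }

  Mutex mu_;
  int value_ GUARDED_BY(mu_) = 0;
};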
db/db_iter.cc:

@@ -4,9 +4,9 @@

 #include "db/db_iter.h"

-#include "db/filename.h"
 #include "db/db_impl.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
 // combines multiple entries for the same userkey found in the DB
 // representation into a single entry while accounting for sequence
 // numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
  public:
   // Which direction is the iterator currently moving?
   // (1) When moving forward, the internal iterator is positioned at
   //     the exact entry that yields this->key(), this->value()
   // (2) When moving backwards, the internal iterator is positioned
   //     just before all entries whose user key == this->key().
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };

   DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
          uint32_t seed)
@@ -57,11 +54,12 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         rnd_(seed),
-        bytes_counter_(RandomPeriod()) {
-  }
-  virtual ~DBIter() {
-    delete iter_;
-  }
+        bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+  DBIter(const DBIter&) = delete;
+  DBIter& operator=(const DBIter&) = delete;
+  virtual ~DBIter() { delete iter_; }
   virtual bool Valid() const { return valid_; }
   virtual Slice key() const {
     assert(valid_);
@@ -103,38 +101,35 @@ class DBIter: public Iterator {
     }
   }

-  // Pick next gap with average value of config::kReadBytesPeriod.
-  ssize_t RandomPeriod() {
-    return rnd_.Uniform(2*config::kReadBytesPeriod);
+  // Picks the number of bytes that can be read until a compaction is scheduled.
+  size_t RandomCompactionPeriod() {
+    return rnd_.Uniform(2 * config::kReadBytesPeriod);
   }

   DBImpl* db_;
   const Comparator* const user_comparator_;
   Iterator* const iter_;
   SequenceNumber const sequence_;

   Status status_;
   std::string saved_key_;    // == current key when direction_==kReverse
   std::string saved_value_;  // == current raw value when direction_==kReverse
   Direction direction_;
   bool valid_;

   Random rnd_;
-  ssize_t bytes_counter_;
-
-  // No copying allowed
-  DBIter(const DBIter&);
-  void operator=(const DBIter&);
+  size_t bytes_until_read_sampling_;
 };

 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
   Slice k = iter_->key();
-  ssize_t n = k.size() + iter_->value().size();
-  bytes_counter_ -= n;
-  while (bytes_counter_ < 0) {
-    bytes_counter_ += RandomPeriod();
+  size_t bytes_read = k.size() + iter_->value().size();
+  while (bytes_until_read_sampling_ < bytes_read) {
+    bytes_until_read_sampling_ += RandomCompactionPeriod();
     db_->RecordReadSample(k);
   }
+  assert(bytes_until_read_sampling_ >= bytes_read);
+  bytes_until_read_sampling_ -= bytes_read;

   if (!ParseInternalKey(k, ikey)) {
     status_ = Status::Corruption("corrupted internal key in DBIter");
     return false;
@@ -165,6 +160,15 @@ void DBIter::Next() {
   } else {
     // Store in saved_key_ the current key so we skip it below.
     SaveKey(ExtractUserKey(iter_->key()), &saved_key_);
+
+    // iter_ is pointing to current key. We can now safely move to the next to
+    // avoid checking current key.
+    iter_->Next();
+    if (!iter_->Valid()) {
+      valid_ = false;
+      saved_key_.clear();
+      return;
+    }
   }

   FindNextUserEntry(true, &saved_key_);
@@ -218,8 +222,8 @@ void DBIter::Prev() {
       ClearSavedValue();
       return;
     }
-    if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
-                                  saved_key_) < 0) {
+    if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+        0) {
       break;
     }
   }
@@ -275,8 +279,8 @@ void DBIter::Seek(const Slice& target) {
   direction_ = kForward;
   ClearSavedValue();
   saved_key_.clear();
-  AppendInternalKey(
-      &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+  AppendInternalKey(&saved_key_,
+                    ParsedInternalKey(target, sequence_, kValueTypeForSeek));
   iter_->Seek(saved_key_);
   if (iter_->Valid()) {
     FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -305,12 +309,9 @@ void DBIter::SeekToLast() {

 }  // anonymous namespace

-Iterator* NewDBIterator(
-    DBImpl* db,
-    const Comparator* user_key_comparator,
-    Iterator* internal_iter,
-    SequenceNumber sequence,
-    uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
+                        uint32_t seed) {
   return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
 }
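The iterator change above replaces a signed countdown (bytes_counter_) with an unsigned budget (bytes_until_read_sampling_): each key/value visited consumes its size, and whenever the budget cannot cover a read the iterator reports a read sample to the DB (which may trigger a seek-based compaction) and draws a fresh random budget whose mean is config::kReadBytesPeriod. A small self-contained sketch of that sampling loop; the constant value and RecordReadSample here are stand-ins for illustration, not leveldb's definitions:

#include <cstdint>
#include <cstdio>
#include <random>

constexpr size_t kReadBytesPeriod = 1 << 20;  // assume roughly 1 MiB between samples

size_t RandomCompactionPeriod(std::mt19937& rnd) {
  // Uniform in [0, 2 * kReadBytesPeriod) so the expected gap is kReadBytesPeriod.
  return std::uniform_int_distribution<size_t>(0, 2 * kReadBytesPeriod - 1)(rnd);
}

void RecordReadSample(size_t entry) { std::printf("sampled entry %zu\n", entry); }

int main() {
  std::mt19937 rnd(42);
  size_t bytes_until_read_sampling = RandomCompactionPeriod(rnd);
  const size_t entry_size = 4096;  // pretend every entry read is 4 KiB

  for (size_t i = 0; i < 10000; i++) {
    // Top up the budget until it covers this read; each top-up emits a sample.
    while (bytes_until_read_sampling < entry_size) {
      bytes_until_read_sampling += RandomCompactionPeriod(rnd);
      RecordReadSample(i);
    }
    bytes_until_read_sampling -= entry_size;
  }
  return 0;
}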
db/db_iter.h (12 changed lines):

@@ -6,8 +6,9 @@
 #define STORAGE_LEVELDB_DB_DB_ITER_H_

 #include <stdint.h>
-#include "leveldb/db.h"
+
 #include "db/dbformat.h"
+#include "leveldb/db.h"

 namespace leveldb {

@@ -16,12 +17,9 @@ class DBImpl;
 // Return a new iterator that converts internal keys (yielded by
 // "*internal_iter") that were live at the specified "sequence" number
 // into appropriate user keys.
-extern Iterator* NewDBIterator(
-    DBImpl* db,
-    const Comparator* user_key_comparator,
-    Iterator* internal_iter,
-    SequenceNumber sequence,
-    uint32_t seed);
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
+                        uint32_t seed);

 }  // namespace leveldb
db/db_test.cc (675 changed lines): file diff suppressed because it is too large.
db/dbformat.cc:

@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <stdio.h>
 #include "db/dbformat.h"

+#include <stdio.h>
+
 #include "port/port.h"
 #include "util/coding.h"

@@ -22,8 +24,7 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {

 std::string ParsedInternalKey::DebugString() const {
   char buf[50];
-  snprintf(buf, sizeof(buf), "' @ %llu : %d",
-           (unsigned long long) sequence,
+  snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
            int(type));
   std::string result = "'";
   result += EscapeString(user_key.ToString());
@@ -65,9 +66,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
   return r;
 }

-void InternalKeyComparator::FindShortestSeparator(
-    std::string* start,
-    const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+                                                  const Slice& limit) const {
   // Attempt to shorten the user portion of the key
   Slice user_start = ExtractUserKey(*start);
   Slice user_limit = ExtractUserKey(limit);
@@ -77,7 +77,8 @@ void InternalKeyComparator::FindShortestSeparator(
       user_comparator_->Compare(user_start, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*start, tmp) < 0);
     assert(this->Compare(tmp, limit) < 0);
     start->swap(tmp);
@@ -92,15 +93,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
       user_comparator_->Compare(user_key, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*key, tmp) < 0);
     key->swap(tmp);
   }
 }

-const char* InternalFilterPolicy::Name() const {
-  return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }

 void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
                                         std::string* dst) const {
db/dbformat.h:

@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_DB_DBFORMAT_H_

 #include <stdio.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
 #include "leveldb/filter_policy.h"
@@ -48,10 +49,7 @@ class InternalKey;
 // Value types encoded as the last component of internal keys.
 // DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
 // data structures.
-enum ValueType {
-  kTypeDeletion = 0x0,
-  kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
 // kValueTypeForSeek defines the ValueType that should be passed when
 // constructing a ParsedInternalKey object for seeking to a particular
 // sequence number (since we sort sequence numbers in decreasing order
@@ -64,17 +62,16 @@ typedef uint64_t SequenceNumber;

 // We leave eight bits empty at the bottom so a type and sequence#
 // can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
-    ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);

 struct ParsedInternalKey {
   Slice user_key;
   SequenceNumber sequence;
   ValueType type;

-  ParsedInternalKey() { }  // Intentionally left uninitialized (for speed)
+  ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
   ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
-      : user_key(u), sequence(seq), type(t) { }
+      : user_key(u), sequence(seq), type(t) {}
   std::string DebugString() const;
 };

@@ -84,15 +81,13 @@ inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
 }

 // Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
-                              const ParsedInternalKey& key);
+void AppendInternalKey(std::string* result, const ParsedInternalKey& key);

 // Attempt to parse an internal key from "internal_key".  On success,
 // stores the parsed data in "*result", and returns true.
 //
 // On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
-                             ParsedInternalKey* result);
+bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result);

 // Returns the user key portion of an internal key.
 inline Slice ExtractUserKey(const Slice& internal_key) {
@@ -100,26 +95,18 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
   return Slice(internal_key.data(), internal_key.size() - 8);
 }

-inline ValueType ExtractValueType(const Slice& internal_key) {
-  assert(internal_key.size() >= 8);
-  const size_t n = internal_key.size();
-  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  unsigned char c = num & 0xff;
-  return static_cast<ValueType>(c);
-}
-
 // A comparator for internal keys that uses a specified comparator for
 // the user key portion and breaks ties by decreasing sequence number.
 class InternalKeyComparator : public Comparator {
  private:
   const Comparator* user_comparator_;

  public:
-  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
+  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
   virtual const char* Name() const;
   virtual int Compare(const Slice& a, const Slice& b) const;
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const;
   virtual void FindShortSuccessor(std::string* key) const;

   const Comparator* user_comparator() const { return user_comparator_; }
@@ -131,8 +118,9 @@ class InternalKeyComparator : public Comparator {
 class InternalFilterPolicy : public FilterPolicy {
  private:
   const FilterPolicy* const user_policy_;

  public:
-  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
+  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
   virtual const char* Name() const;
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
@@ -144,8 +132,9 @@ class InternalFilterPolicy : public FilterPolicy {
 class InternalKey {
  private:
   std::string rep_;

  public:
-  InternalKey() { }   // Leave rep_ as empty to indicate it is invalid
+  InternalKey() {}  // Leave rep_ as empty to indicate it is invalid
   InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
     AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
   }
@@ -168,8 +157,8 @@ class InternalKey {
   std::string DebugString() const;
 };

-inline int InternalKeyComparator::Compare(
-    const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+                                          const InternalKey& b) const {
   return Compare(a.Encode(), b.Encode());
 }

@@ -192,6 +181,9 @@ class LookupKey {
   // the specified sequence number.
   LookupKey(const Slice& user_key, SequenceNumber sequence);

+  LookupKey(const LookupKey&) = delete;
+  LookupKey& operator=(const LookupKey&) = delete;
+
   ~LookupKey();

   // Return a key suitable for lookup in a MemTable.
@@ -214,11 +206,7 @@ class LookupKey {
   const char* start_;
   const char* kstart_;
   const char* end_;
   char space_[200];  // Avoid allocation for short keys
-
-  // No copying allowed
-  LookupKey(const LookupKey&);
-  void operator=(const LookupKey&);
 };

 inline LookupKey::~LookupKey() {
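For context on the kMaxSequenceNumber constant above: dbformat keeps sequence numbers to 56 bits precisely so a sequence and a one-byte ValueType can share a single uint64 (the packing itself lives in PackSequenceAndType in db/dbformat.cc). A hedged sketch of the arithmetic with local helper names rather than leveldb's own:

#include <cassert>
#include <cstdint>

enum ValueType : uint8_t { kTypeDeletion = 0x0, kTypeValue = 0x1 };

constexpr uint64_t kMaxSequenceNumber = (0x1ull << 56) - 1;

// Illustrative packing; mirrors the idea, not leveldb's exact code.
uint64_t PackSeqAndType(uint64_t seq, ValueType t) {
  assert(seq <= kMaxSequenceNumber);
  return (seq << 8) | t;  // high 56 bits: sequence, low 8 bits: type tag
}

int main() {
  uint64_t packed = PackSeqAndType(1234, kTypeValue);
  assert((packed >> 8) == 1234);
  assert(static_cast<ValueType>(packed & 0xff) == kTypeValue);
  return 0;
}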
db/dbformat_test.cc:

@@ -8,8 +8,7 @@

 namespace leveldb {

-static std::string IKey(const std::string& user_key,
-                        uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
                         ValueType vt) {
   std::string encoded;
   AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
   return result;
 }

-static void TestKey(const std::string& key,
-                    uint64_t seq,
-                    ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
   std::string encoded = IKey(key, seq, vt);

   Slice in(encoded);
@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
   ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
 }

-class FormatTest { };
+class FormatTest {};

 TEST(FormatTest, InternalKey_EncodeDecode) {
-  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
-  const uint64_t seq[] = {
-    1, 2, 3,
-    (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
-    (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
-    (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
-  };
+  const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+  const uint64_t seq[] = {1,
+                          2,
+                          3,
+                          (1ull << 8) - 1,
+                          1ull << 8,
+                          (1ull << 8) + 1,
+                          (1ull << 16) - 1,
+                          1ull << 16,
+                          (1ull << 16) + 1,
+                          (1ull << 32) - 1,
+                          1ull << 32,
+                          (1ull << 32) + 1};
   for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
     for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
       TestKey(keys[k], seq[s], kTypeValue);
@@ -65,37 +68,35 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
 TEST(FormatTest, InternalKeyShortSeparator) {
   // When user keys are same
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 99, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 101, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeDeletion)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));

   // When user keys are misordered
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("bar", 99, kTypeValue)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));

   // When user keys are different, but correctly ordered
-  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("hello", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));

   // When start user key is prefix of limit user key
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foobar", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));

   // When limit user key is prefix of start user key
-  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
-            Shorten(IKey("foobar", 100, kTypeValue),
-                    IKey("foo", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foobar", 100, kTypeValue),
+      Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
 }

 TEST(FormatTest, InternalKeyShortestSuccessor) {
@@ -107,6 +108,4 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
db/dumpfile.cc:

@@ -2,7 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

+#include "leveldb/dumpfile.h"
+
 #include <stdio.h>

 #include "db/dbformat.h"
 #include "db/filename.h"
 #include "db/log_reader.h"
@@ -35,7 +38,6 @@ bool GuessType(const std::string& fname, FileType* type) {
 // Notified when log reader encounters corruption.
 class CorruptionReporter : public log::Reader::Reporter {
  public:
-  WritableFile* dst_;
   virtual void Corruption(size_t bytes, const Status& status) {
     std::string r = "corruption: ";
     AppendNumberTo(&r, bytes);
@@ -44,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
     r.push_back('\n');
     dst_->Append(r);
   }
+
+  WritableFile* dst_;
 };

 // Print contents of a log file. (*func)() is called on every record.
@@ -70,7 +74,6 @@ Status PrintLogContents(Env* env, const std::string& fname,
 // Called on every item found in a WriteBatch.
 class WriteBatchItemPrinter : public WriteBatch::Handler {
  public:
-  WritableFile* dst_;
   virtual void Put(const Slice& key, const Slice& value) {
     std::string r = "  put '";
     AppendEscapedStringTo(&r, key);
@@ -85,8 +88,9 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
     r += "'\n";
     dst_->Append(r);
   }
-};
+
+  WritableFile* dst_;
+};

 // Called on every log record (each one of which is a WriteBatch)
 // found in a kLogFile.
@@ -142,8 +146,8 @@ Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) {

 Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
   uint64_t file_size;
-  RandomAccessFile* file = NULL;
-  Table* table = NULL;
+  RandomAccessFile* file = nullptr;
+  Table* table = nullptr;
   Status s = env->GetFileSize(fname, &file_size);
   if (s.ok()) {
     s = env->NewRandomAccessFile(fname, &file);
@@ -213,9 +217,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
     return Status::InvalidArgument(fname + ": unknown file type");
   }
   switch (ftype) {
-    case kLogFile: return DumpLog(env, fname, dst);
-    case kDescriptorFile: return DumpDescriptor(env, fname, dst);
-    case kTableFile: return DumpTable(env, fname, dst);
+    case kLogFile:
+      return DumpLog(env, fname, dst);
+    case kDescriptorFile:
+      return DumpDescriptor(env, fname, dst);
+    case kTableFile:
+      return DumpTable(env, fname, dst);
     default:
       break;
   }
db/fault_injection_test.cc:

@@ -6,18 +6,20 @@
 // the last "sync". It then checks for data loss errors by purposely dropping
 // file data (or entire files) not protected by a "sync".

-#include "leveldb/db.h"
-
 #include <map>
 #include <set>

 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "leveldb/table.h"
 #include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
 #include "util/testharness.h"
@@ -34,7 +36,7 @@ class FaultInjectionTestEnv;
 namespace {

 // Assume a filename, and not a directory name like "/foo/bar/"
-static std::string GetDirName(const std::string filename) {
+static std::string GetDirName(const std::string& filename) {
   size_t found = filename.find_last_of("/\\");
   if (found == std::string::npos) {
     return "";
@@ -54,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {

   SequentialFile* orig_file;
   Status s = env->NewSequentialFile(filename, &orig_file);
-  if (!s.ok())
-    return s;
+  if (!s.ok()) return s;

   char* scratch = new char[length];
   leveldb::Slice result;
@@ -83,15 +84,15 @@ Status Truncate(const std::string& filename, uint64_t length) {

 struct FileState {
   std::string filename_;
-  ssize_t pos_;
-  ssize_t pos_at_last_sync_;
-  ssize_t pos_at_last_flush_;
+  int64_t pos_;
+  int64_t pos_at_last_sync_;
+  int64_t pos_at_last_flush_;

   FileState(const std::string& filename)
       : filename_(filename),
         pos_(-1),
         pos_at_last_sync_(-1),
-        pos_at_last_flush_(-1) { }
+        pos_at_last_flush_(-1) {}

   FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}

@@ -106,14 +107,13 @@ struct FileState {
 // is written to or sync'ed.
 class TestWritableFile : public WritableFile {
  public:
-  TestWritableFile(const FileState& state,
-                   WritableFile* f,
+  TestWritableFile(const FileState& state, WritableFile* f,
                    FaultInjectionTestEnv* env);
-  virtual ~TestWritableFile();
-  virtual Status Append(const Slice& data);
-  virtual Status Close();
-  virtual Status Flush();
-  virtual Status Sync();
+  ~TestWritableFile() override;
+  Status Append(const Slice& data) override;
+  Status Close() override;
+  Status Flush() override;
+  Status Sync() override;

  private:
   FileState state_;
@@ -126,14 +126,15 @@ class TestWritableFile : public WritableFile {

 class FaultInjectionTestEnv : public EnvWrapper {
  public:
-  FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
-  virtual ~FaultInjectionTestEnv() { }
-  virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result);
-  virtual Status NewAppendableFile(const std::string& fname,
-                                   WritableFile** result);
-  virtual Status DeleteFile(const std::string& f);
-  virtual Status RenameFile(const std::string& s, const std::string& t);
+  FaultInjectionTestEnv()
+      : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+  ~FaultInjectionTestEnv() override = default;
+  Status NewWritableFile(const std::string& fname,
+                         WritableFile** result) override;
+  Status NewAppendableFile(const std::string& fname,
+                           WritableFile** result) override;
+  Status DeleteFile(const std::string& f) override;
+  Status RenameFile(const std::string& s, const std::string& t) override;

   void WritableFileClosed(const FileState& state);
   Status DropUnsyncedFileData();
@@ -146,24 +147,26 @@ class FaultInjectionTestEnv : public EnvWrapper {
   // system reset. Setting to inactive will freeze our saved filesystem state so
   // that it will stop being recorded. It can then be reset back to the state at
   // the time of the reset.
-  bool IsFilesystemActive() const { return filesystem_active_; }
-  void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+  bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) {
+    MutexLock l(&mutex_);
+    return filesystem_active_;
+  }
+  void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) {
+    MutexLock l(&mutex_);
+    filesystem_active_ = active;
+  }

  private:
   port::Mutex mutex_;
-  std::map<std::string, FileState> db_file_state_;
-  std::set<std::string> new_files_since_last_dir_sync_;
-  bool filesystem_active_;  // Record flushes, syncs, writes
+  std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_);
+  std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_);
+  bool filesystem_active_ GUARDED_BY(mutex_);  // Record flushes, syncs, writes
 };

-TestWritableFile::TestWritableFile(const FileState& state,
-                                   WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
                                    FaultInjectionTestEnv* env)
-    : state_(state),
-      target_(f),
-      writable_file_opened_(true),
-      env_(env) {
-  assert(f != NULL);
+    : state_(state), target_(f), writable_file_opened_(true), env_(env) {
+  assert(f != nullptr);
 }

 TestWritableFile::~TestWritableFile() {
@@ -265,10 +268,11 @@ Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
 Status FaultInjectionTestEnv::DropUnsyncedFileData() {
   Status s;
   MutexLock l(&mutex_);
-  for (std::map<std::string, FileState>::const_iterator it =
-           db_file_state_.begin();
-       s.ok() && it != db_file_state_.end(); ++it) {
-    const FileState& state = it->second;
+  for (const auto& kvp : db_file_state_) {
+    if (!s.ok()) {
+      break;
+    }
+    const FileState& state = kvp.second;
     if (!state.IsFullySynced()) {
       s = state.DropUnsyncedData();
     }
@@ -328,7 +332,6 @@ void FaultInjectionTestEnv::ResetState() {
   // Since we are not destroying the database, the existing files
   // should keep their recorded synced/flushed state. Therefore
   // we do not reset db_file_state_ and new_files_since_last_dir_sync_.
-  MutexLock l(&mutex_);
   SetFilesystemActive(true);
 }

@@ -338,12 +341,14 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
   std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
                                   new_files_since_last_dir_sync_.end());
   mutex_.Unlock();
-  Status s;
-  std::set<std::string>::const_iterator it;
-  for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
-    s = DeleteFile(*it);
+  Status status;
+  for (const auto& new_file : new_files) {
+    Status delete_status = DeleteFile(new_file);
+    if (!delete_status.ok() && status.ok()) {
+      status = std::move(delete_status);
+    }
   }
-  return s;
+  return status;
 }

 void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
@@ -352,7 +357,7 @@ void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
 }

 Status FileState::DropUnsyncedData() const {
-  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+  int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
   return Truncate(filename_, sync_pos);
 }

@@ -370,7 +375,7 @@ class FaultInjectionTest {
   FaultInjectionTest()
       : env_(new FaultInjectionTestEnv),
         tiny_cache_(NewLRUCache(100)),
-        db_(NULL) {
+        db_(nullptr) {
     dbname_ = test::TmpDir() + "/fault_test";
     DestroyDB(dbname_, Options());  // Destroy any db from earlier run
     options_.reuse_logs = true;
@@ -387,9 +392,7 @@ class FaultInjectionTest {
     delete env_;
   }

-  void ReuseLogs(bool reuse) {
-    options_.reuse_logs = reuse;
-  }
+  void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }

   void Build(int start_idx, int num_vals) {
     std::string key_space, value_space;
@@ -449,19 +452,18 @@ class FaultInjectionTest {

   Status OpenDB() {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
     env_->ResetState();
     return DB::Open(options_, dbname_, &db_);
   }

   void CloseDB() {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
   }

   void DeleteAllData() {
     Iterator* iter = db_->NewIterator(ReadOptions());
-    WriteOptions options;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
     }
@@ -485,23 +487,22 @@ class FaultInjectionTest {
   void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
     DeleteAllData();
     Build(0, num_pre_sync);
-    db_->CompactRange(NULL, NULL);
+    db_->CompactRange(nullptr, nullptr);
     Build(num_pre_sync, num_post_sync);
   }

   void PartialCompactTestReopenWithFault(ResetMethod reset_method,
-                                         int num_pre_sync,
-                                         int num_post_sync) {
+                                         int num_pre_sync, int num_post_sync) {
     env_->SetFilesystemActive(false);
     CloseDB();
     ResetDBState(reset_method);
     ASSERT_OK(OpenDB());
     ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+                     FaultInjectionTest::VAL_EXPECT_ERROR));
   }

-  void NoWriteTestPreFault() {
-  }
+  void NoWriteTestPreFault() {}

   void NoWriteTestReopenWithFault(ResetMethod reset_method) {
     CloseDB();
@@ -517,8 +518,7 @@ class FaultInjectionTest {
     int num_post_sync = rnd.Uniform(kMaxNumValues);

     PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-    PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
-                                      num_pre_sync,
+    PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
                                       num_post_sync);

     NoWriteTestPreFault();
@@ -528,8 +528,7 @@ class FaultInjectionTest {
     // No new files created so we expect all values since no files will be
     // dropped.
     PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
-                                      num_pre_sync + num_post_sync,
-                                      0);
+                                      num_pre_sync + num_post_sync, 0);

     NoWriteTestPreFault();
     NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
@@ -549,6 +548,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "db/filename.h"
+
 #include <ctype.h>
 #include <stdio.h>
-#include "db/filename.h"
+
 #include "db/dbformat.h"
 #include "leveldb/env.h"
 #include "util/logging.h"
@@ -12,31 +14,30 @@
 namespace leveldb {
 
 // A utility routine: write "data" to the named file and Sync() it.
-extern Status WriteStringToFileSync(Env* env, const Slice& data,
-                                    const std::string& fname);
+Status WriteStringToFileSync(Env* env, const Slice& data,
+                             const std::string& fname);
 
-static std::string MakeFileName(const std::string& name, uint64_t number,
+static std::string MakeFileName(const std::string& dbname, uint64_t number,
                                 const char* suffix) {
   char buf[100];
   snprintf(buf, sizeof(buf), "/%06llu.%s",
-           static_cast<unsigned long long>(number),
-           suffix);
-  return name + buf;
+           static_cast<unsigned long long>(number), suffix);
+  return dbname + buf;
 }
 
-std::string LogFileName(const std::string& name, uint64_t number) {
+std::string LogFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
-  return MakeFileName(name, number, "log");
+  return MakeFileName(dbname, number, "log");
 }
 
-std::string TableFileName(const std::string& name, uint64_t number) {
+std::string TableFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
-  return MakeFileName(name, number, "ldb");
+  return MakeFileName(dbname, number, "ldb");
 }
 
-std::string SSTTableFileName(const std::string& name, uint64_t number) {
+std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
-  return MakeFileName(name, number, "sst");
+  return MakeFileName(dbname, number, "sst");
 }
 
 std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
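For reference, a minimal sketch (not from this commit) of what the helpers above produce for a database directory named "testdb"; the zero-padded six-digit number comes from the "%06llu" format:

#include <cassert>
#include <string>

#include "db/filename.h"

void FileNameExamples() {
  assert(leveldb::LogFileName("testdb", 5) == "testdb/000005.log");
  assert(leveldb::TableFileName("testdb", 5) == "testdb/000005.ldb");
  assert(leveldb::SSTTableFileName("testdb", 5) == "testdb/000005.sst");
  assert(leveldb::CurrentFileName("testdb") == "testdb/CURRENT");
}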
@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
|
|||||||
return dbname + "/CURRENT";
|
return dbname + "/CURRENT";
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string LockFileName(const std::string& dbname) {
|
std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
|
||||||
return dbname + "/LOCK";
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string TempFileName(const std::string& dbname, uint64_t number) {
|
std::string TempFileName(const std::string& dbname, uint64_t number) {
|
||||||
assert(number > 0);
|
assert(number > 0);
|
||||||
@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
|
|||||||
return dbname + "/LOG.old";
|
return dbname + "/LOG.old";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Owned filenames have the form:
|
// Owned filenames have the form:
|
||||||
// dbname/CURRENT
|
// dbname/CURRENT
|
||||||
// dbname/LOCK
|
// dbname/LOCK
|
||||||
@@ -77,10 +75,9 @@ std::string OldInfoLogFileName(const std::string& dbname) {
 //   dbname/LOG.old
 //   dbname/MANIFEST-[0-9]+
 //   dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& fname,
-                   uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
                    FileType* type) {
-  Slice rest(fname);
+  Slice rest(filename);
   if (rest == "CURRENT") {
     *number = 0;
     *type = kCurrentFile;
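A short usage sketch (not from this commit) for ParseFileName(), mirroring the cases exercised in filename_test.cc further down:

#include <cassert>
#include <stdint.h>

#include "db/filename.h"

void ParseFileNameExamples() {
  uint64_t number;
  leveldb::FileType type;

  assert(leveldb::ParseFileName("100.log", &number, &type));
  assert(number == 100 && type == leveldb::kLogFile);

  assert(leveldb::ParseFileName("MANIFEST-2", &number, &type));
  assert(number == 2 && type == leveldb::kDescriptorFile);

  // Names that do not match the owned-filename patterns are rejected.
  assert(!leveldb::ParseFileName("100.lop", &number, &type));
}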
|
@ -8,7 +8,9 @@
|
|||||||
#define STORAGE_LEVELDB_DB_FILENAME_H_
|
#define STORAGE_LEVELDB_DB_FILENAME_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
#include "leveldb/slice.h"
|
#include "leveldb/slice.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
#include "port/port.h"
|
#include "port/port.h"
|
||||||
@ -30,55 +32,52 @@ enum FileType {
|
|||||||
// Return the name of the log file with the specified number
|
// Return the name of the log file with the specified number
|
||||||
// in the db named by "dbname". The result will be prefixed with
|
// in the db named by "dbname". The result will be prefixed with
|
||||||
// "dbname".
|
// "dbname".
|
||||||
extern std::string LogFileName(const std::string& dbname, uint64_t number);
|
std::string LogFileName(const std::string& dbname, uint64_t number);
|
||||||
|
|
||||||
// Return the name of the sstable with the specified number
|
// Return the name of the sstable with the specified number
|
||||||
// in the db named by "dbname". The result will be prefixed with
|
// in the db named by "dbname". The result will be prefixed with
|
||||||
// "dbname".
|
// "dbname".
|
||||||
extern std::string TableFileName(const std::string& dbname, uint64_t number);
|
std::string TableFileName(const std::string& dbname, uint64_t number);
|
||||||
|
|
||||||
// Return the legacy file name for an sstable with the specified number
|
// Return the legacy file name for an sstable with the specified number
|
||||||
// in the db named by "dbname". The result will be prefixed with
|
// in the db named by "dbname". The result will be prefixed with
|
||||||
// "dbname".
|
// "dbname".
|
||||||
extern std::string SSTTableFileName(const std::string& dbname, uint64_t number);
|
std::string SSTTableFileName(const std::string& dbname, uint64_t number);
|
||||||
|
|
||||||
// Return the name of the descriptor file for the db named by
|
// Return the name of the descriptor file for the db named by
|
||||||
// "dbname" and the specified incarnation number. The result will be
|
// "dbname" and the specified incarnation number. The result will be
|
||||||
// prefixed with "dbname".
|
// prefixed with "dbname".
|
||||||
extern std::string DescriptorFileName(const std::string& dbname,
|
std::string DescriptorFileName(const std::string& dbname, uint64_t number);
|
||||||
uint64_t number);
|
|
||||||
|
|
||||||
// Return the name of the current file. This file contains the name
|
// Return the name of the current file. This file contains the name
|
||||||
// of the current manifest file. The result will be prefixed with
|
// of the current manifest file. The result will be prefixed with
|
||||||
// "dbname".
|
// "dbname".
|
||||||
extern std::string CurrentFileName(const std::string& dbname);
|
std::string CurrentFileName(const std::string& dbname);
|
||||||
|
|
||||||
// Return the name of the lock file for the db named by
|
// Return the name of the lock file for the db named by
|
||||||
// "dbname". The result will be prefixed with "dbname".
|
// "dbname". The result will be prefixed with "dbname".
|
||||||
extern std::string LockFileName(const std::string& dbname);
|
std::string LockFileName(const std::string& dbname);
|
||||||
|
|
||||||
// Return the name of a temporary file owned by the db named "dbname".
|
// Return the name of a temporary file owned by the db named "dbname".
|
||||||
// The result will be prefixed with "dbname".
|
// The result will be prefixed with "dbname".
|
||||||
extern std::string TempFileName(const std::string& dbname, uint64_t number);
|
std::string TempFileName(const std::string& dbname, uint64_t number);
|
||||||
|
|
||||||
// Return the name of the info log file for "dbname".
|
// Return the name of the info log file for "dbname".
|
||||||
extern std::string InfoLogFileName(const std::string& dbname);
|
std::string InfoLogFileName(const std::string& dbname);
|
||||||
|
|
||||||
// Return the name of the old info log file for "dbname".
|
// Return the name of the old info log file for "dbname".
|
||||||
extern std::string OldInfoLogFileName(const std::string& dbname);
|
std::string OldInfoLogFileName(const std::string& dbname);
|
||||||
|
|
||||||
// If filename is a leveldb file, store the type of the file in *type.
|
// If filename is a leveldb file, store the type of the file in *type.
|
||||||
// The number encoded in the filename is stored in *number. If the
|
// The number encoded in the filename is stored in *number. If the
|
||||||
// filename was successfully parsed, returns true. Else return false.
|
// filename was successfully parsed, returns true. Else return false.
|
||||||
extern bool ParseFileName(const std::string& filename,
|
bool ParseFileName(const std::string& filename, uint64_t* number,
|
||||||
uint64_t* number,
|
FileType* type);
|
||||||
FileType* type);
|
|
||||||
|
|
||||||
// Make the CURRENT file point to the descriptor file with the
|
// Make the CURRENT file point to the descriptor file with the
|
||||||
// specified number.
|
// specified number.
|
||||||
extern Status SetCurrentFile(Env* env, const std::string& dbname,
|
Status SetCurrentFile(Env* env, const std::string& dbname,
|
||||||
uint64_t descriptor_number);
|
uint64_t descriptor_number);
|
||||||
|
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@
|
|||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class FileNameTest { };
|
class FileNameTest {};
|
||||||
|
|
||||||
TEST(FileNameTest, Parse) {
|
TEST(FileNameTest, Parse) {
|
||||||
Slice db;
|
Slice db;
|
||||||
@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
|
|||||||
uint64_t number;
|
uint64_t number;
|
||||||
FileType type;
|
FileType type;
|
||||||
} cases[] = {
|
} cases[] = {
|
||||||
{ "100.log", 100, kLogFile },
|
{"100.log", 100, kLogFile},
|
||||||
{ "0.log", 0, kLogFile },
|
{"0.log", 0, kLogFile},
|
||||||
{ "0.sst", 0, kTableFile },
|
{"0.sst", 0, kTableFile},
|
||||||
{ "0.ldb", 0, kTableFile },
|
{"0.ldb", 0, kTableFile},
|
||||||
{ "CURRENT", 0, kCurrentFile },
|
{"CURRENT", 0, kCurrentFile},
|
||||||
{ "LOCK", 0, kDBLockFile },
|
{"LOCK", 0, kDBLockFile},
|
||||||
{ "MANIFEST-2", 2, kDescriptorFile },
|
{"MANIFEST-2", 2, kDescriptorFile},
|
||||||
{ "MANIFEST-7", 7, kDescriptorFile },
|
{"MANIFEST-7", 7, kDescriptorFile},
|
||||||
{ "LOG", 0, kInfoLogFile },
|
{"LOG", 0, kInfoLogFile},
|
||||||
{ "LOG.old", 0, kInfoLogFile },
|
{"LOG.old", 0, kInfoLogFile},
|
||||||
{ "18446744073709551615.log", 18446744073709551615ull, kLogFile },
|
{"18446744073709551615.log", 18446744073709551615ull, kLogFile},
|
||||||
};
|
};
|
||||||
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
|
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
|
||||||
std::string f = cases[i].fname;
|
std::string f = cases[i].fname;
|
||||||
@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Errors
|
// Errors
|
||||||
static const char* errors[] = {
|
static const char* errors[] = {"",
|
||||||
"",
|
"foo",
|
||||||
"foo",
|
"foo-dx-100.log",
|
||||||
"foo-dx-100.log",
|
".log",
|
||||||
".log",
|
"",
|
||||||
"",
|
"manifest",
|
||||||
"manifest",
|
"CURREN",
|
||||||
"CURREN",
|
"CURRENTX",
|
||||||
"CURRENTX",
|
"MANIFES",
|
||||||
"MANIFES",
|
"MANIFEST",
|
||||||
"MANIFEST",
|
"MANIFEST-",
|
||||||
"MANIFEST-",
|
"XMANIFEST-3",
|
||||||
"XMANIFEST-3",
|
"MANIFEST-3x",
|
||||||
"MANIFEST-3x",
|
"LOC",
|
||||||
"LOC",
|
"LOCKx",
|
||||||
"LOCKx",
|
"LO",
|
||||||
"LO",
|
"LOGx",
|
||||||
"LOGx",
|
"18446744073709551616.log",
|
||||||
"18446744073709551616.log",
|
"184467440737095516150.log",
|
||||||
"184467440737095516150.log",
|
"100",
|
||||||
"100",
|
"100.",
|
||||||
"100.",
|
"100.lop"};
|
||||||
"100.lop"
|
|
||||||
};
|
|
||||||
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
|
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
|
||||||
std::string f = errors[i];
|
std::string f = errors[i];
|
||||||
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
|
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
|
||||||
@@ -114,10 +112,20 @@ TEST(FileNameTest, Construction) {
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
   ASSERT_EQ(999, number);
   ASSERT_EQ(kTempFile, type);
+
+  fname = InfoLogFileName("foo");
+  ASSERT_EQ("foo/", std::string(fname.data(), 4));
+  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+  ASSERT_EQ(0, number);
+  ASSERT_EQ(kInfoLogFile, type);
+
+  fname = OldInfoLogFileName("foo");
+  ASSERT_EQ("foo/", std::string(fname.data(), 4));
+  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+  ASSERT_EQ(0, number);
+  ASSERT_EQ(kInfoLogFile, type);
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
@ -3,6 +3,7 @@
|
|||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
#include "leveldb/dumpfile.h"
|
#include "leveldb/dumpfile.h"
|
||||||
#include "leveldb/env.h"
|
#include "leveldb/env.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
@ -38,11 +39,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
|
|||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
static void Usage() {
|
static void Usage() {
|
||||||
fprintf(
|
fprintf(stderr,
|
||||||
stderr,
|
"Usage: leveldbutil command...\n"
|
||||||
"Usage: leveldbutil command...\n"
|
" dump files... -- dump contents of specified files\n");
|
||||||
" dump files... -- dump contents of specified files\n"
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) {
|
||||||
@ -54,7 +53,7 @@ int main(int argc, char** argv) {
|
|||||||
} else {
|
} else {
|
||||||
std::string command = argv[1];
|
std::string command = argv[1];
|
||||||
if (command == "dump") {
|
if (command == "dump") {
|
||||||
ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
|
ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
|
||||||
} else {
|
} else {
|
||||||
Usage();
|
Usage();
|
||||||
ok = false;
|
ok = false;
|
||||||
|
@ -5,6 +5,7 @@
|
|||||||
#include "db/log_reader.h"
|
#include "db/log_reader.h"
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
#include "leveldb/env.h"
|
#include "leveldb/env.h"
|
||||||
#include "util/coding.h"
|
#include "util/coding.h"
|
||||||
#include "util/crc32c.h"
|
#include "util/crc32c.h"
|
||||||
@ -12,8 +13,7 @@
|
|||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
namespace log {
|
namespace log {
|
||||||
|
|
||||||
Reader::Reporter::~Reporter() {
|
Reader::Reporter::~Reporter() = default;
|
||||||
}
|
|
||||||
|
|
||||||
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
|
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
|
||||||
uint64_t initial_offset)
|
uint64_t initial_offset)
|
||||||
@ -26,20 +26,16 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
|
|||||||
last_record_offset_(0),
|
last_record_offset_(0),
|
||||||
end_of_buffer_offset_(0),
|
end_of_buffer_offset_(0),
|
||||||
initial_offset_(initial_offset),
|
initial_offset_(initial_offset),
|
||||||
resyncing_(initial_offset > 0) {
|
resyncing_(initial_offset > 0) {}
|
||||||
}
|
|
||||||
|
|
||||||
Reader::~Reader() {
|
Reader::~Reader() { delete[] backing_store_; }
|
||||||
delete[] backing_store_;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool Reader::SkipToInitialBlock() {
|
bool Reader::SkipToInitialBlock() {
|
||||||
size_t offset_in_block = initial_offset_ % kBlockSize;
|
const size_t offset_in_block = initial_offset_ % kBlockSize;
|
||||||
uint64_t block_start_location = initial_offset_ - offset_in_block;
|
uint64_t block_start_location = initial_offset_ - offset_in_block;
|
||||||
|
|
||||||
// Don't search a block if we'd be in the trailer
|
// Don't search a block if we'd be in the trailer
|
||||||
if (offset_in_block > kBlockSize - 6) {
|
if (offset_in_block > kBlockSize - 6) {
|
||||||
offset_in_block = 0;
|
|
||||||
block_start_location += kBlockSize;
|
block_start_location += kBlockSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -99,9 +95,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
|
|||||||
// it could emit an empty kFirstType record at the tail end
|
// it could emit an empty kFirstType record at the tail end
|
||||||
// of a block followed by a kFullType or kFirstType record
|
// of a block followed by a kFullType or kFirstType record
|
||||||
// at the beginning of the next block.
|
// at the beginning of the next block.
|
||||||
if (scratch->empty()) {
|
if (!scratch->empty()) {
|
||||||
in_fragmented_record = false;
|
|
||||||
} else {
|
|
||||||
ReportCorruption(scratch->size(), "partial record without end(1)");
|
ReportCorruption(scratch->size(), "partial record without end(1)");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -117,9 +111,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
|
|||||||
// it could emit an empty kFirstType record at the tail end
|
// it could emit an empty kFirstType record at the tail end
|
||||||
// of a block followed by a kFullType or kFirstType record
|
// of a block followed by a kFullType or kFirstType record
|
||||||
// at the beginning of the next block.
|
// at the beginning of the next block.
|
||||||
if (scratch->empty()) {
|
if (!scratch->empty()) {
|
||||||
in_fragmented_record = false;
|
|
||||||
} else {
|
|
||||||
ReportCorruption(scratch->size(), "partial record without end(2)");
|
ReportCorruption(scratch->size(), "partial record without end(2)");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -181,16 +173,14 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint64_t Reader::LastRecordOffset() {
|
uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
|
||||||
return last_record_offset_;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
|
void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
|
||||||
ReportDrop(bytes, Status::Corruption(reason));
|
ReportDrop(bytes, Status::Corruption(reason));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
|
void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
|
||||||
if (reporter_ != NULL &&
|
if (reporter_ != nullptr &&
|
||||||
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
|
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
|
||||||
reporter_->Corruption(static_cast<size_t>(bytes), reason);
|
reporter_->Corruption(static_cast<size_t>(bytes), reason);
|
||||||
}
|
}
|
||||||
|
@ -32,7 +32,7 @@ class Reader {
|
|||||||
// Create a reader that will return log records from "*file".
|
// Create a reader that will return log records from "*file".
|
||||||
// "*file" must remain live while this Reader is in use.
|
// "*file" must remain live while this Reader is in use.
|
||||||
//
|
//
|
||||||
// If "reporter" is non-NULL, it is notified whenever some data is
|
// If "reporter" is non-null, it is notified whenever some data is
|
||||||
// dropped due to a detected corruption. "*reporter" must remain
|
// dropped due to a detected corruption. "*reporter" must remain
|
||||||
// live while this Reader is in use.
|
// live while this Reader is in use.
|
||||||
//
|
//
|
||||||
@@ -43,6 +43,9 @@ class Reader {
   Reader(SequentialFile* file, Reporter* reporter, bool checksum,
          uint64_t initial_offset);
 
+  Reader(const Reader&) = delete;
+  Reader& operator=(const Reader&) = delete;
+
   ~Reader();
 
   // Read the next record into *record. Returns true if read
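The two deleted copy operations added above replace the older idiom, still visible elsewhere in this diff, of declaring the copy constructor and assignment operator private and never defining them. A small sketch (not from this commit) contrasting the two; the Widget names are made up:

class WidgetOld {
 public:
  WidgetOld() {}

 private:
  // Pre-C++11 idiom: private and never defined, so copying fails to compile
  // from outside the class and fails to link from inside it.
  WidgetOld(const WidgetOld&);
  void operator=(const WidgetOld&);
};

class WidgetNew {
 public:
  WidgetNew() {}

  // C++11 idiom used by the commit: any copy is a clear compile-time error.
  WidgetNew(const WidgetNew&) = delete;
  WidgetNew& operator=(const WidgetNew&) = delete;
};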
@ -58,26 +61,6 @@ class Reader {
|
|||||||
uint64_t LastRecordOffset();
|
uint64_t LastRecordOffset();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
SequentialFile* const file_;
|
|
||||||
Reporter* const reporter_;
|
|
||||||
bool const checksum_;
|
|
||||||
char* const backing_store_;
|
|
||||||
Slice buffer_;
|
|
||||||
bool eof_; // Last Read() indicated EOF by returning < kBlockSize
|
|
||||||
|
|
||||||
// Offset of the last record returned by ReadRecord.
|
|
||||||
uint64_t last_record_offset_;
|
|
||||||
// Offset of the first location past the end of buffer_.
|
|
||||||
uint64_t end_of_buffer_offset_;
|
|
||||||
|
|
||||||
// Offset at which to start looking for the first record to return
|
|
||||||
uint64_t const initial_offset_;
|
|
||||||
|
|
||||||
// True if we are resynchronizing after a seek (initial_offset_ > 0). In
|
|
||||||
// particular, a run of kMiddleType and kLastType records can be silently
|
|
||||||
// skipped in this mode
|
|
||||||
bool resyncing_;
|
|
||||||
|
|
||||||
// Extend record types with the following special values
|
// Extend record types with the following special values
|
||||||
enum {
|
enum {
|
||||||
kEof = kMaxRecordType + 1,
|
kEof = kMaxRecordType + 1,
|
||||||
@ -102,9 +85,25 @@ class Reader {
|
|||||||
void ReportCorruption(uint64_t bytes, const char* reason);
|
void ReportCorruption(uint64_t bytes, const char* reason);
|
||||||
void ReportDrop(uint64_t bytes, const Status& reason);
|
void ReportDrop(uint64_t bytes, const Status& reason);
|
||||||
|
|
||||||
// No copying allowed
|
SequentialFile* const file_;
|
||||||
Reader(const Reader&);
|
Reporter* const reporter_;
|
||||||
void operator=(const Reader&);
|
bool const checksum_;
|
||||||
|
char* const backing_store_;
|
||||||
|
Slice buffer_;
|
||||||
|
bool eof_; // Last Read() indicated EOF by returning < kBlockSize
|
||||||
|
|
||||||
|
// Offset of the last record returned by ReadRecord.
|
||||||
|
uint64_t last_record_offset_;
|
||||||
|
// Offset of the first location past the end of buffer_.
|
||||||
|
uint64_t end_of_buffer_offset_;
|
||||||
|
|
||||||
|
// Offset at which to start looking for the first record to return
|
||||||
|
uint64_t const initial_offset_;
|
||||||
|
|
||||||
|
// True if we are resynchronizing after a seek (initial_offset_ > 0). In
|
||||||
|
// particular, a run of kMiddleType and kLastType records can be silently
|
||||||
|
// skipped in this mode
|
||||||
|
bool resyncing_;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace log
|
} // namespace log
|
||||||
|
287
db/log_test.cc
@ -37,87 +37,12 @@ static std::string RandomSkewedString(int i, Random* rnd) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
class LogTest {
|
class LogTest {
|
||||||
private:
|
|
||||||
class StringDest : public WritableFile {
|
|
||||||
public:
|
|
||||||
std::string contents_;
|
|
||||||
|
|
||||||
virtual Status Close() { return Status::OK(); }
|
|
||||||
virtual Status Flush() { return Status::OK(); }
|
|
||||||
virtual Status Sync() { return Status::OK(); }
|
|
||||||
virtual Status Append(const Slice& slice) {
|
|
||||||
contents_.append(slice.data(), slice.size());
|
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
class StringSource : public SequentialFile {
|
|
||||||
public:
|
|
||||||
Slice contents_;
|
|
||||||
bool force_error_;
|
|
||||||
bool returned_partial_;
|
|
||||||
StringSource() : force_error_(false), returned_partial_(false) { }
|
|
||||||
|
|
||||||
virtual Status Read(size_t n, Slice* result, char* scratch) {
|
|
||||||
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
|
|
||||||
|
|
||||||
if (force_error_) {
|
|
||||||
force_error_ = false;
|
|
||||||
returned_partial_ = true;
|
|
||||||
return Status::Corruption("read error");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (contents_.size() < n) {
|
|
||||||
n = contents_.size();
|
|
||||||
returned_partial_ = true;
|
|
||||||
}
|
|
||||||
*result = Slice(contents_.data(), n);
|
|
||||||
contents_.remove_prefix(n);
|
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Status Skip(uint64_t n) {
|
|
||||||
if (n > contents_.size()) {
|
|
||||||
contents_.clear();
|
|
||||||
return Status::NotFound("in-memory file skipped past end");
|
|
||||||
}
|
|
||||||
|
|
||||||
contents_.remove_prefix(n);
|
|
||||||
|
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
class ReportCollector : public Reader::Reporter {
|
|
||||||
public:
|
|
||||||
size_t dropped_bytes_;
|
|
||||||
std::string message_;
|
|
||||||
|
|
||||||
ReportCollector() : dropped_bytes_(0) { }
|
|
||||||
virtual void Corruption(size_t bytes, const Status& status) {
|
|
||||||
dropped_bytes_ += bytes;
|
|
||||||
message_.append(status.ToString());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
StringDest dest_;
|
|
||||||
StringSource source_;
|
|
||||||
ReportCollector report_;
|
|
||||||
bool reading_;
|
|
||||||
Writer* writer_;
|
|
||||||
Reader* reader_;
|
|
||||||
|
|
||||||
// Record metadata for testing initial offset functionality
|
|
||||||
static size_t initial_offset_record_sizes_[];
|
|
||||||
static uint64_t initial_offset_last_record_offsets_[];
|
|
||||||
static int num_initial_offset_records_;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
LogTest() : reading_(false),
|
LogTest()
|
||||||
writer_(new Writer(&dest_)),
|
: reading_(false),
|
||||||
reader_(new Reader(&source_, &report_, true/*checksum*/,
|
writer_(new Writer(&dest_)),
|
||||||
0/*initial_offset*/)) {
|
reader_(new Reader(&source_, &report_, true /*checksum*/,
|
||||||
}
|
0 /*initial_offset*/)) {}
|
||||||
|
|
||||||
~LogTest() {
|
~LogTest() {
|
||||||
delete writer_;
|
delete writer_;
|
||||||
@ -134,9 +59,7 @@ class LogTest {
|
|||||||
writer_->AddRecord(Slice(msg));
|
writer_->AddRecord(Slice(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t WrittenBytes() const {
|
size_t WrittenBytes() const { return dest_.contents_.size(); }
|
||||||
return dest_.contents_.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string Read() {
|
std::string Read() {
|
||||||
if (!reading_) {
|
if (!reading_) {
|
||||||
@ -166,22 +89,16 @@ class LogTest {
|
|||||||
|
|
||||||
void FixChecksum(int header_offset, int len) {
|
void FixChecksum(int header_offset, int len) {
|
||||||
// Compute crc of type/len/data
|
// Compute crc of type/len/data
|
||||||
uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
|
uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
|
||||||
crc = crc32c::Mask(crc);
|
crc = crc32c::Mask(crc);
|
||||||
EncodeFixed32(&dest_.contents_[header_offset], crc);
|
EncodeFixed32(&dest_.contents_[header_offset], crc);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ForceError() {
|
void ForceError() { source_.force_error_ = true; }
|
||||||
source_.force_error_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t DroppedBytes() const {
|
size_t DroppedBytes() const { return report_.dropped_bytes_; }
|
||||||
return report_.dropped_bytes_;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string ReportMessage() const {
|
std::string ReportMessage() const { return report_.message_; }
|
||||||
return report_.message_;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns OK iff recorded error message contains "msg"
|
// Returns OK iff recorded error message contains "msg"
|
||||||
std::string MatchError(const std::string& msg) const {
|
std::string MatchError(const std::string& msg) const {
|
||||||
@ -202,14 +119,14 @@ class LogTest {
|
|||||||
|
|
||||||
void StartReadingAt(uint64_t initial_offset) {
|
void StartReadingAt(uint64_t initial_offset) {
|
||||||
delete reader_;
|
delete reader_;
|
||||||
reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
|
reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
|
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
|
||||||
WriteInitialOffsetLog();
|
WriteInitialOffsetLog();
|
||||||
reading_ = true;
|
reading_ = true;
|
||||||
source_.contents_ = Slice(dest_.contents_);
|
source_.contents_ = Slice(dest_.contents_);
|
||||||
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
|
Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
|
||||||
WrittenBytes() + offset_past_end);
|
WrittenBytes() + offset_past_end);
|
||||||
Slice record;
|
Slice record;
|
||||||
std::string scratch;
|
std::string scratch;
|
||||||
@ -222,8 +139,8 @@ class LogTest {
|
|||||||
WriteInitialOffsetLog();
|
WriteInitialOffsetLog();
|
||||||
reading_ = true;
|
reading_ = true;
|
||||||
source_.contents_ = Slice(dest_.contents_);
|
source_.contents_ = Slice(dest_.contents_);
|
||||||
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
|
Reader* offset_reader =
|
||||||
initial_offset);
|
new Reader(&source_, &report_, true /*checksum*/, initial_offset);
|
||||||
|
|
||||||
// Read all records from expected_record_offset through the last one.
|
// Read all records from expected_record_offset through the last one.
|
||||||
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
|
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
|
||||||
@ -240,36 +157,108 @@ class LogTest {
|
|||||||
}
|
}
|
||||||
delete offset_reader;
|
delete offset_reader;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
class StringDest : public WritableFile {
|
||||||
|
public:
|
||||||
|
virtual Status Close() { return Status::OK(); }
|
||||||
|
virtual Status Flush() { return Status::OK(); }
|
||||||
|
virtual Status Sync() { return Status::OK(); }
|
||||||
|
virtual Status Append(const Slice& slice) {
|
||||||
|
contents_.append(slice.data(), slice.size());
|
||||||
|
return Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string contents_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class StringSource : public SequentialFile {
|
||||||
|
public:
|
||||||
|
StringSource() : force_error_(false), returned_partial_(false) {}
|
||||||
|
|
||||||
|
virtual Status Read(size_t n, Slice* result, char* scratch) {
|
||||||
|
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
|
||||||
|
|
||||||
|
if (force_error_) {
|
||||||
|
force_error_ = false;
|
||||||
|
returned_partial_ = true;
|
||||||
|
return Status::Corruption("read error");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (contents_.size() < n) {
|
||||||
|
n = contents_.size();
|
||||||
|
returned_partial_ = true;
|
||||||
|
}
|
||||||
|
*result = Slice(contents_.data(), n);
|
||||||
|
contents_.remove_prefix(n);
|
||||||
|
return Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual Status Skip(uint64_t n) {
|
||||||
|
if (n > contents_.size()) {
|
||||||
|
contents_.clear();
|
||||||
|
return Status::NotFound("in-memory file skipped past end");
|
||||||
|
}
|
||||||
|
|
||||||
|
contents_.remove_prefix(n);
|
||||||
|
|
||||||
|
return Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
Slice contents_;
|
||||||
|
bool force_error_;
|
||||||
|
bool returned_partial_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReportCollector : public Reader::Reporter {
|
||||||
|
public:
|
||||||
|
ReportCollector() : dropped_bytes_(0) {}
|
||||||
|
virtual void Corruption(size_t bytes, const Status& status) {
|
||||||
|
dropped_bytes_ += bytes;
|
||||||
|
message_.append(status.ToString());
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t dropped_bytes_;
|
||||||
|
std::string message_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Record metadata for testing initial offset functionality
|
||||||
|
static size_t initial_offset_record_sizes_[];
|
||||||
|
static uint64_t initial_offset_last_record_offsets_[];
|
||||||
|
static int num_initial_offset_records_;
|
||||||
|
|
||||||
|
StringDest dest_;
|
||||||
|
StringSource source_;
|
||||||
|
ReportCollector report_;
|
||||||
|
bool reading_;
|
||||||
|
Writer* writer_;
|
||||||
|
Reader* reader_;
|
||||||
};
|
};
|
||||||
|
|
||||||
size_t LogTest::initial_offset_record_sizes_[] =
|
size_t LogTest::initial_offset_record_sizes_[] = {
|
||||||
{10000, // Two sizable records in first block
|
10000, // Two sizable records in first block
|
||||||
10000,
|
10000,
|
||||||
2 * log::kBlockSize - 1000, // Span three blocks
|
2 * log::kBlockSize - 1000, // Span three blocks
|
||||||
1,
|
1,
|
||||||
13716, // Consume all but two bytes of block 3.
|
13716, // Consume all but two bytes of block 3.
|
||||||
log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
|
log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
|
||||||
};
|
};
|
||||||
|
|
||||||
uint64_t LogTest::initial_offset_last_record_offsets_[] =
|
uint64_t LogTest::initial_offset_last_record_offsets_[] = {
|
||||||
{0,
|
0,
|
||||||
kHeaderSize + 10000,
|
kHeaderSize + 10000,
|
||||||
2 * (kHeaderSize + 10000),
|
2 * (kHeaderSize + 10000),
|
||||||
2 * (kHeaderSize + 10000) +
|
2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
|
||||||
(2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
|
2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
|
||||||
2 * (kHeaderSize + 10000) +
|
kHeaderSize + 1,
|
||||||
(2 * log::kBlockSize - 1000) + 3 * kHeaderSize
|
3 * log::kBlockSize,
|
||||||
+ kHeaderSize + 1,
|
};
|
||||||
3 * log::kBlockSize,
|
|
||||||
};
|
|
||||||
|
|
||||||
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
|
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
|
||||||
int LogTest::num_initial_offset_records_ =
|
int LogTest::num_initial_offset_records_ =
|
||||||
sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
|
sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
|
||||||
|
|
||||||
TEST(LogTest, Empty) {
|
TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
|
||||||
ASSERT_EQ("EOF", Read());
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadWrite) {
|
TEST(LogTest, ReadWrite) {
|
||||||
Write("foo");
|
Write("foo");
|
||||||
@ -306,7 +295,7 @@ TEST(LogTest, Fragmentation) {
|
|||||||
|
|
||||||
TEST(LogTest, MarginalTrailer) {
|
TEST(LogTest, MarginalTrailer) {
|
||||||
// Make a trailer that is exactly the same length as an empty record.
|
// Make a trailer that is exactly the same length as an empty record.
|
||||||
const int n = kBlockSize - 2*kHeaderSize;
|
const int n = kBlockSize - 2 * kHeaderSize;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
||||||
Write("");
|
Write("");
|
||||||
@ -319,7 +308,7 @@ TEST(LogTest, MarginalTrailer) {
|
|||||||
|
|
||||||
TEST(LogTest, MarginalTrailer2) {
|
TEST(LogTest, MarginalTrailer2) {
|
||||||
// Make a trailer that is exactly the same length as an empty record.
|
// Make a trailer that is exactly the same length as an empty record.
|
||||||
const int n = kBlockSize - 2*kHeaderSize;
|
const int n = kBlockSize - 2 * kHeaderSize;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
||||||
Write("bar");
|
Write("bar");
|
||||||
@ -331,7 +320,7 @@ TEST(LogTest, MarginalTrailer2) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, ShortTrailer) {
|
TEST(LogTest, ShortTrailer) {
|
||||||
const int n = kBlockSize - 2*kHeaderSize + 4;
|
const int n = kBlockSize - 2 * kHeaderSize + 4;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
||||||
Write("");
|
Write("");
|
||||||
@ -343,7 +332,7 @@ TEST(LogTest, ShortTrailer) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, AlignedEof) {
|
TEST(LogTest, AlignedEof) {
|
||||||
const int n = kBlockSize - 2*kHeaderSize + 4;
|
const int n = kBlockSize - 2 * kHeaderSize + 4;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
||||||
ASSERT_EQ(BigString("foo", n), Read());
|
ASSERT_EQ(BigString("foo", n), Read());
|
||||||
@ -394,7 +383,7 @@ TEST(LogTest, BadRecordType) {
|
|||||||
|
|
||||||
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
|
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
|
||||||
Write("foo");
|
Write("foo");
|
||||||
ShrinkSize(4); // Drop all payload as well as a header byte
|
ShrinkSize(4); // Drop all payload as well as a header byte
|
||||||
ASSERT_EQ("EOF", Read());
|
ASSERT_EQ("EOF", Read());
|
||||||
// Truncated last record is ignored, not treated as an error.
|
// Truncated last record is ignored, not treated as an error.
|
||||||
ASSERT_EQ(0, DroppedBytes());
|
ASSERT_EQ(0, DroppedBytes());
|
||||||
@ -492,7 +481,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
|
|||||||
// If initial_offset points to a record after first(R1) but before first(R2)
|
// If initial_offset points to a record after first(R1) but before first(R2)
|
||||||
// incomplete fragment errors are not actual errors, and must be suppressed
|
// incomplete fragment errors are not actual errors, and must be suppressed
|
||||||
// until a new first or full record is encountered.
|
// until a new first or full record is encountered.
|
||||||
Write(BigString("foo", 3*kBlockSize));
|
Write(BigString("foo", 3 * kBlockSize));
|
||||||
Write("correct");
|
Write("correct");
|
||||||
StartReadingAt(kBlockSize);
|
StartReadingAt(kBlockSize);
|
||||||
|
|
||||||
@ -514,44 +503,30 @@ TEST(LogTest, ErrorJoinsRecords) {
|
|||||||
Write("correct");
|
Write("correct");
|
||||||
|
|
||||||
// Wipe the middle block
|
// Wipe the middle block
|
||||||
for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
|
for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
|
||||||
SetByte(offset, 'x');
|
SetByte(offset, 'x');
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_EQ("correct", Read());
|
ASSERT_EQ("correct", Read());
|
||||||
ASSERT_EQ("EOF", Read());
|
ASSERT_EQ("EOF", Read());
|
||||||
const size_t dropped = DroppedBytes();
|
const size_t dropped = DroppedBytes();
|
||||||
ASSERT_LE(dropped, 2*kBlockSize + 100);
|
ASSERT_LE(dropped, 2 * kBlockSize + 100);
|
||||||
ASSERT_GE(dropped, 2*kBlockSize);
|
ASSERT_GE(dropped, 2 * kBlockSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, ReadStart) {
|
TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
|
||||||
CheckInitialOffsetRecord(0, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadSecondOneOff) {
|
TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
|
||||||
CheckInitialOffsetRecord(1, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadSecondTenThousand) {
|
TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
|
||||||
CheckInitialOffsetRecord(10000, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadSecondStart) {
|
TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
|
||||||
CheckInitialOffsetRecord(10007, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadThirdOneOff) {
|
TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
|
||||||
CheckInitialOffsetRecord(10008, 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadThirdStart) {
|
TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
|
||||||
CheckInitialOffsetRecord(20014, 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadFourthOneOff) {
|
TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
|
||||||
CheckInitialOffsetRecord(20015, 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadFourthFirstBlockTrailer) {
|
TEST(LogTest, ReadFourthFirstBlockTrailer) {
|
||||||
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
|
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
|
||||||
@ -575,17 +550,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
|
|||||||
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
|
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, ReadEnd) {
|
TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
|
||||||
CheckOffsetPastEndReturnsNoRecords(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadPastEnd) {
|
TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
|
||||||
CheckOffsetPastEndReturnsNoRecords(5);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace log
|
} // namespace log
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
@ -5,6 +5,7 @@
|
|||||||
#include "db/log_writer.h"
|
#include "db/log_writer.h"
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "leveldb/env.h"
|
#include "leveldb/env.h"
|
||||||
#include "util/coding.h"
|
#include "util/coding.h"
|
||||||
#include "util/crc32c.h"
|
#include "util/crc32c.h"
|
||||||
@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Writer::Writer(WritableFile* dest)
|
Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
|
||||||
: dest_(dest),
|
|
||||||
block_offset_(0) {
|
|
||||||
InitTypeCrc(type_crc_);
|
InitTypeCrc(type_crc_);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
|
|||||||
InitTypeCrc(type_crc_);
|
InitTypeCrc(type_crc_);
|
||||||
}
|
}
|
||||||
|
|
||||||
Writer::~Writer() {
|
Writer::~Writer() = default;
|
||||||
}
|
|
||||||
|
|
||||||
Status Writer::AddRecord(const Slice& slice) {
|
Status Writer::AddRecord(const Slice& slice) {
|
||||||
const char* ptr = slice.data();
|
const char* ptr = slice.data();
|
||||||
@@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
       // Switch to a new block
       if (leftover > 0) {
         // Fill the trailer (literal below relies on kHeaderSize being 7)
-        assert(kHeaderSize == 7);
+        static_assert(kHeaderSize == 7, "");
         dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
       }
       block_offset_ = 0;
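Since kHeaderSize is a compile-time constant, the check above can move from a runtime assert() to static_assert, which the compiler verifies and which costs nothing at run time. A minimal sketch (not from this commit); kExampleHeaderSize stands in for log::kHeaderSize:

#include <assert.h>

constexpr int kExampleHeaderSize = 7;

// Checked while compiling; the empty message argument is required before C++17.
static_assert(kExampleHeaderSize == 7, "");

void RuntimeOnlyCheck(int leftover) {
  // assert() remains the right tool for values only known at run time.
  assert(leftover >= 0);
  (void)leftover;
}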
@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
|
|||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
|
Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
|
||||||
assert(n <= 0xffff); // Must fit in two bytes
|
size_t length) {
|
||||||
assert(block_offset_ + kHeaderSize + n <= kBlockSize);
|
assert(length <= 0xffff); // Must fit in two bytes
|
||||||
|
assert(block_offset_ + kHeaderSize + length <= kBlockSize);
|
||||||
|
|
||||||
// Format the header
|
// Format the header
|
||||||
char buf[kHeaderSize];
|
char buf[kHeaderSize];
|
||||||
buf[4] = static_cast<char>(n & 0xff);
|
buf[4] = static_cast<char>(length & 0xff);
|
||||||
buf[5] = static_cast<char>(n >> 8);
|
buf[5] = static_cast<char>(length >> 8);
|
||||||
buf[6] = static_cast<char>(t);
|
buf[6] = static_cast<char>(t);
|
||||||
|
|
||||||
// Compute the crc of the record type and the payload.
|
// Compute the crc of the record type and the payload.
|
||||||
uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
|
uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
|
||||||
crc = crc32c::Mask(crc); // Adjust for storage
|
crc = crc32c::Mask(crc); // Adjust for storage
|
||||||
EncodeFixed32(buf, crc);
|
EncodeFixed32(buf, crc);
|
||||||
|
|
||||||
// Write the header and the payload
|
// Write the header and the payload
|
||||||
Status s = dest_->Append(Slice(buf, kHeaderSize));
|
Status s = dest_->Append(Slice(buf, kHeaderSize));
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
s = dest_->Append(Slice(ptr, n));
|
s = dest_->Append(Slice(ptr, length));
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
s = dest_->Flush();
|
s = dest_->Flush();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
block_offset_ += kHeaderSize + n;
|
block_offset_ += kHeaderSize + length;
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
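For orientation, the seven-byte header emitted above is laid out as a little-endian CRC in bytes 0-3 (EncodeFixed32), the payload length in bytes 4-5 (low byte first), and the record type in byte 6. A hedged sketch (not from this commit) that decodes those fields back out; DecodeRecordHeader is a hypothetical helper:

#include <stdint.h>

struct RecordHeader {
  uint32_t masked_crc;  // crc32c of type byte + payload, masked for storage
  uint16_t length;      // payload length
  uint8_t type;         // kFullType, kFirstType, kMiddleType or kLastType
};

RecordHeader DecodeRecordHeader(const char* buf) {  // buf holds >= 7 bytes
  const unsigned char* p = reinterpret_cast<const unsigned char*>(buf);
  RecordHeader header;
  header.masked_crc = static_cast<uint32_t>(p[0]) |
                      (static_cast<uint32_t>(p[1]) << 8) |
                      (static_cast<uint32_t>(p[2]) << 16) |
                      (static_cast<uint32_t>(p[3]) << 24);
  header.length = static_cast<uint16_t>(p[4] | (p[5] << 8));
  header.type = p[6];
  return header;
}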
|
@ -6,6 +6,7 @@
|
|||||||
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
|
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "db/log_format.h"
|
#include "db/log_format.h"
|
||||||
#include "leveldb/slice.h"
|
#include "leveldb/slice.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
@ -28,24 +29,23 @@ class Writer {
|
|||||||
// "*dest" must remain live while this Writer is in use.
|
// "*dest" must remain live while this Writer is in use.
|
||||||
Writer(WritableFile* dest, uint64_t dest_length);
|
Writer(WritableFile* dest, uint64_t dest_length);
|
||||||
|
|
||||||
|
Writer(const Writer&) = delete;
|
||||||
|
Writer& operator=(const Writer&) = delete;
|
||||||
|
|
||||||
~Writer();
|
~Writer();
|
||||||
|
|
||||||
Status AddRecord(const Slice& slice);
|
Status AddRecord(const Slice& slice);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
|
||||||
|
|
||||||
WritableFile* dest_;
|
WritableFile* dest_;
|
||||||
int block_offset_; // Current offset in block
|
int block_offset_; // Current offset in block
|
||||||
|
|
||||||
// crc32c values for all supported record types. These are
|
// crc32c values for all supported record types. These are
|
||||||
// pre-computed to reduce the overhead of computing the crc of the
|
// pre-computed to reduce the overhead of computing the crc of the
|
||||||
// record type stored in the header.
|
// record type stored in the header.
|
||||||
uint32_t type_crc_[kMaxRecordType + 1];
|
uint32_t type_crc_[kMaxRecordType + 1];
|
||||||
|
|
||||||
Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
|
|
||||||
|
|
||||||
// No copying allowed
|
|
||||||
Writer(const Writer&);
|
|
||||||
void operator=(const Writer&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace log
|
} // namespace log
|
||||||
|
@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
|
|||||||
return Slice(p, len);
|
return Slice(p, len);
|
||||||
}
|
}
|
||||||
|
|
||||||
MemTable::MemTable(const InternalKeyComparator& cmp)
|
MemTable::MemTable(const InternalKeyComparator& comparator)
|
||||||
: comparator_(cmp),
|
: comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
|
||||||
refs_(0),
|
|
||||||
table_(comparator_, &arena_) {
|
|
||||||
}
|
|
||||||
|
|
||||||
MemTable::~MemTable() {
|
MemTable::~MemTable() { assert(refs_ == 0); }
|
||||||
assert(refs_ == 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
|
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
|
||||||
|
|
||||||
int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
|
int MemTable::KeyComparator::operator()(const char* aptr,
|
||||||
const {
|
const char* bptr) const {
|
||||||
// Internal keys are encoded as length-prefixed strings.
|
// Internal keys are encoded as length-prefixed strings.
|
||||||
Slice a = GetLengthPrefixedSlice(aptr);
|
Slice a = GetLengthPrefixedSlice(aptr);
|
||||||
Slice b = GetLengthPrefixedSlice(bptr);
|
Slice b = GetLengthPrefixedSlice(bptr);
|
||||||
@ -48,39 +43,37 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
|
|||||||
return scratch->data();
|
return scratch->data();
|
||||||
}
|
}
|
||||||
|
|
||||||
class MemTableIterator: public Iterator {
|
class MemTableIterator : public Iterator {
|
||||||
public:
|
public:
|
||||||
explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
|
explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
|
||||||
|
|
||||||
virtual bool Valid() const { return iter_.Valid(); }
|
MemTableIterator(const MemTableIterator&) = delete;
|
||||||
virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
|
MemTableIterator& operator=(const MemTableIterator&) = delete;
|
||||||
virtual void SeekToFirst() { iter_.SeekToFirst(); }
|
|
||||||
virtual void SeekToLast() { iter_.SeekToLast(); }
|
~MemTableIterator() override = default;
|
||||||
virtual void Next() { iter_.Next(); }
|
|
||||||
virtual void Prev() { iter_.Prev(); }
|
bool Valid() const override { return iter_.Valid(); }
|
||||||
virtual Slice key() const { return GetLengthPrefixedSlice(iter_.key()); }
|
void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); }
|
||||||
virtual Slice value() const {
|
void SeekToFirst() override { iter_.SeekToFirst(); }
|
||||||
|
void SeekToLast() override { iter_.SeekToLast(); }
|
||||||
|
void Next() override { iter_.Next(); }
|
||||||
|
void Prev() override { iter_.Prev(); }
|
||||||
|
Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); }
|
||||||
|
Slice value() const override {
|
||||||
Slice key_slice = GetLengthPrefixedSlice(iter_.key());
|
Slice key_slice = GetLengthPrefixedSlice(iter_.key());
|
||||||
return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
|
return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status status() const { return Status::OK(); }
|
Status status() const override { return Status::OK(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
MemTable::Table::Iterator iter_;
|
MemTable::Table::Iterator iter_;
|
||||||
std::string tmp_; // For passing to EncodeKey
|
std::string tmp_; // For passing to EncodeKey
|
||||||
|
|
||||||
// No copying allowed
|
|
||||||
MemTableIterator(const MemTableIterator&);
|
|
||||||
void operator=(const MemTableIterator&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Iterator* MemTable::NewIterator() {
|
Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
|
||||||
return new MemTableIterator(&table_);
|
|
||||||
}
|
|
||||||
|
|
||||||
void MemTable::Add(SequenceNumber s, ValueType type,
|
void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
|
||||||
const Slice& key,
|
|
||||||
const Slice& value) {
|
const Slice& value) {
|
||||||
// Format of an entry is concatenation of:
|
// Format of an entry is concatenation of:
|
||||||
// key_size : varint32 of internal_key.size()
|
// key_size : varint32 of internal_key.size()
|
||||||
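The iterator rewrite above also swaps "virtual" on the overriding members for "override", which makes the compiler confirm that each method really overrides a base-class virtual. A small sketch (not from this commit) with made-up names:

class BaseIterator {
 public:
  virtual ~BaseIterator() {}
  virtual bool Valid() const { return false; }
};

class CountingIterator : public BaseIterator {
 public:
  bool Valid() const override { return true; }  // OK: matches BaseIterator::Valid
  // bool valid() const override;  // would not compile: overrides nothing
};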
@@ -90,9 +83,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   size_t key_size = key.size();
   size_t val_size = value.size();
   size_t internal_key_size = key_size + 8;
-  const size_t encoded_len =
-      VarintLength(internal_key_size) + internal_key_size +
-      VarintLength(val_size) + val_size;
+  const size_t encoded_len = VarintLength(internal_key_size) +
+                             internal_key_size + VarintLength(val_size) +
+                             val_size;
   char* buf = arena_.Allocate(encoded_len);
   char* p = EncodeVarint32(buf, internal_key_size);
   memcpy(p, key.data(), key_size);
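The encoded_len computed above sizes a single arena buffer holding varint32(internal_key_size), the user key, an eight-byte trailer, varint32(value_size), and the value, where internal_key_size is key.size() + 8. A self-contained sketch (not from this commit) of that arithmetic; VarintLengthSketch mirrors the contract of util/coding.h's VarintLength so the example stands alone:

#include <stddef.h>
#include <stdint.h>

// Number of bytes needed to store v as a base-128 varint.
size_t VarintLengthSketch(uint64_t v) {
  size_t len = 1;
  while (v >= 128) {
    v >>= 7;
    len++;
  }
  return len;
}

size_t EncodedEntryLength(size_t key_size, size_t val_size) {
  const size_t internal_key_size = key_size + 8;  // user key + 8-byte trailer
  return VarintLengthSketch(internal_key_size) + internal_key_size +
         VarintLengthSketch(val_size) + val_size;
}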
@ -101,7 +94,7 @@ void MemTable::Add(SequenceNumber s, ValueType type,
|
|||||||
p += 8;
|
p += 8;
|
||||||
p = EncodeVarint32(p, val_size);
|
p = EncodeVarint32(p, val_size);
|
||||||
memcpy(p, value.data(), val_size);
|
memcpy(p, value.data(), val_size);
|
||||||
assert((p + val_size) - buf == encoded_len);
|
assert(p + val_size == buf + encoded_len);
|
||||||
table_.Insert(buf);
|
table_.Insert(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -121,10 +114,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
|
|||||||
// all entries with overly large sequence numbers.
|
// all entries with overly large sequence numbers.
|
||||||
const char* entry = iter.key();
|
const char* entry = iter.key();
|
||||||
uint32_t key_length;
|
uint32_t key_length;
|
||||||
const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
|
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
|
||||||
if (comparator_.comparator.user_comparator()->Compare(
|
if (comparator_.comparator.user_comparator()->Compare(
|
||||||
Slice(key_ptr, key_length - 8),
|
Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
|
||||||
key.user_key()) == 0) {
|
|
||||||
// Correct user key
|
// Correct user key
|
||||||
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
||||||
switch (static_cast<ValueType>(tag & 0xff)) {
|
switch (static_cast<ValueType>(tag & 0xff)) {
|
||||||
|
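For context on the entry layout that MemTable::Add writes and MemTable::Get parses above, here is a minimal decoding sketch. It is illustrative only: the DecodeEntry helper is hypothetical, and it assumes the coding helpers (GetVarint32Ptr, DecodeFixed64) behave as used in the diff.

#include "db/dbformat.h"
#include "util/coding.h"

namespace leveldb {
// Hypothetical helper: decode one entry as written by MemTable::Add.
// Layout: varint32 internal_key_size | user_key bytes | 8-byte tag
//         (sequence << 8 | type)     | varint32 value_size | value bytes.
void DecodeEntry(const char* entry) {
  uint32_t internal_key_size;
  const char* p = GetVarint32Ptr(entry, entry + 5, &internal_key_size);
  Slice user_key(p, internal_key_size - 8);  // user key without the tag
  const uint64_t tag = DecodeFixed64(p + internal_key_size - 8);
  const SequenceNumber sequence = tag >> 8;
  const ValueType type = static_cast<ValueType>(tag & 0xff);
  uint32_t value_size;
  const char* v = GetVarint32Ptr(p + internal_key_size,
                                 p + internal_key_size + 5, &value_size);
  Slice value(v, value_size);
  (void)user_key; (void)sequence; (void)type; (void)value;
}
}  // namespace leveldb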
@@ -6,15 +6,15 @@
 #define STORAGE_LEVELDB_DB_MEMTABLE_H_

 #include <string>
-#include "leveldb/db.h"
 #include "db/dbformat.h"
 #include "db/skiplist.h"
+#include "leveldb/db.h"
 #include "util/arena.h"

 namespace leveldb {

 class InternalKeyComparator;
-class Mutex;
 class MemTableIterator;

 class MemTable {
@@ -23,6 +23,9 @@ class MemTable {
   // is zero and the caller must call Ref() at least once.
   explicit MemTable(const InternalKeyComparator& comparator);

+  MemTable(const MemTable&) = delete;
+  MemTable& operator=(const MemTable&) = delete;
+
   // Increase reference count.
   void Ref() { ++refs_; }

@@ -50,8 +53,7 @@ class MemTable {
   // Add an entry into memtable that maps key to value at the
   // specified sequence number and with the specified type.
   // Typically value will be empty if type==kTypeDeletion.
-  void Add(SequenceNumber seq, ValueType type,
-           const Slice& key,
+  void Add(SequenceNumber seq, ValueType type, const Slice& key,
            const Slice& value);

   // If memtable contains a value for key, store it in *value and return true.
@@ -61,26 +63,23 @@ class MemTable {
   bool Get(const LookupKey& key, std::string* value, Status* s);

  private:
-  ~MemTable();  // Private since only Unref() should be used to delete it
-
-  struct KeyComparator {
-    const InternalKeyComparator comparator;
-    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
-    int operator()(const char* a, const char* b) const;
-  };
   friend class MemTableIterator;
   friend class MemTableBackwardIterator;

+  struct KeyComparator {
+    const InternalKeyComparator comparator;
+    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
+    int operator()(const char* a, const char* b) const;
+  };
+
   typedef SkipList<const char*, KeyComparator> Table;

+  ~MemTable();  // Private since only Unref() should be used to delete it
+
   KeyComparator comparator_;
   int refs_;
   Arena arena_;
   Table table_;
-
-  // No copying allowed
-  MemTable(const MemTable&);
-  void operator=(const MemTable&);
 };

 }  // namespace leveldb
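As the header above notes, a MemTable starts with a reference count of zero and may only be destroyed through Unref(). A usage sketch of these internal classes, under the assumption that db/memtable.h and db/dbformat.h are visible to the caller (this is not public API):

#include <string>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "leveldb/comparator.h"

void MemTableSketch() {
  leveldb::InternalKeyComparator cmp(leveldb::BytewiseComparator());
  leveldb::MemTable* mem = new leveldb::MemTable(cmp);
  mem->Ref();  // take the first reference
  mem->Add(1 /*sequence*/, leveldb::kTypeValue, "key", "value");
  std::string value;
  leveldb::Status s;
  leveldb::LookupKey lkey("key", 1 /*sequence*/);
  if (mem->Get(lkey, &value, &s)) {
    // value now holds "value"
  }
  mem->Unref();  // count drops to zero; the memtable deletes itself
}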
@@ -17,7 +17,7 @@ namespace leveldb {

 class RecoveryTest {
  public:
-  RecoveryTest() : env_(Env::Default()), db_(NULL) {
+  RecoveryTest() : env_(Env::Default()), db_(nullptr) {
     dbname_ = test::TmpDir() + "/recovery_test";
     DestroyDB(dbname_, Options());
     Open();
@@ -44,22 +44,26 @@ class RecoveryTest {

   void Close() {
     delete db_;
-    db_ = NULL;
+    db_ = nullptr;
   }

-  void Open(Options* options = NULL) {
+  Status OpenWithStatus(Options* options = nullptr) {
     Close();
     Options opts;
-    if (options != NULL) {
+    if (options != nullptr) {
       opts = *options;
     } else {
       opts.reuse_logs = true;  // TODO(sanjay): test both ways
       opts.create_if_missing = true;
     }
-    if (opts.env == NULL) {
+    if (opts.env == nullptr) {
       opts.env = env_;
     }
-    ASSERT_OK(DB::Open(opts, dbname_, &db_));
+    return DB::Open(opts, dbname_, &db_);
+  }
+
+  void Open(Options* options = nullptr) {
+    ASSERT_OK(OpenWithStatus(options));
     ASSERT_EQ(1, NumLogs());
   }

@@ -67,7 +71,7 @@ class RecoveryTest {
     return db_->Put(WriteOptions(), k, v);
   }

-  std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
     std::string result;
     Status s = db_->Get(ReadOptions(), k, &result);
     if (s.IsNotFound()) {
@@ -82,17 +86,18 @@ class RecoveryTest {
     std::string current;
     ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
     size_t len = current.size();
-    if (len > 0 && current[len-1] == '\n') {
+    if (len > 0 && current[len - 1] == '\n') {
       current.resize(len - 1);
     }
     return dbname_ + "/" + current;
   }

-  std::string LogName(uint64_t number) {
-    return LogFileName(dbname_, number);
-  }
+  std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }

   size_t DeleteLogFiles() {
+    // Linux allows unlinking open files, but Windows does not.
+    // Closing the db allows for file deletion.
+    Close();
     std::vector<uint64_t> logs = GetFiles(kLogFile);
     for (size_t i = 0; i < logs.size(); i++) {
       ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
@@ -100,9 +105,9 @@ class RecoveryTest {
     return logs.size();
   }

-  uint64_t FirstLogFile() {
-    return GetFiles(kLogFile)[0];
-  }
+  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
+
+  uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }

   std::vector<uint64_t> GetFiles(FileType t) {
     std::vector<std::string> filenames;
@@ -118,13 +123,9 @@ class RecoveryTest {
     return result;
   }

-  int NumLogs() {
-    return GetFiles(kLogFile).size();
-  }
+  int NumLogs() { return GetFiles(kLogFile).size(); }

-  int NumTables() {
-    return GetFiles(kTableFile).size();
-  }
+  int NumTables() { return GetFiles(kTableFile).size(); }

   uint64_t FileSize(const std::string& fname) {
     uint64_t result;
@@ -132,9 +133,7 @@ class RecoveryTest {
     return result;
   }

-  void CompactMemTable() {
-    dbfull()->TEST_CompactMemTable();
-  }
+  void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }

   // Directly construct a log file that sets key to val.
   void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
@@ -186,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
   uint64_t len = FileSize(old_manifest);
   WritableFile* file;
   ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
-  std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+  std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
   ASSERT_OK(file->Append(zeroes));
   ASSERT_OK(file->Flush());
   delete file;
@@ -259,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
   // Force creation of multiple memtables by reducing the write buffer size.
   Options opt;
   opt.reuse_logs = true;
-  opt.write_buffer_size = (kNum*100) / 2;
+  opt.write_buffer_size = (kNum * 100) / 2;
   Open(&opt);
   ASSERT_LE(2, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -278,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {

   // Make a bunch of uncompacted log files.
   uint64_t old_log = FirstLogFile();
-  MakeLogFile(old_log+1, 1000, "hello", "world");
-  MakeLogFile(old_log+2, 1001, "hi", "there");
-  MakeLogFile(old_log+3, 1002, "foo", "bar2");
+  MakeLogFile(old_log + 1, 1000, "hello", "world");
+  MakeLogFile(old_log + 2, 1001, "hi", "there");
+  MakeLogFile(old_log + 3, 1002, "foo", "bar2");

   // Recover and check that all log files were processed.
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
   uint64_t new_log = FirstLogFile();
-  ASSERT_LE(old_log+3, new_log);
+  ASSERT_LE(old_log + 3, new_log);
   ASSERT_EQ("bar2", Get("foo"));
   ASSERT_EQ("world", Get("hello"));
   ASSERT_EQ("there", Get("hi"));
@@ -305,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {

   // Check that introducing an older log file does not cause it to be re-read.
   Close();
-  MakeLogFile(old_log+1, 2000, "hello", "stale write");
+  MakeLogFile(old_log + 1, 2000, "hello", "stale write");
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -317,8 +316,15 @@ TEST(RecoveryTest, MultipleLogFiles) {
   ASSERT_EQ("there", Get("hi"));
 }

+TEST(RecoveryTest, ManifestMissing) {
+  ASSERT_OK(Put("foo", "bar"));
+  Close();
+  DeleteManifestFile();
+
+  Status status = OpenWithStatus();
+  ASSERT_TRUE(status.IsCorruption());
+}
+
 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
93  db/repair.cc
@@ -54,7 +54,7 @@ class Repairer {
         owns_cache_(options_.block_cache != options.block_cache),
         next_file_number_(1) {
     // TableCache can be small since we expect each table to be opened once.
-    table_cache_ = new TableCache(dbname_, &options_, 10);
+    table_cache_ = new TableCache(dbname_, options_, 10);
   }

   ~Repairer() {
@@ -84,9 +84,7 @@ class Repairer {
           "recovered %d files; %llu bytes. "
           "Some data may have been lost. "
           "****",
-          dbname_.c_str(),
-          static_cast<int>(tables_.size()),
-          bytes);
+          dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
     }
     return status;
   }
@@ -97,22 +95,6 @@ class Repairer {
     SequenceNumber max_sequence;
   };

-  std::string const dbname_;
-  Env* const env_;
-  InternalKeyComparator const icmp_;
-  InternalFilterPolicy const ipolicy_;
-  Options const options_;
-  bool owns_info_log_;
-  bool owns_cache_;
-  TableCache* table_cache_;
-  VersionEdit edit_;
-
-  std::vector<std::string> manifests_;
-  std::vector<uint64_t> table_numbers_;
-  std::vector<uint64_t> logs_;
-  std::vector<TableInfo> tables_;
-  uint64_t next_file_number_;
-
   Status FindFiles() {
     std::vector<std::string> filenames;
     Status status = env_->GetChildren(dbname_, &filenames);
@@ -152,8 +134,7 @@ class Repairer {
       Status status = ConvertLogToTable(logs_[i]);
       if (!status.ok()) {
         Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
-            (unsigned long long) logs_[i],
-            status.ToString().c_str());
+            (unsigned long long)logs_[i], status.ToString().c_str());
       }
       ArchiveFile(logname);
     }
@@ -167,8 +148,7 @@ class Repairer {
     virtual void Corruption(size_t bytes, const Status& s) {
       // We print error messages for corruption, but continue repairing.
       Log(info_log, "Log #%llu: dropping %d bytes; %s",
-          (unsigned long long) lognum,
-          static_cast<int>(bytes),
+          (unsigned long long)lognum, static_cast<int>(bytes),
           s.ToString().c_str());
     }
   };
@@ -190,8 +170,8 @@ class Repairer {
     // corruptions cause entire commits to be skipped instead of
     // propagating bad information (like overly large sequence
     // numbers).
-    log::Reader reader(lfile, &reporter, false/*do not checksum*/,
-                       0/*initial_offset*/);
+    log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+                       0 /*initial_offset*/);

     // Read all the records and add to a memtable
     std::string scratch;
@@ -202,8 +182,8 @@ class Repairer {
     int counter = 0;
     while (reader.ReadRecord(&record, &scratch)) {
       if (record.size() < 12) {
-        reporter.Corruption(
-            record.size(), Status::Corruption("log record too small"));
+        reporter.Corruption(record.size(),
+                            Status::Corruption("log record too small"));
         continue;
       }
       WriteBatchInternal::SetContents(&batch, record);
@@ -212,8 +192,7 @@ class Repairer {
         counter += WriteBatchInternal::Count(&batch);
       } else {
         Log(options_.info_log, "Log #%llu: ignoring %s",
-            (unsigned long long) log,
-            status.ToString().c_str());
+            (unsigned long long)log, status.ToString().c_str());
         status = Status::OK();  // Keep going with rest of file
       }
     }
@@ -227,16 +206,14 @@ class Repairer {
     status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
     delete iter;
     mem->Unref();
-    mem = NULL;
+    mem = nullptr;
     if (status.ok()) {
       if (meta.file_size > 0) {
         table_numbers_.push_back(meta.number);
       }
     }
     Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
-        (unsigned long long) log,
-        counter,
-        (unsigned long long) meta.number,
+        (unsigned long long)log, counter, (unsigned long long)meta.number,
         status.ToString().c_str());
     return status;
   }
@@ -272,8 +249,7 @@ class Repairer {
       ArchiveFile(TableFileName(dbname_, number));
       ArchiveFile(SSTTableFileName(dbname_, number));
       Log(options_.info_log, "Table #%llu: dropped: %s",
-          (unsigned long long) t.meta.number,
-          status.ToString().c_str());
+          (unsigned long long)t.meta.number, status.ToString().c_str());
       return;
     }

@@ -287,8 +263,7 @@ class Repairer {
       Slice key = iter->key();
       if (!ParseInternalKey(key, &parsed)) {
         Log(options_.info_log, "Table #%llu: unparsable key %s",
-            (unsigned long long) t.meta.number,
-            EscapeString(key).c_str());
+            (unsigned long long)t.meta.number, EscapeString(key).c_str());
         continue;
       }

@@ -307,9 +282,7 @@ class Repairer {
     }
     delete iter;
     Log(options_.info_log, "Table #%llu: %d entries %s",
-        (unsigned long long) t.meta.number,
-        counter,
-        status.ToString().c_str());
+        (unsigned long long)t.meta.number, counter, status.ToString().c_str());

     if (status.ok()) {
       tables_.push_back(t);
@@ -350,20 +323,20 @@ class Repairer {
       }
     }
     delete builder;
-    builder = NULL;
+    builder = nullptr;

     if (s.ok()) {
       s = file->Close();
     }
     delete file;
-    file = NULL;
+    file = nullptr;

     if (counter > 0 && s.ok()) {
       std::string orig = TableFileName(dbname_, t.meta.number);
       s = env_->RenameFile(copy, orig);
       if (s.ok()) {
         Log(options_.info_log, "Table #%llu: %d entries repaired",
-            (unsigned long long) t.meta.number, counter);
+            (unsigned long long)t.meta.number, counter);
         tables_.push_back(t);
       }
     }
@@ -395,11 +368,11 @@ class Repairer {
     for (size_t i = 0; i < tables_.size(); i++) {
       // TODO(opt): separate out into multiple levels
       const TableInfo& t = tables_[i];
-      edit_.AddFile(0, t.meta.number, t.meta.file_size,
-                    t.meta.smallest, t.meta.largest);
+      edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+                    t.meta.largest);
     }

-    //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+    // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
     {
       log::Writer log(file);
       std::string record;
@@ -410,7 +383,7 @@ class Repairer {
       status = file->Close();
     }
     delete file;
-    file = NULL;
+    file = nullptr;

     if (!status.ok()) {
       env_->DeleteFile(tmp);
@@ -438,18 +411,34 @@ class Repairer {
     // dir/lost/foo
     const char* slash = strrchr(fname.c_str(), '/');
     std::string new_dir;
-    if (slash != NULL) {
+    if (slash != nullptr) {
       new_dir.assign(fname.data(), slash - fname.data());
     }
     new_dir.append("/lost");
     env_->CreateDir(new_dir);  // Ignore error
     std::string new_file = new_dir;
     new_file.append("/");
-    new_file.append((slash == NULL) ? fname.c_str() : slash + 1);
+    new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
     Status s = env_->RenameFile(fname, new_file);
-    Log(options_.info_log, "Archiving %s: %s\n",
-        fname.c_str(), s.ToString().c_str());
+    Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+        s.ToString().c_str());
   }

+  const std::string dbname_;
+  Env* const env_;
+  InternalKeyComparator const icmp_;
+  InternalFilterPolicy const ipolicy_;
+  const Options options_;
+  bool owns_info_log_;
+  bool owns_cache_;
+  TableCache* table_cache_;
+  VersionEdit edit_;
+
+  std::vector<std::string> manifests_;
+  std::vector<uint64_t> table_numbers_;
+  std::vector<uint64_t> logs_;
+  std::vector<TableInfo> tables_;
+  uint64_t next_file_number_;
 };
 }  // namespace

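The Repairer above backs the RepairDB() entry point declared in include/leveldb/db.h. A sketch of how a client might fall back to it when an open fails with a corruption error (illustrative only; error handling trimmed):

#include <string>

#include "leveldb/db.h"

// Open a database, attempting RepairDB() if the initial open reports
// corruption. Repair salvages what it can and may drop data.
leveldb::Status OpenOrRepair(const std::string& name, leveldb::DB** dbptr) {
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status s = leveldb::DB::Open(options, name, dbptr);
  if (s.IsCorruption()) {
    s = leveldb::RepairDB(name, options);
    if (s.ok()) {
      s = leveldb::DB::Open(options, name, dbptr);
    }
  }
  return s;
}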
182  db/skiplist.h
@@ -27,9 +27,10 @@
 //
 // ... prev vs. next pointer ordering ...

-#include <assert.h>
-#include <stdlib.h>
-#include "port/port.h"
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+
 #include "util/arena.h"
 #include "util/random.h"

@@ -37,7 +38,7 @@ namespace leveldb {

 class Arena;

-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 class SkipList {
  private:
   struct Node;
@@ -48,6 +49,9 @@ class SkipList {
   // must remain allocated for the lifetime of the skiplist object.
   explicit SkipList(Comparator cmp, Arena* arena);

+  SkipList(const SkipList&) = delete;
+  SkipList& operator=(const SkipList&) = delete;
+
   // Insert key into the list.
   // REQUIRES: nothing that compares equal to key is currently in the list.
   void Insert(const Key& key);
@@ -97,24 +101,10 @@ class SkipList {
  private:
   enum { kMaxHeight = 12 };

-  // Immutable after construction
-  Comparator const compare_;
-  Arena* const arena_;  // Arena used for allocations of nodes
-
-  Node* const head_;
-
-  // Modified only by Insert(). Read racily by readers, but stale
-  // values are ok.
-  port::AtomicPointer max_height_;  // Height of the entire list
-
   inline int GetMaxHeight() const {
-    return static_cast<int>(
-        reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+    return max_height_.load(std::memory_order_relaxed);
   }

-  // Read/written only by Insert().
-  Random rnd_;
-
   Node* NewNode(const Key& key, int height);
   int RandomHeight();
   bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@@ -123,9 +113,9 @@ class SkipList {
   bool KeyIsAfterNode(const Key& key, Node* n) const;

   // Return the earliest node that comes at or after key.
-  // Return NULL if there is no such node.
+  // Return nullptr if there is no such node.
   //
-  // If prev is non-NULL, fills prev[level] with pointer to previous
+  // If prev is non-null, fills prev[level] with pointer to previous
   // node at "level" for every level in [0..max_height_-1].
   Node* FindGreaterOrEqual(const Key& key, Node** prev) const;

@@ -137,15 +127,24 @@ class SkipList {
   // Return head_ if list is empty.
   Node* FindLast() const;

-  // No copying allowed
-  SkipList(const SkipList&);
-  void operator=(const SkipList&);
+  // Immutable after construction
+  Comparator const compare_;
+  Arena* const arena_;  // Arena used for allocations of nodes
+
+  Node* const head_;
+
+  // Modified only by Insert(). Read racily by readers, but stale
+  // values are ok.
+  std::atomic<int> max_height_;  // Height of the entire list
+
+  // Read/written only by Insert().
+  Random rnd_;
 };

 // Implementation details follow
-template<typename Key, class Comparator>
-struct SkipList<Key,Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
+template <typename Key, class Comparator>
+struct SkipList<Key, Comparator>::Node {
+  explicit Node(const Key& k) : key(k) {}

   Key const key;

@@ -155,92 +154,92 @@ struct SkipList<Key,Comparator>::Node {
     assert(n >= 0);
     // Use an 'acquire load' so that we observe a fully initialized
     // version of the returned Node.
-    return reinterpret_cast<Node*>(next_[n].Acquire_Load());
+    return next_[n].load(std::memory_order_acquire);
   }
   void SetNext(int n, Node* x) {
     assert(n >= 0);
     // Use a 'release store' so that anybody who reads through this
     // pointer observes a fully initialized version of the inserted node.
-    next_[n].Release_Store(x);
+    next_[n].store(x, std::memory_order_release);
   }

   // No-barrier variants that can be safely used in a few locations.
   Node* NoBarrier_Next(int n) {
     assert(n >= 0);
-    return reinterpret_cast<Node*>(next_[n].NoBarrier_Load());
+    return next_[n].load(std::memory_order_relaxed);
   }
   void NoBarrier_SetNext(int n, Node* x) {
     assert(n >= 0);
-    next_[n].NoBarrier_Store(x);
+    next_[n].store(x, std::memory_order_relaxed);
   }

  private:
   // Array of length equal to the node height. next_[0] is lowest level link.
-  port::AtomicPointer next_[1];
+  std::atomic<Node*> next_[1];
 };

-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
-  char* mem = arena_->AllocateAligned(
-      sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1));
-  return new (mem) Node(key);
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+    const Key& key, int height) {
+  char* const node_memory = arena_->AllocateAligned(
+      sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
+  return new (node_memory) Node(key);
 }

-template<typename Key, class Comparator>
-inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
+template <typename Key, class Comparator>
+inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   list_ = list;
-  node_ = NULL;
+  node_ = nullptr;
 }

-template<typename Key, class Comparator>
-inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
-  return node_ != NULL;
+template <typename Key, class Comparator>
+inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
+  return node_ != nullptr;
 }

-template<typename Key, class Comparator>
-inline const Key& SkipList<Key,Comparator>::Iterator::key() const {
+template <typename Key, class Comparator>
+inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }

-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Next() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }

-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Prev() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
   assert(Valid());
   node_ = list_->FindLessThan(node_->key);
   if (node_ == list_->head_) {
-    node_ = NULL;
+    node_ = nullptr;
   }
 }

-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
-  node_ = list_->FindGreaterOrEqual(target, NULL);
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
+  node_ = list_->FindGreaterOrEqual(target, nullptr);
 }

-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }

-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
-    node_ = NULL;
+    node_ = nullptr;
   }
 }

-template<typename Key, class Comparator>
-int SkipList<Key,Comparator>::RandomHeight() {
+template <typename Key, class Comparator>
+int SkipList<Key, Comparator>::RandomHeight() {
   // Increase height with probability 1 in kBranching
   static const unsigned int kBranching = 4;
   int height = 1;
@@ -252,15 +251,16 @@ int SkipList<Key,Comparator>::RandomHeight() {
   return height;
 }

-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
-  // NULL n is considered infinite
-  return (n != NULL) && (compare_(n->key, key) < 0);
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+  // null n is considered infinite
+  return (n != nullptr) && (compare_(n->key, key) < 0);
 }

-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev)
-    const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
+                                              Node** prev) const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
   while (true) {
@@ -269,7 +269,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
       // Keep searching in this list
       x = next;
     } else {
-      if (prev != NULL) prev[level] = x;
+      if (prev != nullptr) prev[level] = x;
       if (level == 0) {
         return next;
       } else {
@@ -280,15 +280,15 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
   }
 }

-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
   while (true) {
     assert(x == head_ || compare_(x->key, key) < 0);
     Node* next = x->Next(level);
-    if (next == NULL || compare_(next->key, key) >= 0) {
+    if (next == nullptr || compare_(next->key, key) >= 0) {
       if (level == 0) {
         return x;
       } else {
@@ -301,14 +301,14 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
   }
 }

-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
   while (true) {
     Node* next = x->Next(level);
-    if (next == NULL) {
+    if (next == nullptr) {
       if (level == 0) {
         return x;
       } else {
@@ -321,43 +321,41 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
   }
 }

-template<typename Key, class Comparator>
-SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
+template <typename Key, class Comparator>
+SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
     : compare_(cmp),
       arena_(arena),
       head_(NewNode(0 /* any key will do */, kMaxHeight)),
-      max_height_(reinterpret_cast<void*>(1)),
+      max_height_(1),
       rnd_(0xdeadbeef) {
   for (int i = 0; i < kMaxHeight; i++) {
-    head_->SetNext(i, NULL);
+    head_->SetNext(i, nullptr);
   }
 }

-template<typename Key, class Comparator>
-void SkipList<Key,Comparator>::Insert(const Key& key) {
+template <typename Key, class Comparator>
+void SkipList<Key, Comparator>::Insert(const Key& key) {
   // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
   // here since Insert() is externally synchronized.
   Node* prev[kMaxHeight];
   Node* x = FindGreaterOrEqual(key, prev);

   // Our data structure does not allow duplicate insertion
-  assert(x == NULL || !Equal(key, x->key));
+  assert(x == nullptr || !Equal(key, x->key));

   int height = RandomHeight();
   if (height > GetMaxHeight()) {
     for (int i = GetMaxHeight(); i < height; i++) {
       prev[i] = head_;
     }
-    //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);

     // It is ok to mutate max_height_ without any synchronization
     // with concurrent readers. A concurrent reader that observes
     // the new value of max_height_ will see either the old value of
-    // new level pointers from head_ (NULL), or a new value set in
+    // new level pointers from head_ (nullptr), or a new value set in
     // the loop below. In the former case the reader will
-    // immediately drop to the next level since NULL sorts after all
+    // immediately drop to the next level since nullptr sorts after all
     // keys. In the latter case the reader will use the new node.
-    max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
+    max_height_.store(height, std::memory_order_relaxed);
   }

   x = NewNode(key, height);
@@ -369,10 +367,10 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
     }
   }
 }

-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::Contains(const Key& key) const {
-  Node* x = FindGreaterOrEqual(key, NULL);
-  if (x != NULL && Equal(key, x->key)) {
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::Contains(const Key& key) const {
+  Node* x = FindGreaterOrEqual(key, nullptr);
+  if (x != nullptr && Equal(key, x->key)) {
     return true;
   } else {
     return false;
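The SetNext()/Next() pair above is the standard release/acquire publication pattern, which the migration from port::AtomicPointer to std::atomic preserves. The same idea in isolation, as a standalone C++ sketch (not leveldb code):

#include <atomic>
#include <cassert>

struct Payload {
  int value = 0;
};

std::atomic<Payload*> slot{nullptr};

void Publish(Payload* p) {
  p->value = 42;                             // initialize the object first...
  slot.store(p, std::memory_order_release);  // ...then publish the pointer
}

void Consume() {
  // The acquire load pairs with the release store, so a non-null pointer
  // is guaranteed to refer to a fully initialized Payload.
  Payload* p = slot.load(std::memory_order_acquire);
  if (p != nullptr) {
    assert(p->value == 42);
  }
}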
@@ -3,8 +3,13 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

 #include "db/skiplist.h"

+#include <atomic>
 #include <set>

 #include "leveldb/env.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
 #include "util/arena.h"
 #include "util/hash.h"
 #include "util/random.h"
@@ -26,7 +31,7 @@ struct Comparator {
   }
 };

-class SkipTest { };
+class SkipTest {};

 TEST(SkipTest, Empty) {
   Arena arena;
@@ -112,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {

     // Compare against model iterator
     for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
-         model_iter != keys.rend();
-         ++model_iter) {
+         model_iter != keys.rend(); ++model_iter) {
       ASSERT_TRUE(iter.Valid());
       ASSERT_EQ(*model_iter, iter.key());
       iter.Prev();
@@ -126,7 +130,7 @@ TEST(SkipTest, InsertAndLookup) {
 // concurrent readers (with no synchronization other than when a
 // reader's iterator is created), the reader always observes all the
 // data that was present in the skip list when the iterator was
-// constructor. Because insertions are happening concurrently, we may
+// constructed. Because insertions are happening concurrently, we may
 // also observe new values that were inserted since the iterator was
 // constructed, but we should never miss any values that were present
 // at iterator construction time.
@@ -155,12 +159,12 @@ class ConcurrentTest {
   static uint64_t hash(Key key) { return key & 0xff; }

   static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
+    uint64_t data[2] = {k, g};
     return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
   }

   static Key MakeKey(uint64_t k, uint64_t g) {
-    assert(sizeof(Key) == sizeof(uint64_t));
+    static_assert(sizeof(Key) == sizeof(uint64_t), "");
     assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
     assert(g <= 0xffffffffu);
     return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
@@ -186,13 +190,11 @@ class ConcurrentTest {

   // Per-key generation
   struct State {
-    port::AtomicPointer generation[K];
-    void Set(int k, intptr_t v) {
-      generation[k].Release_Store(reinterpret_cast<void*>(v));
-    }
-    intptr_t Get(int k) {
-      return reinterpret_cast<intptr_t>(generation[k].Acquire_Load());
+    std::atomic<int> generation[K];
+    void Set(int k, int v) {
+      generation[k].store(v, std::memory_order_release);
     }
+    int Get(int k) { return generation[k].load(std::memory_order_acquire); }

     State() {
       for (int k = 0; k < K; k++) {
@@ -211,7 +213,7 @@ class ConcurrentTest {
   SkipList<Key, Comparator> list_;

  public:
-  ConcurrentTest() : list_(Comparator(), &arena_) { }
+  ConcurrentTest() : list_(Comparator(), &arena_) {}

   // REQUIRES: External synchronization
   void WriteStep(Random* rnd) {
@@ -250,11 +252,9 @@ class ConcurrentTest {
       // Note that generation 0 is never inserted, so it is ok if
       // <*,0,*> is missing.
       ASSERT_TRUE((gen(pos) == 0) ||
-                  (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
-                  ) << "key: " << key(pos)
-                    << "; gen: " << gen(pos)
-                    << "; initgen: "
-                    << initial_state.Get(key(pos));
+                  (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+          << "key: " << key(pos) << "; gen: " << gen(pos)
+          << "; initgen: " << initial_state.Get(key(pos));

       // Advance to next key in the valid key space
       if (key(pos) < key(current)) {
@@ -298,21 +298,14 @@ class TestState {
  public:
   ConcurrentTest t_;
   int seed_;
-  port::AtomicPointer quit_flag_;
+  std::atomic<bool> quit_flag_;

-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
+  enum ReaderState { STARTING, RUNNING, DONE };

   explicit TestState(int s)
-      : seed_(s),
-        quit_flag_(NULL),
-        state_(STARTING),
-        state_cv_(&mu_) {}
+      : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}

-  void Wait(ReaderState s) {
+  void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
     mu_.Lock();
     while (state_ != s) {
       state_cv_.Wait();
@@ -320,7 +313,7 @@ class TestState {
     mu_.Unlock();
   }

-  void Change(ReaderState s) {
+  void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
     mu_.Lock();
     state_ = s;
     state_cv_.Signal();
@@ -329,8 +322,8 @@ class TestState {

  private:
   port::Mutex mu_;
-  ReaderState state_;
-  port::CondVar state_cv_;
+  ReaderState state_ GUARDED_BY(mu_);
+  port::CondVar state_cv_ GUARDED_BY(mu_);
 };

 static void ConcurrentReader(void* arg) {
@@ -338,7 +331,7 @@ static void ConcurrentReader(void* arg) {
   Random rnd(state->seed_);
   int64_t reads = 0;
   state->Change(TestState::RUNNING);
-  while (!state->quit_flag_.Acquire_Load()) {
+  while (!state->quit_flag_.load(std::memory_order_acquire)) {
     state->t_.ReadStep(&rnd);
     ++reads;
   }
@@ -360,7 +353,7 @@ static void RunConcurrent(int run) {
     for (int i = 0; i < kSize; i++) {
       state.t_.WriteStep(&rnd);
     }
-    state.quit_flag_.Release_Store(&state);  // Any non-NULL arg will do
+    state.quit_flag_.store(true, std::memory_order_release);
     state.Wait(TestState::DONE);
   }
 }
@@ -373,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -16,50 +16,78 @@ class SnapshotList;
 // Each SnapshotImpl corresponds to a particular sequence number.
 class SnapshotImpl : public Snapshot {
  public:
-  SequenceNumber number_;  // const after creation
+  SnapshotImpl(SequenceNumber sequence_number)
+      : sequence_number_(sequence_number) {}
+
+  SequenceNumber sequence_number() const { return sequence_number_; }

  private:
   friend class SnapshotList;

-  // SnapshotImpl is kept in a doubly-linked circular list
+  // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
+  // implementation operates on the next/previous fields direcly.
   SnapshotImpl* prev_;
   SnapshotImpl* next_;

-  SnapshotList* list_;  // just for sanity checks
+  const SequenceNumber sequence_number_;
+
+#if !defined(NDEBUG)
+  SnapshotList* list_ = nullptr;
+#endif  // !defined(NDEBUG)
 };

 class SnapshotList {
  public:
-  SnapshotList() {
-    list_.prev_ = &list_;
-    list_.next_ = &list_;
+  SnapshotList() : head_(0) {
+    head_.prev_ = &head_;
+    head_.next_ = &head_;
   }

-  bool empty() const { return list_.next_ == &list_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
-
-  const SnapshotImpl* New(SequenceNumber seq) {
-    SnapshotImpl* s = new SnapshotImpl;
-    s->number_ = seq;
-    s->list_ = this;
-    s->next_ = &list_;
-    s->prev_ = list_.prev_;
-    s->prev_->next_ = s;
-    s->next_->prev_ = s;
-    return s;
+  bool empty() const { return head_.next_ == &head_; }
+  SnapshotImpl* oldest() const {
+    assert(!empty());
+    return head_.next_;
+  }
+  SnapshotImpl* newest() const {
+    assert(!empty());
+    return head_.prev_;
   }

-  void Delete(const SnapshotImpl* s) {
-    assert(s->list_ == this);
-    s->prev_->next_ = s->next_;
-    s->next_->prev_ = s->prev_;
-    delete s;
+  // Creates a SnapshotImpl and appends it to the end of the list.
+  SnapshotImpl* New(SequenceNumber sequence_number) {
+    assert(empty() || newest()->sequence_number_ <= sequence_number);
+
+    SnapshotImpl* snapshot = new SnapshotImpl(sequence_number);
+
+#if !defined(NDEBUG)
+    snapshot->list_ = this;
+#endif  // !defined(NDEBUG)
+    snapshot->next_ = &head_;
+    snapshot->prev_ = head_.prev_;
+    snapshot->prev_->next_ = snapshot;
+    snapshot->next_->prev_ = snapshot;
+    return snapshot;
+  }
+
+  // Removes a SnapshotImpl from this list.
+  //
+  // The snapshot must have been created by calling New() on this list.
+  //
+  // The snapshot pointer should not be const, because its memory is
+  // deallocated. However, that would force us to change DB::ReleaseSnapshot(),
+  // which is in the API, and currently takes a const Snapshot.
+  void Delete(const SnapshotImpl* snapshot) {
+#if !defined(NDEBUG)
+    assert(snapshot->list_ == this);
+#endif  // !defined(NDEBUG)
+    snapshot->prev_->next_ = snapshot->next_;
+    snapshot->next_->prev_ = snapshot->prev_;
+    delete snapshot;
   }

  private:
   // Dummy head of doubly-linked list of snapshots
-  SnapshotImpl list_;
+  SnapshotImpl head_;
 };

 }  // namespace leveldb
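SnapshotList::New() and Delete() above are driven by the public snapshot API; a short usage sketch against the documented leveldb interface (error handling omitted):

#include <string>

#include "leveldb/db.h"

void ReadAtSnapshot(leveldb::DB* db) {
  // GetSnapshot() appends an entry via SnapshotList::New().
  const leveldb::Snapshot* snap = db->GetSnapshot();
  leveldb::ReadOptions options;
  options.snapshot = snap;  // reads ignore writes made after the snapshot
  std::string value;
  leveldb::Status s = db->Get(options, "key", &value);
  (void)s;
  // ReleaseSnapshot() removes the entry again via SnapshotList::Delete().
  db->ReleaseSnapshot(snap);
}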
@@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
   cache->Release(h);
 }

-TableCache::TableCache(const std::string& dbname,
-                       const Options* options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
                        int entries)
-    : env_(options->env),
+    : env_(options.env),
       dbname_(dbname),
       options_(options),
-      cache_(NewLRUCache(entries)) {
-}
+      cache_(NewLRUCache(entries)) {}

-TableCache::~TableCache() {
-  delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }

 Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
                              Cache::Handle** handle) {
@@ -49,10 +45,10 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
   EncodeFixed64(buf, file_number);
   Slice key(buf, sizeof(buf));
   *handle = cache_->Lookup(key);
-  if (*handle == NULL) {
+  if (*handle == nullptr) {
     std::string fname = TableFileName(dbname_, file_number);
-    RandomAccessFile* file = NULL;
-    Table* table = NULL;
+    RandomAccessFile* file = nullptr;
+    Table* table = nullptr;
     s = env_->NewRandomAccessFile(fname, &file);
     if (!s.ok()) {
       std::string old_fname = SSTTableFileName(dbname_, file_number);
@@ -61,11 +57,11 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
       }
     }
     if (s.ok()) {
-      s = Table::Open(*options_, file, file_size, &table);
+      s = Table::Open(options_, file, file_size, &table);
     }

     if (!s.ok()) {
-      assert(table == NULL);
+      assert(table == nullptr);
       delete file;
       // We do not cache error results so that if the error is transient,
       // or somebody repairs the file, we recover automatically.
@@ -80,14 +76,13 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
 }

 Iterator* TableCache::NewIterator(const ReadOptions& options,
-                                  uint64_t file_number,
-                                  uint64_t file_size,
+                                  uint64_t file_number, uint64_t file_size,
                                   Table** tableptr) {
-  if (tableptr != NULL) {
-    *tableptr = NULL;
+  if (tableptr != nullptr) {
+    *tableptr = nullptr;
   }

-  Cache::Handle* handle = NULL;
+  Cache::Handle* handle = nullptr;
   Status s = FindTable(file_number, file_size, &handle);
   if (!s.ok()) {
     return NewErrorIterator(s);
@@ -96,23 +91,21 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
   Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
   Iterator* result = table->NewIterator(options);
   result->RegisterCleanup(&UnrefEntry, cache_, handle);
-  if (tableptr != NULL) {
+  if (tableptr != nullptr) {
     *tableptr = table;
   }
   return result;
 }

-Status TableCache::Get(const ReadOptions& options,
-                       uint64_t file_number,
-                       uint64_t file_size,
-                       const Slice& k,
-                       void* arg,
-                       void (*saver)(void*, const Slice&, const Slice&)) {
-  Cache::Handle* handle = NULL;
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+                       uint64_t file_size, const Slice& k, void* arg,
+                       void (*handle_result)(void*, const Slice&,
+                                             const Slice&)) {
+  Cache::Handle* handle = nullptr;
   Status s = FindTable(file_number, file_size, &handle);
   if (s.ok()) {
     Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
-    s = t->InternalGet(options, k, arg, saver);
+    s = t->InternalGet(options, k, arg, handle_result);
     cache_->Release(handle);
   }
   return s;
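For reference, the FindTable() hunk above keys the LRU cache on the table's file number serialized as 8 little-endian bytes (EncodeFixed64 into a stack buffer, wrapped in a Slice). A hypothetical standalone equivalent of that key construction, shown only to make the key format concrete:

#include <cstdint>
#include <string>

// Assumed helper, mirroring the spirit of leveldb's EncodeFixed64.
std::string MakeTableCacheKey(uint64_t file_number) {
  char buf[sizeof(uint64_t)];
  for (size_t i = 0; i < sizeof(buf); ++i) {
    buf[i] = static_cast<char>((file_number >> (8 * i)) & 0xff);  // little-endian
  }
  return std::string(buf, sizeof(buf));  // the Cache::Lookup() key
}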
@@ -7,8 +7,10 @@
 #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_

-#include <string>
 #include <stdint.h>

+#include <string>
+
 #include "db/dbformat.h"
 #include "leveldb/cache.h"
 #include "leveldb/table.h"
@@ -20,40 +22,35 @@ class Env;

 class TableCache {
  public:
-  TableCache(const std::string& dbname, const Options* options, int entries);
+  TableCache(const std::string& dbname, const Options& options, int entries);
   ~TableCache();

   // Return an iterator for the specified file number (the corresponding
   // file length must be exactly "file_size" bytes). If "tableptr" is
-  // non-NULL, also sets "*tableptr" to point to the Table object
-  // underlying the returned iterator, or NULL if no Table object underlies
-  // the returned iterator. The returned "*tableptr" object is owned by
-  // the cache and should not be deleted, and is valid for as long as the
+  // non-null, also sets "*tableptr" to point to the Table object
+  // underlying the returned iterator, or to nullptr if no Table object
+  // underlies the returned iterator. The returned "*tableptr" object is owned
+  // by the cache and should not be deleted, and is valid for as long as the
   // returned iterator is live.
-  Iterator* NewIterator(const ReadOptions& options,
-                        uint64_t file_number,
-                        uint64_t file_size,
-                        Table** tableptr = NULL);
+  Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+                        uint64_t file_size, Table** tableptr = nullptr);

   // If a seek to internal key "k" in specified file finds an entry,
   // call (*handle_result)(arg, found_key, found_value).
-  Status Get(const ReadOptions& options,
-             uint64_t file_number,
-             uint64_t file_size,
-             const Slice& k,
-             void* arg,
+  Status Get(const ReadOptions& options, uint64_t file_number,
+             uint64_t file_size, const Slice& k, void* arg,
              void (*handle_result)(void*, const Slice&, const Slice&));

   // Evict any entry for the specified file number
   void Evict(uint64_t file_number);

  private:
+  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
+
   Env* const env_;
   const std::string dbname_;
-  const Options* options_;
+  const Options& options_;
   Cache* cache_;
-
-  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
 };

 }  // namespace leveldb
@@ -12,15 +12,15 @@ namespace leveldb {
 // Tag numbers for serialized VersionEdit.  These numbers are written to
 // disk and should not be changed.
 enum Tag {
   kComparator = 1,
   kLogNumber = 2,
   kNextFileNumber = 3,
   kLastSequence = 4,
   kCompactPointer = 5,
   kDeletedFile = 6,
   kNewFile = 7,
   // 8 was used for large value refs
   kPrevLogNumber = 9
 };

 void VersionEdit::Clear() {
@@ -66,12 +66,10 @@ void VersionEdit::EncodeTo(std::string* dst) const {
     PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
   }

-  for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+  for (const auto& deleted_file_kvp : deleted_files_) {
     PutVarint32(dst, kDeletedFile);
-    PutVarint32(dst, iter->first);   // level
-    PutVarint64(dst, iter->second);  // file number
+    PutVarint32(dst, deleted_file_kvp.first);   // level
+    PutVarint64(dst, deleted_file_kvp.second);  // file number
   }

   for (size_t i = 0; i < new_files_.size(); i++) {
@@ -97,8 +95,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {

 static bool GetLevel(Slice* input, int* level) {
   uint32_t v;
-  if (GetVarint32(input, &v) &&
-      v < config::kNumLevels) {
+  if (GetVarint32(input, &v) && v < config::kNumLevels) {
     *level = v;
     return true;
   } else {
@@ -109,7 +106,7 @@ static bool GetLevel(Slice* input, int* level) {
 Status VersionEdit::DecodeFrom(const Slice& src) {
   Clear();
   Slice input = src;
-  const char* msg = NULL;
+  const char* msg = nullptr;
   uint32_t tag;

   // Temporary storage for parsing
@@ -119,7 +116,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
   Slice str;
   InternalKey key;

-  while (msg == NULL && GetVarint32(&input, &tag)) {
+  while (msg == nullptr && GetVarint32(&input, &tag)) {
     switch (tag) {
       case kComparator:
         if (GetLengthPrefixedSlice(&input, &str)) {
@@ -163,8 +160,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;

       case kCompactPointer:
-        if (GetLevel(&input, &level) &&
-            GetInternalKey(&input, &key)) {
+        if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
          compact_pointers_.push_back(std::make_pair(level, key));
         } else {
           msg = "compaction pointer";
@@ -172,8 +168,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;

       case kDeletedFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &number)) {
+        if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
           deleted_files_.insert(std::make_pair(level, number));
         } else {
           msg = "deleted file";
@@ -181,8 +176,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;

       case kNewFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &f.number) &&
+        if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
             GetVarint64(&input, &f.file_size) &&
             GetInternalKey(&input, &f.smallest) &&
             GetInternalKey(&input, &f.largest)) {
@@ -198,12 +192,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
     }
   }

-  if (msg == NULL && !input.empty()) {
+  if (msg == nullptr && !input.empty()) {
     msg = "invalid tag";
   }

   Status result;
-  if (msg != NULL) {
+  if (msg != nullptr) {
     result = Status::Corruption("VersionEdit", msg);
   }
   return result;
@@ -238,13 +232,11 @@ std::string VersionEdit::DebugString() const {
     r.append(" ");
     r.append(compact_pointers_[i].second.DebugString());
   }
-  for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+  for (const auto& deleted_files_kvp : deleted_files_) {
     r.append("\n  DeleteFile: ");
-    AppendNumberTo(&r, iter->first);
+    AppendNumberTo(&r, deleted_files_kvp.first);
     r.append(" ");
-    AppendNumberTo(&r, iter->second);
+    AppendNumberTo(&r, deleted_files_kvp.second);
   }
   for (size_t i = 0; i < new_files_.size(); i++) {
     const FileMetaData& f = new_files_[i].second;
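The EncodeTo()/DecodeFrom() hunks above read and write VersionEdit records as a sequence of (tag, payload) pairs, where the tags come from the Tag enum and integers are varint-encoded. A simplified, standalone sketch of that layout (leveldb's real helpers live in util/coding.h; these minimal versions are for illustration only):

#include <cassert>
#include <cstdint>
#include <string>

void PutVarint32(std::string* dst, uint32_t v) {
  while (v >= 0x80) {  // 7 data bits per byte; high bit means "more follows"
    dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

bool GetVarint32(const std::string& src, size_t* pos, uint32_t* v) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28 && *pos < src.size(); shift += 7) {
    uint32_t byte = static_cast<unsigned char>(src[(*pos)++]);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *v = result;
      return true;
    }
  }
  return false;
}

int main() {
  std::string record;
  PutVarint32(&record, 2);     // a tag value (e.g. kLogNumber)
  PutVarint32(&record, 4711);  // its payload
  size_t pos = 0;
  uint32_t tag = 0, value = 0;
  assert(GetVarint32(record, &pos, &tag) && tag == 2);
  assert(GetVarint32(record, &pos, &value) && value == 4711);
}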
@@ -8,6 +8,7 @@
 #include <set>
 #include <utility>
 #include <vector>
+
 #include "db/dbformat.h"

 namespace leveldb {
@@ -15,20 +16,20 @@ namespace leveldb {
 class VersionSet;

 struct FileMetaData {
-  int refs;
-  int allowed_seeks;          // Seeks allowed until compaction
-  uint64_t number;
-  uint64_t file_size;         // File size in bytes
-  InternalKey smallest;       // Smallest internal key served by table
-  InternalKey largest;        // Largest internal key served by table
+  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}

-  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+  int refs;
+  int allowed_seeks;  // Seeks allowed until compaction
+  uint64_t number;
+  uint64_t file_size;    // File size in bytes
+  InternalKey smallest;  // Smallest internal key served by table
+  InternalKey largest;   // Largest internal key served by table
 };

 class VersionEdit {
  public:
   VersionEdit() { Clear(); }
-  ~VersionEdit() { }
+  ~VersionEdit() = default;

   void Clear();

@@ -59,10 +60,8 @@ class VersionEdit {
   // Add the specified file at the specified number.
   // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
   // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
-  void AddFile(int level, uint64_t file,
-               uint64_t file_size,
-               const InternalKey& smallest,
-               const InternalKey& largest) {
+  void AddFile(int level, uint64_t file, uint64_t file_size,
+               const InternalKey& smallest, const InternalKey& largest) {
     FileMetaData f;
     f.number = file;
     f.file_size = file_size;
@@ -84,7 +83,7 @@ class VersionEdit {
  private:
   friend class VersionSet;

-  typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+  typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;

   std::string comparator_;
   uint64_t log_number_;
@@ -97,9 +96,9 @@ class VersionEdit {
   bool has_next_file_number_;
   bool has_last_sequence_;

-  std::vector< std::pair<int, InternalKey> > compact_pointers_;
+  std::vector<std::pair<int, InternalKey> > compact_pointers_;
   DeletedFileSet deleted_files_;
-  std::vector< std::pair<int, FileMetaData> > new_files_;
+  std::vector<std::pair<int, FileMetaData> > new_files_;
 };

 }  // namespace leveldb
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
   ASSERT_EQ(encoded, encoded2);
 }

-class VersionEditTest { };
+class VersionEditTest {};

 TEST(VersionEditTest, EncodeDecode) {
   static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -4,8 +4,10 @@

 #include "db/version_set.h"

-#include <algorithm>
 #include <stdio.h>

+#include <algorithm>
+
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -20,7 +22,7 @@

 namespace leveldb {

-static int TargetFileSize(const Options* options) {
+static size_t TargetFileSize(const Options* options) {
   return options->max_file_size;
 }

@@ -84,8 +86,7 @@ Version::~Version() {
 }

 int FindFile(const InternalKeyComparator& icmp,
-             const std::vector<FileMetaData*>& files,
-             const Slice& key) {
+             const std::vector<FileMetaData*>& files, const Slice& key) {
   uint32_t left = 0;
   uint32_t right = files.size();
   while (left < right) {
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
   return right;
 }

-static bool AfterFile(const Comparator* ucmp,
-                      const Slice* user_key, const FileMetaData* f) {
-  // NULL user_key occurs before all keys and is therefore never after *f
-  return (user_key != NULL &&
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+                      const FileMetaData* f) {
+  // null user_key occurs before all keys and is therefore never after *f
+  return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->largest.user_key()) > 0);
 }

-static bool BeforeFile(const Comparator* ucmp,
-                       const Slice* user_key, const FileMetaData* f) {
-  // NULL user_key occurs after all keys and is therefore never before *f
-  return (user_key != NULL &&
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+                       const FileMetaData* f) {
+  // null user_key occurs after all keys and is therefore never before *f
+  return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
 }

-bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const std::vector<FileMetaData*>& files,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                           bool disjoint_sorted_files,
+                           const std::vector<FileMetaData*>& files,
+                           const Slice* smallest_user_key,
+                           const Slice* largest_user_key) {
   const Comparator* ucmp = icmp.user_comparator();
   if (!disjoint_sorted_files) {
     // Need to check against all files
@@ -141,10 +141,11 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,

   // Binary search over file list
   uint32_t index = 0;
-  if (smallest_user_key != NULL) {
+  if (smallest_user_key != nullptr) {
     // Find the earliest possible internal key for smallest_user_key
-    InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
-    index = FindFile(icmp, files, small.Encode());
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+                          kValueTypeForSeek);
+    index = FindFile(icmp, files, small_key.Encode());
   }

   if (index >= files.size()) {
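FindFile() above is a plain binary search over files sorted by largest key: it returns the index of the first file whose largest key is at or past the search key, or files.size() when the key is beyond every file. A standalone illustration of the same search, with plain integers standing in for internal keys:

#include <cassert>
#include <cstdint>
#include <vector>

size_t FindFirstAtLeast(const std::vector<uint64_t>& largest_keys, uint64_t key) {
  size_t left = 0;
  size_t right = largest_keys.size();
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (largest_keys[mid] < key) {
      left = mid + 1;  // everything at or before mid ends too early
    } else {
      right = mid;     // mid could be the answer; keep it in range
    }
  }
  return right;
}

int main() {
  std::vector<uint64_t> largest = {10, 20, 30};
  assert(FindFirstAtLeast(largest, 15) == 1);
  assert(FindFirstAtLeast(largest, 30) == 2);
  assert(FindFirstAtLeast(largest, 31) == 3);  // past the end: no candidate file
}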
@@ -164,13 +165,9 @@ class Version::LevelFileNumIterator : public Iterator {
  public:
   LevelFileNumIterator(const InternalKeyComparator& icmp,
                        const std::vector<FileMetaData*>* flist)
-      : icmp_(icmp),
-        flist_(flist),
-        index_(flist->size()) {        // Marks as invalid
-  }
-  virtual bool Valid() const {
-    return index_ < flist_->size();
+      : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
   }
+  virtual bool Valid() const { return index_ < flist_->size(); }
   virtual void Seek(const Slice& target) {
     index_ = FindFile(icmp_, *flist_, target);
   }
@@ -197,10 +194,11 @@ class Version::LevelFileNumIterator : public Iterator {
   Slice value() const {
     assert(Valid());
     EncodeFixed64(value_buf_, (*flist_)[index_]->number);
-    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+    EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
     return Slice(value_buf_, sizeof(value_buf_));
   }
   virtual Status status() const { return Status::OK(); }

  private:
   const InternalKeyComparator icmp_;
   const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
   mutable char value_buf_[16];
 };

-static Iterator* GetFileIterator(void* arg,
-                                 const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
                                  const Slice& file_value) {
   TableCache* cache = reinterpret_cast<TableCache*>(arg);
   if (file_value.size() != 16) {
     return NewErrorIterator(
         Status::Corruption("FileReader invoked with unexpected value"));
   } else {
-    return cache->NewIterator(options,
-                              DecodeFixed64(file_value.data()),
+    return cache->NewIterator(options, DecodeFixed64(file_value.data()),
                               DecodeFixed64(file_value.data() + 8));
   }
 }
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
 Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                             int level) const {
   return NewTwoLevelIterator(
-      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
-      &GetFileIterator, vset_->table_cache_, options);
+      new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+      vset_->table_cache_, options);
 }

 void Version::AddIterators(const ReadOptions& options,
                            std::vector<Iterator*>* iters) {
   // Merge all level zero files together since they may overlap
   for (size_t i = 0; i < files_[0].size(); i++) {
-    iters->push_back(
-        vset_->table_cache_->NewIterator(
-            options, files_[0][i]->number, files_[0][i]->file_size));
+    iters->push_back(vset_->table_cache_->NewIterator(
+        options, files_[0][i]->number, files_[0][i]->file_size));
   }

   // For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
   Slice user_key;
   std::string* value;
 };
-}
+}  // namespace
 static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
   Saver* s = reinterpret_cast<Saver*>(arg);
   ParsedInternalKey parsed_key;
@@ -284,8 +279,8 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
   return a->number > b->number;
 }

-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
-                                 void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                  bool (*func)(void*, int, FileMetaData*)) {
   // TODO(sanjay): Change Version::Get() to use this function.
   const Comparator* ucmp = vset_->icmp_.user_comparator();
@@ -329,23 +323,21 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
   }
 }

-Status Version::Get(const ReadOptions& options,
-                    const LookupKey& k,
-                    std::string* value,
-                    GetStats* stats) {
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+                    std::string* value, GetStats* stats) {
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
   Status s;

-  stats->seek_file = NULL;
+  stats->seek_file = nullptr;
   stats->seek_file_level = -1;
-  FileMetaData* last_file_read = NULL;
+  FileMetaData* last_file_read = nullptr;
   int last_file_read_level = -1;

   // We can search level-by-level since entries never hop across
   // levels.  Therefore we are guaranteed that if we find data
-  // in an smaller level, later levels are irrelevant.
+  // in a smaller level, later levels are irrelevant.
   std::vector<FileMetaData*> tmp;
   FileMetaData* tmp2;
   for (int level = 0; level < config::kNumLevels; level++) {
@@ -374,13 +366,13 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
       // Binary search to find earliest index whose largest key >= ikey.
       uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
       if (index >= num_files) {
-        files = NULL;
+        files = nullptr;
         num_files = 0;
       } else {
         tmp2 = files[index];
         if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
           // All of "tmp2" is past any data for user_key
-          files = NULL;
+          files = nullptr;
           num_files = 0;
         } else {
           files = &tmp2;
@@ -390,7 +382,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     }

     for (uint32_t i = 0; i < num_files; ++i) {
-      if (last_file_read != NULL && stats->seek_file == NULL) {
+      if (last_file_read != nullptr && stats->seek_file == nullptr) {
         // We have had more than one seek for this read.  Charge the 1st file.
         stats->seek_file = last_file_read;
         stats->seek_file_level = last_file_read_level;
@@ -405,14 +397,14 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
       saver.ucmp = ucmp;
       saver.user_key = user_key;
       saver.value = value;
-      s = vset_->table_cache_->Get(options, f->number, f->file_size,
-                                   ikey, &saver, SaveValue);
+      s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
+                                   &saver, SaveValue);
       if (!s.ok()) {
         return s;
       }
       switch (saver.state) {
         case kNotFound:
           break;  // Keep searching in other files
         case kFound:
           return s;
         case kDeleted:
@@ -430,9 +422,9 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,

 bool Version::UpdateStats(const GetStats& stats) {
   FileMetaData* f = stats.seek_file;
-  if (f != NULL) {
+  if (f != nullptr) {
     f->allowed_seeks--;
-    if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
+    if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
       file_to_compact_ = f;
       file_to_compact_level_ = stats.seek_file_level;
       return true;
@@ -479,9 +471,7 @@ bool Version::RecordReadSample(Slice internal_key) {
   return false;
 }

-void Version::Ref() {
-  ++refs_;
-}
+void Version::Ref() { ++refs_; }

 void Version::Unref() {
   assert(this != &vset_->dummy_versions_);
@@ -492,16 +482,14 @@ void Version::Unref() {
   }
 }

-bool Version::OverlapInLevel(int level,
-                             const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
                              const Slice* largest_user_key) {
   return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                                smallest_user_key, largest_user_key);
 }

-int Version::PickLevelForMemTableOutput(
-    const Slice& smallest_user_key,
-    const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+                                        const Slice& largest_user_key) {
   int level = 0;
   if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
     // Push to next level if there is no overlap in next level,
@@ -528,40 +516,39 @@ int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
 }

 // Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
-    int level,
-    const InternalKey* begin,
-    const InternalKey* end,
-    std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+                                   const InternalKey* end,
+                                   std::vector<FileMetaData*>* inputs) {
   assert(level >= 0);
   assert(level < config::kNumLevels);
   inputs->clear();
   Slice user_begin, user_end;
-  if (begin != NULL) {
+  if (begin != nullptr) {
     user_begin = begin->user_key();
   }
-  if (end != NULL) {
+  if (end != nullptr) {
     user_end = end->user_key();
   }
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
-  for (size_t i = 0; i < files_[level].size(); ) {
+  for (size_t i = 0; i < files_[level].size();) {
     FileMetaData* f = files_[level][i++];
     const Slice file_start = f->smallest.user_key();
     const Slice file_limit = f->largest.user_key();
-    if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
+    if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
       // "f" is completely before specified range; skip it
-    } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
+    } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
       // "f" is completely after specified range; skip it
     } else {
       inputs->push_back(f);
       if (level == 0) {
         // Level-0 files may overlap each other.  So check if the newly
         // added file has expanded the range.  If so, restart search.
-        if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
+        if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
           user_begin = file_start;
           inputs->clear();
           i = 0;
-        } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
+        } else if (end != nullptr &&
+                   user_cmp->Compare(file_limit, user_end) > 0) {
           user_end = file_limit;
           inputs->clear();
           i = 0;
@@ -629,9 +616,7 @@ class VersionSet::Builder {

  public:
   // Initialize a builder with the files from *base and other info from *vset
-  Builder(VersionSet* vset, Version* base)
-      : vset_(vset),
-        base_(base) {
+  Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
     base_->Ref();
     BySmallestKey cmp;
     cmp.internal_comparator = &vset_->icmp_;
@@ -645,8 +630,8 @@ class VersionSet::Builder {
       const FileSet* added = levels_[level].added_files;
       std::vector<FileMetaData*> to_unref;
       to_unref.reserve(added->size());
-      for (FileSet::const_iterator it = added->begin();
-           it != added->end(); ++it) {
+      for (FileSet::const_iterator it = added->begin(); it != added->end();
+           ++it) {
         to_unref.push_back(*it);
       }
       delete added;
@@ -671,12 +656,9 @@ class VersionSet::Builder {
     }

     // Delete files
-    const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
-    for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
-         iter != del.end();
-         ++iter) {
-      const int level = iter->first;
-      const uint64_t number = iter->second;
+    for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
+      const int level = deleted_file_set_kvp.first;
+      const uint64_t number = deleted_file_set_kvp.second;
       levels_[level].deleted_files.insert(number);
     }

@@ -699,7 +681,7 @@ class VersionSet::Builder {
       // same as the compaction of 40KB of data.  We are a little
       // conservative and allow approximately one seek for every 16KB
       // of data before triggering a compaction.
-      f->allowed_seeks = (f->file_size / 16384);
+      f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;

      levels_[level].deleted_files.erase(f->number);
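A quick worked example of the allowed_seeks heuristic in the hunk above: one seek is budgeted as roughly 16KB of future compaction work, with a floor of 100 so small files are not scheduled for compaction after only a handful of seeks. Illustrative only:

#include <cassert>
#include <cstdint>

int AllowedSeeks(uint64_t file_size) {
  int allowed = static_cast<int>(file_size / 16384U);  // one seek per 16KB
  return allowed < 100 ? 100 : allowed;                // never below 100
}

int main() {
  assert(AllowedSeeks(2 * 1024 * 1024) == 128);  // a 2MB table
  assert(AllowedSeeks(64 * 1024) == 100);        // small tables hit the floor
}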
@@ -717,20 +699,17 @@ class VersionSet::Builder {
       const std::vector<FileMetaData*>& base_files = base_->files_[level];
       std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
       std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
-      const FileSet* added = levels_[level].added_files;
-      v->files_[level].reserve(base_files.size() + added->size());
-      for (FileSet::const_iterator added_iter = added->begin();
-           added_iter != added->end();
-           ++added_iter) {
+      const FileSet* added_files = levels_[level].added_files;
+      v->files_[level].reserve(base_files.size() + added_files->size());
+      for (const auto& added_file : *added_files) {
         // Add all smaller files listed in base_
-        for (std::vector<FileMetaData*>::const_iterator bpos
-                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
-             base_iter != bpos;
-             ++base_iter) {
+        for (std::vector<FileMetaData*>::const_iterator bpos =
+                 std::upper_bound(base_iter, base_end, added_file, cmp);
+             base_iter != bpos; ++base_iter) {
           MaybeAddFile(v, level, *base_iter);
         }

-        MaybeAddFile(v, level, *added_iter);
+        MaybeAddFile(v, level, added_file);
       }

       // Add remaining base files
@@ -742,7 +721,7 @@ class VersionSet::Builder {
     // Make sure there is no overlap in levels > 0
     if (level > 0) {
       for (uint32_t i = 1; i < v->files_[level].size(); i++) {
-        const InternalKey& prev_end = v->files_[level][i-1]->largest;
+        const InternalKey& prev_end = v->files_[level][i - 1]->largest;
         const InternalKey& this_begin = v->files_[level][i]->smallest;
         if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
           fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
@@ -763,7 +742,7 @@ class VersionSet::Builder {
     std::vector<FileMetaData*>* files = &v->files_[level];
     if (level > 0 && !files->empty()) {
       // Must not overlap
-      assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+      assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
                                   f->smallest) < 0);
     }
     f->refs++;
@@ -772,8 +751,7 @@ class VersionSet::Builder {
   }
 };

-VersionSet::VersionSet(const std::string& dbname,
-                       const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
                        TableCache* table_cache,
                        const InternalKeyComparator* cmp)
     : env_(options->env),
@@ -786,10 +764,10 @@ VersionSet::VersionSet(const std::string& dbname, const Options* options,
       last_sequence_(0),
       log_number_(0),
       prev_log_number_(0),
-      descriptor_file_(NULL),
-      descriptor_log_(NULL),
+      descriptor_file_(nullptr),
+      descriptor_log_(nullptr),
       dummy_versions_(this),
-      current_(NULL) {
+      current_(nullptr) {
   AppendVersion(new Version(this));
 }

@@ -804,7 +782,7 @@ void VersionSet::AppendVersion(Version* v) {
   // Make "v" current
   assert(v->refs_ == 0);
   assert(v != current_);
-  if (current_ != NULL) {
+  if (current_ != nullptr) {
     current_->Unref();
   }
   current_ = v;
@@ -844,10 +822,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
   // a temporary file that contains a snapshot of the current version.
   std::string new_manifest_file;
   Status s;
-  if (descriptor_log_ == NULL) {
+  if (descriptor_log_ == nullptr) {
     // No reason to unlock *mu here since we only hit this path in the
     // first call to LogAndApply (when opening the database).
-    assert(descriptor_file_ == NULL);
+    assert(descriptor_file_ == nullptr);
     new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
     edit->SetNextFile(next_file_number_);
     s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
@@ -893,8 +871,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
     if (!new_manifest_file.empty()) {
       delete descriptor_log_;
       delete descriptor_file_;
-      descriptor_log_ = NULL;
-      descriptor_file_ = NULL;
+      descriptor_log_ = nullptr;
+      descriptor_file_ = nullptr;
       env_->DeleteFile(new_manifest_file);
     }
   }
@@ -902,7 +880,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
   return s;
 }

-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
   struct LogReporter : public log::Reader::Reporter {
     Status* status;
     virtual void Corruption(size_t bytes, const Status& s) {
@@ -916,7 +894,7 @@ Status VersionSet::Recover(bool *save_manifest) {
   if (!s.ok()) {
     return s;
   }
-  if (current.empty() || current[current.size()-1] != '\n') {
+  if (current.empty() || current[current.size() - 1] != '\n') {
     return Status::Corruption("CURRENT file does not end with newline");
   }
   current.resize(current.size() - 1);
@@ -925,6 +903,10 @@ Status VersionSet::Recover(bool* save_manifest) {
   SequentialFile* file;
   s = env_->NewSequentialFile(dscname, &file);
   if (!s.ok()) {
+    if (s.IsNotFound()) {
+      return Status::Corruption("CURRENT points to a non-existent file",
+                                s.ToString());
+    }
     return s;
   }

@@ -941,7 +923,8 @@ Status VersionSet::Recover(bool* save_manifest) {
   {
     LogReporter reporter;
     reporter.status = &s;
-    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+    log::Reader reader(file, &reporter, true /*checksum*/,
+                       0 /*initial_offset*/);
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -982,7 +965,7 @@ Status VersionSet::Recover(bool* save_manifest) {
     }
   }
   delete file;
-  file = NULL;
+  file = nullptr;

   if (s.ok()) {
     if (!have_next_file) {
@@ -1040,12 +1023,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname,
     return false;
   }

-  assert(descriptor_file_ == NULL);
-  assert(descriptor_log_ == NULL);
+  assert(descriptor_file_ == nullptr);
+  assert(descriptor_log_ == nullptr);
   Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
   if (!r.ok()) {
     Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
-    assert(descriptor_file_ == NULL);
+    assert(descriptor_file_ == nullptr);
     return false;
   }

@@ -1066,7 +1049,7 @@ void VersionSet::Finalize(Version* v) {
   int best_level = -1;
   double best_score = -1;

-  for (int level = 0; level < config::kNumLevels-1; level++) {
+  for (int level = 0; level < config::kNumLevels - 1; level++) {
     double score;
     if (level == 0) {
       // We treat level-0 specially by bounding the number of files
@@ -1081,7 +1064,7 @@ void VersionSet::Finalize(Version* v) {
       // setting, or very high compression ratios, or lots of
       // overwrites/deletions).
       score = v->files_[level].size() /
              static_cast<double>(config::kL0_CompactionTrigger);
     } else {
       // Compute the ratio of current size to size limit.
       const uint64_t level_bytes = TotalFileSize(v->files_[level]);
@@ -1137,16 +1120,12 @@ int VersionSet::NumLevelFiles(int level) const {

 const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
   // Update code if kNumLevels changes
-  assert(config::kNumLevels == 7);
+  static_assert(config::kNumLevels == 7, "");
   snprintf(scratch->buffer, sizeof(scratch->buffer),
-           "files[ %d %d %d %d %d %d %d ]",
-           int(current_->files_[0].size()),
-           int(current_->files_[1].size()),
-           int(current_->files_[2].size()),
-           int(current_->files_[3].size()),
-           int(current_->files_[4].size()),
-           int(current_->files_[5].size()),
-           int(current_->files_[6].size()));
+           "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+           int(current_->files_[1].size()), int(current_->files_[2].size()),
+           int(current_->files_[3].size()), int(current_->files_[4].size()),
+           int(current_->files_[5].size()), int(current_->files_[6].size()));
   return scratch->buffer;
 }

@@ -1172,7 +1151,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
         Table* tableptr;
         Iterator* iter = table_cache_->NewIterator(
             ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
-        if (tableptr != NULL) {
+        if (tableptr != nullptr) {
           result += tableptr->ApproximateOffsetOf(ikey.Encode());
         }
         delete iter;
@@ -1183,8 +1162,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
 }

 void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
-  for (Version* v = dummy_versions_.next_;
-       v != &dummy_versions_;
+  for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
        v = v->next_) {
     for (int level = 0; level < config::kNumLevels; level++) {
       const std::vector<FileMetaData*>& files = v->files_[level];
@@ -1207,7 +1185,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
   for (int level = 1; level < config::kNumLevels - 1; level++) {
     for (size_t i = 0; i < current_->files_[level].size(); i++) {
       const FileMetaData* f = current_->files_[level][i];
-      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+      current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
                                      &overlaps);
       const int64_t sum = TotalFileSize(overlaps);
       if (sum > result) {
@@ -1222,8 +1200,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
 // *smallest, *largest.
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
-                          InternalKey* smallest,
-                          InternalKey* largest) {
+                          InternalKey* smallest, InternalKey* largest) {
   assert(!inputs.empty());
   smallest->Clear();
   largest->Clear();
@@ -1248,8 +1225,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                            const std::vector<FileMetaData*>& inputs2,
-                           InternalKey* smallest,
-                           InternalKey* largest) {
+                           InternalKey* smallest, InternalKey* largest) {
   std::vector<FileMetaData*> all = inputs1;
   all.insert(all.end(), inputs2.begin(), inputs2.end());
   GetRange(all, smallest, largest);
@@ -1271,8 +1247,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
     if (c->level() + which == 0) {
       const std::vector<FileMetaData*>& files = c->inputs_[which];
       for (size_t i = 0; i < files.size(); i++) {
-        list[num++] = table_cache_->NewIterator(
-            options, files[i]->number, files[i]->file_size);
+        list[num++] = table_cache_->NewIterator(options, files[i]->number,
+                                                files[i]->file_size);
       }
     } else {
       // Create concatenating iterator for the files from this level
@@ -1295,11 +1271,11 @@ Compaction* VersionSet::PickCompaction() {
   // We prefer compactions triggered by too much data in a level over
   // the compactions triggered by seeks.
   const bool size_compaction = (current_->compaction_score_ >= 1);
-  const bool seek_compaction = (current_->file_to_compact_ != NULL);
+  const bool seek_compaction = (current_->file_to_compact_ != nullptr);
   if (size_compaction) {
     level = current_->compaction_level_;
     assert(level >= 0);
-    assert(level+1 < config::kNumLevels);
+    assert(level + 1 < config::kNumLevels);
     c = new Compaction(options_, level);

     // Pick the first file that comes after compact_pointer_[level]
@@ -1320,7 +1296,7 @@ Compaction* VersionSet::PickCompaction() {
     c = new Compaction(options_, level);
     c->inputs_[0].push_back(current_->file_to_compact_);
   } else {
-    return NULL;
+    return nullptr;
   }

   c->input_version_ = current_;
@ -1342,12 +1318,94 @@ Compaction* VersionSet::PickCompaction() {
|
|||||||
return c;
|
return c;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Finds the largest key in a vector of files. Returns true if files it not
|
||||||
|
// empty.
|
||||||
|
bool FindLargestKey(const InternalKeyComparator& icmp,
|
||||||
|
const std::vector<FileMetaData*>& files,
|
||||||
|
InternalKey* largest_key) {
|
||||||
|
if (files.empty()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
*largest_key = files[0]->largest;
|
||||||
|
for (size_t i = 1; i < files.size(); ++i) {
|
||||||
|
FileMetaData* f = files[i];
|
||||||
|
if (icmp.Compare(f->largest, *largest_key) > 0) {
|
||||||
|
*largest_key = f->largest;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds minimum file b2=(l2, u2) in level file for which l2 > u1 and
|
||||||
|
// user_key(l2) = user_key(u1)
|
||||||
|
FileMetaData* FindSmallestBoundaryFile(
|
||||||
|
const InternalKeyComparator& icmp,
|
||||||
|
const std::vector<FileMetaData*>& level_files,
|
||||||
|
const InternalKey& largest_key) {
|
||||||
|
const Comparator* user_cmp = icmp.user_comparator();
|
||||||
|
FileMetaData* smallest_boundary_file = nullptr;
|
||||||
|
for (size_t i = 0; i < level_files.size(); ++i) {
|
||||||
|
FileMetaData* f = level_files[i];
|
||||||
|
if (icmp.Compare(f->smallest, largest_key) > 0 &&
|
||||||
|
user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
|
||||||
|
0) {
|
||||||
|
if (smallest_boundary_file == nullptr ||
|
||||||
|
icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
|
||||||
|
smallest_boundary_file = f;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return smallest_boundary_file;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extracts the largest file b1 from |compaction_files| and then searches for a
|
||||||
|
// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
|
||||||
|
// file b2 (known as a boundary file) it adds it to |compaction_files| and then
|
||||||
|
// searches again using this new upper bound.
|
||||||
|
//
|
||||||
|
// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
|
||||||
|
// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a
|
||||||
|
// subsequent get operation will yield an incorrect result because it will
|
||||||
|
// return the record from b2 in level i rather than from b1 because it searches
|
||||||
|
// level by level for records matching the supplied user key.
|
||||||
|
//
|
||||||
|
// parameters:
|
||||||
|
// in level_files: List of files to search for boundary files.
|
||||||
|
// in/out compaction_files: List of files to extend by adding boundary files.
|
||||||
|
void AddBoundaryInputs(const InternalKeyComparator& icmp,
|
||||||
|
const std::vector<FileMetaData*>& level_files,
|
||||||
|
std::vector<FileMetaData*>* compaction_files) {
|
||||||
|
InternalKey largest_key;
|
||||||
|
|
||||||
|
// Quick return if compaction_files is empty.
|
||||||
|
if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool continue_searching = true;
|
||||||
|
while (continue_searching) {
|
||||||
|
FileMetaData* smallest_boundary_file =
|
||||||
|
FindSmallestBoundaryFile(icmp, level_files, largest_key);
|
||||||
|
|
||||||
|
// If a boundary file was found advance largest_key, otherwise we're done.
|
||||||
|
if (smallest_boundary_file != NULL) {
|
||||||
|
compaction_files->push_back(smallest_boundary_file);
|
||||||
|
largest_key = smallest_boundary_file->largest;
|
||||||
|
} else {
|
||||||
|
continue_searching = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void VersionSet::SetupOtherInputs(Compaction* c) {
|
void VersionSet::SetupOtherInputs(Compaction* c) {
|
||||||
const int level = c->level();
|
const int level = c->level();
|
||||||
InternalKey smallest, largest;
|
InternalKey smallest, largest;
|
||||||
|
|
||||||
|
AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
|
||||||
GetRange(c->inputs_[0], &smallest, &largest);
|
GetRange(c->inputs_[0], &smallest, &largest);
|
||||||
|
|
||||||
current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
|
current_->GetOverlappingInputs(level + 1, &smallest, &largest,
|
||||||
|
&c->inputs_[1]);
|
||||||
|
|
||||||
// Get entire range covered by compaction
|
// Get entire range covered by compaction
|
||||||
InternalKey all_start, all_limit;
|
InternalKey all_start, all_limit;
|
||||||
@ -1358,6 +1416,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
if (!c->inputs_[1].empty()) {
|
if (!c->inputs_[1].empty()) {
|
||||||
std::vector<FileMetaData*> expanded0;
|
std::vector<FileMetaData*> expanded0;
|
||||||
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
|
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
|
||||||
|
AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
|
||||||
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
|
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
|
||||||
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
|
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
|
||||||
const int64_t expanded0_size = TotalFileSize(expanded0);
|
const int64_t expanded0_size = TotalFileSize(expanded0);
|
||||||
@ -1367,18 +1426,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
InternalKey new_start, new_limit;
|
InternalKey new_start, new_limit;
|
||||||
GetRange(expanded0, &new_start, &new_limit);
|
GetRange(expanded0, &new_start, &new_limit);
|
||||||
std::vector<FileMetaData*> expanded1;
|
std::vector<FileMetaData*> expanded1;
|
||||||
current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
|
current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
|
||||||
&expanded1);
|
&expanded1);
|
||||||
if (expanded1.size() == c->inputs_[1].size()) {
|
if (expanded1.size() == c->inputs_[1].size()) {
|
||||||
Log(options_->info_log,
|
Log(options_->info_log,
|
||||||
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
|
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
|
||||||
level,
|
level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
|
||||||
int(c->inputs_[0].size()),
|
long(inputs0_size), long(inputs1_size), int(expanded0.size()),
|
||||||
int(c->inputs_[1].size()),
|
int(expanded1.size()), long(expanded0_size), long(inputs1_size));
|
||||||
long(inputs0_size), long(inputs1_size),
|
|
||||||
int(expanded0.size()),
|
|
||||||
int(expanded1.size()),
|
|
||||||
long(expanded0_size), long(inputs1_size));
|
|
||||||
smallest = new_start;
|
smallest = new_start;
|
||||||
largest = new_limit;
|
largest = new_limit;
|
||||||
c->inputs_[0] = expanded0;
|
c->inputs_[0] = expanded0;
|
||||||
@ -1395,13 +1450,6 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
&c->grandparents_);
|
&c->grandparents_);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (false) {
|
|
||||||
Log(options_->info_log, "Compacting %d '%s' .. '%s'",
|
|
||||||
level,
|
|
||||||
smallest.DebugString().c_str(),
|
|
||||||
largest.DebugString().c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the place where we will do the next compaction for this level.
|
// Update the place where we will do the next compaction for this level.
|
||||||
// We update this immediately instead of waiting for the VersionEdit
|
// We update this immediately instead of waiting for the VersionEdit
|
||||||
// to be applied so that if the compaction fails, we will try a different
|
// to be applied so that if the compaction fails, we will try a different
|
||||||
@ -1410,14 +1458,12 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
c->edit_.SetCompactPointer(level, largest);
|
c->edit_.SetCompactPointer(level, largest);
|
||||||
}
|
}
|
||||||
|
|
||||||
Compaction* VersionSet::CompactRange(
|
Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
|
||||||
int level,
|
const InternalKey* end) {
|
||||||
const InternalKey* begin,
|
|
||||||
const InternalKey* end) {
|
|
||||||
std::vector<FileMetaData*> inputs;
|
std::vector<FileMetaData*> inputs;
|
||||||
current_->GetOverlappingInputs(level, begin, end, &inputs);
|
current_->GetOverlappingInputs(level, begin, end, &inputs);
|
||||||
if (inputs.empty()) {
|
if (inputs.empty()) {
|
||||||
return NULL;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoid compacting too much in one shot in case the range is large.
|
// Avoid compacting too much in one shot in case the range is large.
|
||||||
@ -1448,7 +1494,7 @@ Compaction* VersionSet::CompactRange(
|
|||||||
Compaction::Compaction(const Options* options, int level)
|
Compaction::Compaction(const Options* options, int level)
|
||||||
: level_(level),
|
: level_(level),
|
||||||
max_output_file_size_(MaxFileSizeForLevel(options, level)),
|
max_output_file_size_(MaxFileSizeForLevel(options, level)),
|
||||||
input_version_(NULL),
|
input_version_(nullptr),
|
||||||
grandparent_index_(0),
|
grandparent_index_(0),
|
||||||
seen_key_(false),
|
seen_key_(false),
|
||||||
overlapped_bytes_(0) {
|
overlapped_bytes_(0) {
|
||||||
@ -1458,7 +1504,7 @@ Compaction::Compaction(const Options* options, int level)
|
|||||||
}
|
}
|
||||||
|
|
||||||
Compaction::~Compaction() {
|
Compaction::~Compaction() {
|
||||||
if (input_version_ != NULL) {
|
if (input_version_ != nullptr) {
|
||||||
input_version_->Unref();
|
input_version_->Unref();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1486,7 +1532,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
|
|||||||
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
|
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
|
||||||
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
|
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
|
||||||
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
|
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
|
||||||
for (; level_ptrs_[lvl] < files.size(); ) {
|
for (; level_ptrs_[lvl] < files.size();) {
|
||||||
FileMetaData* f = files[level_ptrs_[lvl]];
|
FileMetaData* f = files[level_ptrs_[lvl]];
|
||||||
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
|
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
|
||||||
// We've advanced far enough
|
// We've advanced far enough
|
||||||
@ -1507,8 +1553,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
|
|||||||
// Scan to find earliest grandparent file that contains key.
|
// Scan to find earliest grandparent file that contains key.
|
||||||
const InternalKeyComparator* icmp = &vset->icmp_;
|
const InternalKeyComparator* icmp = &vset->icmp_;
|
||||||
while (grandparent_index_ < grandparents_.size() &&
|
while (grandparent_index_ < grandparents_.size() &&
|
||||||
icmp->Compare(internal_key,
|
icmp->Compare(internal_key,
|
||||||
grandparents_[grandparent_index_]->largest.Encode()) > 0) {
|
grandparents_[grandparent_index_]->largest.Encode()) >
|
||||||
|
0) {
|
||||||
if (seen_key_) {
|
if (seen_key_) {
|
||||||
overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
|
overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
|
||||||
}
|
}
|
||||||
@ -1526,9 +1573,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void Compaction::ReleaseInputs() {
|
void Compaction::ReleaseInputs() {
|
||||||
if (input_version_ != NULL) {
|
if (input_version_ != nullptr) {
|
||||||
input_version_->Unref();
|
input_version_->Unref();
|
||||||
input_version_ = NULL;
|
input_version_ = nullptr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
125
db/version_set.h
125
db/version_set.h
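The comment block above AddBoundaryInputs is the heart of this change. As a quick illustration of the hazard it describes (a minimal sketch, not part of the commit, using simplified stand-ins for leveldb's internal key and file metadata types): two files whose ranges touch at the same user key but at different sequence numbers must be compacted together, or a later lookup can surface the older record from the lower-numbered level.

```c++
#include <cstdint>
#include <iostream>
#include <string>

// Simplified stand-ins for leveldb's InternalKey/FileMetaData, for
// illustration only. Larger sequence numbers are newer entries.
struct Key { std::string user_key; uint64_t sequence; };
struct File { Key smallest, largest; };

// b2 is a "boundary file" of b1 if it starts at the same user key that b1
// ends with, but at an older (smaller) sequence number.
bool IsBoundaryFile(const File& b1, const File& b2) {
  return b1.largest.user_key == b2.smallest.user_key &&
         b2.smallest.sequence < b1.largest.sequence;
}

int main() {
  // b1 holds ("100", seq 6..5); b2 holds ("100", seq 2) .. ("300", seq 1),
  // mirroring the TestTwoBoundaryFiles case added in the test file below.
  File b1{{"100", 6}, {"100", 5}};
  File b2{{"100", 2}, {"300", 1}};
  // If only b1 were compacted to the next level, a Get("100") that searches
  // the current level first would return the stale record from b2.
  // AddBoundaryInputs pulls b2 into the compaction as well.
  std::cout << "b2 is a boundary file of b1: " << IsBoundaryFile(b1, b2)
            << std::endl;
  return 0;
}
```

db/version_set.h (125 lines changed)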
@@ -18,6 +18,7 @@
 #include <map>
 #include <set>
 #include <vector>

 #include "db/dbformat.h"
 #include "db/version_edit.h"
 #include "port/port.h"
@@ -25,7 +26,9 @@

 namespace leveldb {

-namespace log { class Writer; }
+namespace log {
+class Writer;
+}

 class Compaction;
 class Iterator;
@@ -39,30 +42,23 @@ class WritableFile;
 // Return the smallest index i such that files[i]->largest >= key.
 // Return files.size() if there is no such file.
 // REQUIRES: "files" contains a sorted list of non-overlapping files.
-extern int FindFile(const InternalKeyComparator& icmp,
-                    const std::vector<FileMetaData*>& files,
-                    const Slice& key);
+int FindFile(const InternalKeyComparator& icmp,
+             const std::vector<FileMetaData*>& files, const Slice& key);

 // Returns true iff some file in "files" overlaps the user key range
 // [*smallest,*largest].
-// smallest==NULL represents a key smaller than all keys in the DB.
-// largest==NULL represents a key largest than all keys in the DB.
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key largest than all keys in the DB.
 // REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
 // in sorted order.
-extern bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const std::vector<FileMetaData*>& files,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key);
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                           bool disjoint_sorted_files,
+                           const std::vector<FileMetaData*>& files,
+                           const Slice* smallest_user_key,
+                           const Slice* largest_user_key);

 class Version {
  public:
-  // Append to *iters a sequence of iterators that will
-  // yield the contents of this Version when merged together.
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
-
   // Lookup the value for key.  If found, store it in *val and
   // return OK.  Else return a non-OK status.  Fills *stats.
   // REQUIRES: lock is not held
@@ -70,6 +66,12 @@ class Version {
     FileMetaData* seek_file;
     int seek_file_level;
   };

+  // Append to *iters a sequence of iterators that will
+  // yield the contents of this Version when merged together.
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
+
   Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
              GetStats* stats);

@@ -91,16 +93,15 @@ class Version {

   void GetOverlappingInputs(
       int level,
-      const InternalKey* begin,  // NULL means before all keys
-      const InternalKey* end,    // NULL means after all keys
+      const InternalKey* begin,  // nullptr means before all keys
+      const InternalKey* end,    // nullptr means after all keys
       std::vector<FileMetaData*>* inputs);

   // Returns true iff some file in the specified level overlaps
   // some part of [*smallest_user_key,*largest_user_key].
-  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
-  // largest_user_key==NULL represents a key largest than all keys in the DB.
-  bool OverlapInLevel(int level,
-                      const Slice* smallest_user_key,
+  // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
+  // largest_user_key==nullptr represents a key largest than all the DB's keys.
+  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                       const Slice* largest_user_key);

   // Return the level at which we should place a new memtable compaction
@@ -118,6 +119,22 @@ class Version {
   friend class VersionSet;

   class LevelFileNumIterator;

+  explicit Version(VersionSet* vset)
+      : vset_(vset),
+        next_(this),
+        prev_(this),
+        refs_(0),
+        file_to_compact_(nullptr),
+        file_to_compact_level_(-1),
+        compaction_score_(-1),
+        compaction_level_(-1) {}
+
+  Version(const Version&) = delete;
+  Version& operator=(const Version&) = delete;
+
+  ~Version();
+
   Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;

   // Call func(arg, level, f) for every file that overlaps user_key in
@@ -125,14 +142,13 @@ class Version {
   // false, makes no more calls.
   //
   // REQUIRES: user portion of internal_key == user_key.
-  void ForEachOverlapping(Slice user_key, Slice internal_key,
-                          void* arg,
+  void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                           bool (*func)(void*, int, FileMetaData*));

   VersionSet* vset_;  // VersionSet to which this Version belongs
   Version* next_;     // Next version in linked list
   Version* prev_;     // Previous version in linked list
   int refs_;          // Number of live refs to this version

   // List of files per level
   std::vector<FileMetaData*> files_[config::kNumLevels];
@@ -146,28 +162,15 @@ class Version {
   // are initialized by Finalize().
   double compaction_score_;
   int compaction_level_;
-
-  explicit Version(VersionSet* vset)
-      : vset_(vset), next_(this), prev_(this), refs_(0),
-        file_to_compact_(NULL),
-        file_to_compact_level_(-1),
-        compaction_score_(-1),
-        compaction_level_(-1) {
-  }
-
-  ~Version();
-
-  // No copying allowed
-  Version(const Version&);
-  void operator=(const Version&);
 };

 class VersionSet {
  public:
-  VersionSet(const std::string& dbname,
-             const Options* options,
-             TableCache* table_cache,
-             const InternalKeyComparator*);
+  VersionSet(const std::string& dbname, const Options* options,
+             TableCache* table_cache, const InternalKeyComparator*);
+  VersionSet(const VersionSet&) = delete;
+  VersionSet& operator=(const VersionSet&) = delete;

   ~VersionSet();

   // Apply *edit to the current version to form a new descriptor that
@@ -179,7 +182,7 @@ class VersionSet {
       EXCLUSIVE_LOCKS_REQUIRED(mu);

   // Recover the last saved descriptor from persistent storage.
-  Status Recover(bool *save_manifest);
+  Status Recover(bool* save_manifest);

   // Return the current version.
   Version* current() const { return current_; }
@@ -225,19 +228,17 @@ class VersionSet {
   uint64_t PrevLogNumber() const { return prev_log_number_; }

   // Pick level and inputs for a new compaction.
-  // Returns NULL if there is no compaction to be done.
+  // Returns nullptr if there is no compaction to be done.
   // Otherwise returns a pointer to a heap-allocated object that
   // describes the compaction.  Caller should delete the result.
   Compaction* PickCompaction();

   // Return a compaction object for compacting the range [begin,end] in
-  // the specified level.  Returns NULL if there is nothing in that
+  // the specified level.  Returns nullptr if there is nothing in that
   // level that overlaps the specified range.  Caller should delete
   // the result.
-  Compaction* CompactRange(
-      int level,
-      const InternalKey* begin,
-      const InternalKey* end);
+  Compaction* CompactRange(int level, const InternalKey* begin,
+                           const InternalKey* end);

   // Return the maximum overlapping data (in bytes) at next level for any
   // file at a level >= 1.
@@ -250,7 +251,7 @@ class VersionSet {
   // Returns true iff some level needs a compaction.
   bool NeedsCompaction() const {
     Version* v = current_;
-    return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL);
+    return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr);
   }

   // Add all files listed in any live version to *live.
@@ -278,14 +279,12 @@ class VersionSet {

   void Finalize(Version* v);

-  void GetRange(const std::vector<FileMetaData*>& inputs,
-                InternalKey* smallest,
+  void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
                 InternalKey* largest);

   void GetRange2(const std::vector<FileMetaData*>& inputs1,
                  const std::vector<FileMetaData*>& inputs2,
-                 InternalKey* smallest,
-                 InternalKey* largest);
+                 InternalKey* smallest, InternalKey* largest);

   void SetupOtherInputs(Compaction* c);

@@ -314,10 +313,6 @@ class VersionSet {
   // Per-level key at which the next compaction at that level should start.
   // Either an empty string, or a valid InternalKey.
   std::string compact_pointer_[config::kNumLevels];
-
-  // No copying allowed
-  VersionSet(const VersionSet&);
-  void operator=(const VersionSet&);
 };

 // A Compaction encapsulates information about a compaction.
@@ -374,9 +369,9 @@ class Compaction {
   VersionEdit edit_;

   // Each compaction reads inputs from "level_" and "level_+1"
   std::vector<FileMetaData*> inputs_[2];  // The two sets of inputs

-  // State used to check for number of of overlapping grandparent files
+  // State used to check for number of overlapping grandparent files
   // (parent == level_ + 1, grandparent == level_ + 2)
   std::vector<FileMetaData*> grandparents_;
   size_t grandparent_index_;  // Index in grandparent_starts_
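FindFile's contract above ("smallest index i such that files[i]->largest >= key", over a sorted list of non-overlapping files) is a plain lower-bound search. A minimal sketch of that contract on a simplified file list (illustrative only, not the library's implementation; FileInfo and FindFileSketch are invented names):

```c++
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Illustrative stand-in: each file only tracks its largest key.
struct FileInfo { std::string largest; };

// Returns the smallest index i such that files[i].largest >= key,
// or files.size() if there is no such file.
size_t FindFileSketch(const std::vector<FileInfo>& files,
                      const std::string& key) {
  auto it = std::lower_bound(
      files.begin(), files.end(), key,
      [](const FileInfo& f, const std::string& k) { return f.largest < k; });
  return static_cast<size_t>(it - files.begin());
}

int main() {
  // Largest keys of files [150-200], [200-250], [300-350], [400-450],
  // matching the FindFileTest.Multiple expectations in the test below.
  std::vector<FileInfo> files = {{"200"}, {"250"}, {"350"}, {"450"}};
  assert(FindFileSketch(files, "100") == 0);
  assert(FindFileSketch(files, "201") == 1);
  assert(FindFileSketch(files, "451") == 4);  // past the last file
  return 0;
}
```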
@@ -11,10 +11,7 @@ namespace leveldb {

 class FindFileTest {
  public:
-  std::vector<FileMetaData*> files_;
-  bool disjoint_sorted_files_;
-
-  FindFileTest() : disjoint_sorted_files_(true) { }
+  FindFileTest() : disjoint_sorted_files_(true) {}

   ~FindFileTest() {
     for (int i = 0; i < files_.size(); i++) {
@@ -40,20 +37,25 @@ class FindFileTest {

   bool Overlaps(const char* smallest, const char* largest) {
     InternalKeyComparator cmp(BytewiseComparator());
-    Slice s(smallest != NULL ? smallest : "");
-    Slice l(largest != NULL ? largest : "");
+    Slice s(smallest != nullptr ? smallest : "");
+    Slice l(largest != nullptr ? largest : "");
     return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
-                                 (smallest != NULL ? &s : NULL),
-                                 (largest != NULL ? &l : NULL));
+                                 (smallest != nullptr ? &s : nullptr),
+                                 (largest != nullptr ? &l : nullptr));
   }

+  bool disjoint_sorted_files_;
+
+ private:
+  std::vector<FileMetaData*> files_;
 };

 TEST(FindFileTest, Empty) {
   ASSERT_EQ(0, Find("foo"));
-  ASSERT_TRUE(! Overlaps("a", "z"));
-  ASSERT_TRUE(! Overlaps(NULL, "z"));
-  ASSERT_TRUE(! Overlaps("a", NULL));
-  ASSERT_TRUE(! Overlaps(NULL, NULL));
+  ASSERT_TRUE(!Overlaps("a", "z"));
+  ASSERT_TRUE(!Overlaps(nullptr, "z"));
+  ASSERT_TRUE(!Overlaps("a", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, nullptr));
 }

 TEST(FindFileTest, Single) {
@@ -65,8 +67,8 @@ TEST(FindFileTest, Single) {
   ASSERT_EQ(1, Find("q1"));
   ASSERT_EQ(1, Find("z"));

-  ASSERT_TRUE(! Overlaps("a", "b"));
-  ASSERT_TRUE(! Overlaps("z1", "z2"));
+  ASSERT_TRUE(!Overlaps("a", "b"));
+  ASSERT_TRUE(!Overlaps("z1", "z2"));
   ASSERT_TRUE(Overlaps("a", "p"));
   ASSERT_TRUE(Overlaps("a", "q"));
   ASSERT_TRUE(Overlaps("a", "z"));
@@ -78,15 +80,14 @@ TEST(FindFileTest, Single) {
   ASSERT_TRUE(Overlaps("q", "q"));
   ASSERT_TRUE(Overlaps("q", "q1"));

-  ASSERT_TRUE(! Overlaps(NULL, "j"));
-  ASSERT_TRUE(! Overlaps("r", NULL));
-  ASSERT_TRUE(Overlaps(NULL, "p"));
-  ASSERT_TRUE(Overlaps(NULL, "p1"));
-  ASSERT_TRUE(Overlaps("q", NULL));
-  ASSERT_TRUE(Overlaps(NULL, NULL));
+  ASSERT_TRUE(!Overlaps(nullptr, "j"));
+  ASSERT_TRUE(!Overlaps("r", nullptr));
+  ASSERT_TRUE(Overlaps(nullptr, "p"));
+  ASSERT_TRUE(Overlaps(nullptr, "p1"));
+  ASSERT_TRUE(Overlaps("q", nullptr));
+  ASSERT_TRUE(Overlaps(nullptr, nullptr));
 }

-
 TEST(FindFileTest, Multiple) {
   Add("150", "200");
   Add("200", "250");
@@ -110,10 +111,10 @@ TEST(FindFileTest, Multiple) {
   ASSERT_EQ(3, Find("450"));
   ASSERT_EQ(4, Find("451"));

-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("251", "299"));
-  ASSERT_TRUE(! Overlaps("451", "500"));
-  ASSERT_TRUE(! Overlaps("351", "399"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("251", "299"));
+  ASSERT_TRUE(!Overlaps("451", "500"));
+  ASSERT_TRUE(!Overlaps("351", "399"));

   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
@@ -130,25 +131,25 @@ TEST(FindFileTest, MultipleNullBoundaries) {
   Add("200", "250");
   Add("300", "350");
   Add("400", "450");
-  ASSERT_TRUE(! Overlaps(NULL, "149"));
-  ASSERT_TRUE(! Overlaps("451", NULL));
-  ASSERT_TRUE(Overlaps(NULL, NULL));
-  ASSERT_TRUE(Overlaps(NULL, "150"));
-  ASSERT_TRUE(Overlaps(NULL, "199"));
-  ASSERT_TRUE(Overlaps(NULL, "200"));
-  ASSERT_TRUE(Overlaps(NULL, "201"));
-  ASSERT_TRUE(Overlaps(NULL, "400"));
-  ASSERT_TRUE(Overlaps(NULL, "800"));
-  ASSERT_TRUE(Overlaps("100", NULL));
-  ASSERT_TRUE(Overlaps("200", NULL));
-  ASSERT_TRUE(Overlaps("449", NULL));
-  ASSERT_TRUE(Overlaps("450", NULL));
+  ASSERT_TRUE(!Overlaps(nullptr, "149"));
+  ASSERT_TRUE(!Overlaps("451", nullptr));
+  ASSERT_TRUE(Overlaps(nullptr, nullptr));
+  ASSERT_TRUE(Overlaps(nullptr, "150"));
+  ASSERT_TRUE(Overlaps(nullptr, "199"));
+  ASSERT_TRUE(Overlaps(nullptr, "200"));
+  ASSERT_TRUE(Overlaps(nullptr, "201"));
+  ASSERT_TRUE(Overlaps(nullptr, "400"));
+  ASSERT_TRUE(Overlaps(nullptr, "800"));
+  ASSERT_TRUE(Overlaps("100", nullptr));
+  ASSERT_TRUE(Overlaps("200", nullptr));
+  ASSERT_TRUE(Overlaps("449", nullptr));
+  ASSERT_TRUE(Overlaps("450", nullptr));
 }

 TEST(FindFileTest, OverlapSequenceChecks) {
   Add("200", "200", 5000, 3000);
-  ASSERT_TRUE(! Overlaps("199", "199"));
-  ASSERT_TRUE(! Overlaps("201", "300"));
+  ASSERT_TRUE(!Overlaps("199", "199"));
+  ASSERT_TRUE(!Overlaps("201", "300"));
   ASSERT_TRUE(Overlaps("200", "200"));
   ASSERT_TRUE(Overlaps("190", "200"));
   ASSERT_TRUE(Overlaps("200", "210"));
@@ -158,8 +159,8 @@ TEST(FindFileTest, OverlappingFiles) {
   Add("150", "600");
   Add("400", "500");
   disjoint_sorted_files_ = false;
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("601", "700"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("601", "700"));
   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
   ASSERT_TRUE(Overlaps("100", "300"));
@@ -172,8 +173,160 @@ TEST(FindFileTest, OverlappingFiles) {
   ASSERT_TRUE(Overlaps("600", "700"));
 }

+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+                       const std::vector<FileMetaData*>& level_files,
+                       std::vector<FileMetaData*>* compaction_files);
+
+class AddBoundaryInputsTest {
+ public:
+  std::vector<FileMetaData*> level_files_;
+  std::vector<FileMetaData*> compaction_files_;
+  std::vector<FileMetaData*> all_files_;
+  InternalKeyComparator icmp_;
+
+  AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}
+
+  ~AddBoundaryInputsTest() {
+    for (size_t i = 0; i < all_files_.size(); ++i) {
+      delete all_files_[i];
+    }
+    all_files_.clear();
+  }
+
+  FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
+                                   InternalKey largest) {
+    FileMetaData* f = new FileMetaData();
+    f->number = number;
+    f->smallest = smallest;
+    f->largest = largest;
+    all_files_.push_back(f);
+    return f;
+  }
+};
+
+TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  compaction_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(1, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  level_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_EQ(1, level_files_.size());
+  ASSERT_EQ(f1, level_files_[0]);
+}
+
+TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
+                         InternalKey(InternalKey("200", 1, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f2);
+  compaction_files_.push_back(f3);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
+}
+
+TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
+                         InternalKey(InternalKey("100", 2, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
+                         InternalKey(InternalKey("200", 3, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f2, compaction_files_[1]);
+}
+
+TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+                         InternalKey(InternalKey("100", 3, kTypeValue)));
+
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f3, compaction_files_[1]);
+  ASSERT_EQ(f2, compaction_files_[2]);
+}
+
+TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData* f4 =
+      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+                         InternalKey(InternalKey("100", 3, kTypeValue)));
+
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f4);
+
+  compaction_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f4, compaction_files_[1]);
+  ASSERT_EQ(f3, compaction_files_[2]);
+}
+
 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
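The AddBoundaryInputsTest cases above only make sense under leveldb's internal key ordering: user keys ascend, and for equal user keys the larger sequence number sorts first (newer before older). A minimal comparator sketch of that rule, with simplified types (illustration only; not the library's comparator):

```c++
#include <cassert>
#include <cstdint>
#include <string>

// Simplified stand-in for an internal key: (user_key, sequence).
struct SimpleInternalKey {
  std::string user_key;
  uint64_t sequence;
};

// Returns negative/zero/positive like a comparator: user keys ascending,
// and for equal user keys, higher sequence numbers order first.
int CompareInternalKeys(const SimpleInternalKey& a,
                        const SimpleInternalKey& b) {
  if (a.user_key != b.user_key) {
    return a.user_key < b.user_key ? -1 : 1;
  }
  if (a.sequence > b.sequence) return -1;  // newer entry sorts earlier
  if (a.sequence < b.sequence) return 1;
  return 0;
}

int main() {
  // ("100", 6) precedes ("100", 5): this is why one file can end at
  // ("100", 5) while another file in the same level starts at ("100", 4).
  assert(CompareInternalKeys({"100", 6}, {"100", 5}) < 0);
  assert(CompareInternalKeys({"100", 1}, {"200", 3}) < 0);
  return 0;
}
```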
@@ -15,10 +15,10 @@

 #include "leveldb/write_batch.h"

-#include "leveldb/db.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/db.h"
 #include "util/coding.h"

 namespace leveldb {
@@ -26,19 +26,19 @@ namespace leveldb {
 // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
 static const size_t kHeader = 12;

-WriteBatch::WriteBatch() {
-  Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }

-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() = default;

-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() = default;

 void WriteBatch::Clear() {
   rep_.clear();
   rep_.resize(kHeader);
 }

+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
+
 Status WriteBatch::Iterate(Handler* handler) const {
   Slice input(rep_);
   if (input.size() < kHeader) {
@@ -108,25 +108,28 @@ void WriteBatch::Delete(const Slice& key) {
   PutLengthPrefixedSlice(&rep_, key);
 }

+void WriteBatch::Append(const WriteBatch& source) {
+  WriteBatchInternal::Append(this, &source);
+}
+
 namespace {
 class MemTableInserter : public WriteBatch::Handler {
  public:
   SequenceNumber sequence_;
   MemTable* mem_;

-  virtual void Put(const Slice& key, const Slice& value) {
+  void Put(const Slice& key, const Slice& value) override {
     mem_->Add(sequence_, kTypeValue, key, value);
     sequence_++;
   }
-  virtual void Delete(const Slice& key) {
+  void Delete(const Slice& key) override {
     mem_->Add(sequence_, kTypeDeletion, key, Slice());
     sequence_++;
   }
 };
 }  // namespace

-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
-                                      MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
   MemTableInserter inserter;
   inserter.sequence_ = WriteBatchInternal::Sequence(b);
   inserter.mem_ = memtable;
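The MemTableInserter above is the usual way WriteBatch contents are consumed: implement WriteBatch::Handler and let Iterate() replay the recorded operations in order. A brief sketch of another handler that simply counts operations, against the public leveldb headers (illustration only, not part of this commit; CountingHandler is an invented name):

```c++
#include <iostream>

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

// Counts the operations recorded in a WriteBatch by replaying them through
// WriteBatch::Iterate(), the same mechanism MemTableInserter uses above.
class CountingHandler : public leveldb::WriteBatch::Handler {
 public:
  void Put(const leveldb::Slice& key, const leveldb::Slice& value) override {
    ++puts_;
  }
  void Delete(const leveldb::Slice& key) override { ++deletes_; }

  int puts_ = 0;
  int deletes_ = 0;
};

int main() {
  leveldb::WriteBatch batch;
  batch.Put("foo", "bar");
  batch.Delete("baz");

  CountingHandler handler;
  leveldb::Status s = batch.Iterate(&handler);
  std::cout << "ok=" << s.ok() << " puts=" << handler.puts_
            << " deletes=" << handler.deletes_ << std::endl;
  return 0;
}
```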
@@ -29,13 +29,9 @@ class WriteBatchInternal {
   // this batch.
   static void SetSequence(WriteBatch* batch, SequenceNumber seq);

-  static Slice Contents(const WriteBatch* batch) {
-    return Slice(batch->rep_);
-  }
+  static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }

-  static size_t ByteSize(const WriteBatch* batch) {
-    return batch->rep_.size();
-  }
+  static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }

   static void SetContents(WriteBatch* batch, const Slice& contents);

@@ -46,5 +42,4 @@ class WriteBatchInternal {

 }  // namespace leveldb

-
 #endif  // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
@@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
   return state;
 }

-class WriteBatchTest { };
+class WriteBatchTest {};

 TEST(WriteBatchTest, Empty) {
   WriteBatch batch;
@@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
   WriteBatchInternal::SetSequence(&batch, 100);
   ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
   ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
-  ASSERT_EQ("Put(baz, boo)@102"
-            "Delete(box)@101"
-            "Put(foo, bar)@100",
-            PrintContents(&batch));
+  ASSERT_EQ(
+      "Put(baz, boo)@102"
+      "Delete(box)@101"
+      "Put(foo, bar)@100",
+      PrintContents(&batch));
 }

 TEST(WriteBatchTest, Corruption) {
@@ -81,40 +82,56 @@ TEST(WriteBatchTest, Corruption) {
   WriteBatchInternal::SetSequence(&batch, 200);
   Slice contents = WriteBatchInternal::Contents(&batch);
   WriteBatchInternal::SetContents(&batch,
-                                  Slice(contents.data(),contents.size()-1));
-  ASSERT_EQ("Put(foo, bar)@200"
-            "ParseError()",
-            PrintContents(&batch));
+                                  Slice(contents.data(), contents.size() - 1));
+  ASSERT_EQ(
+      "Put(foo, bar)@200"
+      "ParseError()",
+      PrintContents(&batch));
 }

 TEST(WriteBatchTest, Append) {
   WriteBatch b1, b2;
   WriteBatchInternal::SetSequence(&b1, 200);
   WriteBatchInternal::SetSequence(&b2, 300);
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("",
-            PrintContents(&b1));
+  b1.Append(b2);
+  ASSERT_EQ("", PrintContents(&b1));
   b2.Put("a", "va");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200",
-            PrintContents(&b1));
+  b1.Append(b2);
+  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
   b2.Clear();
   b2.Put("b", "vb");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@201",
-            PrintContents(&b1));
+  b1.Append(b2);
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@201",
+      PrintContents(&b1));
   b2.Delete("foo");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@202"
-            "Put(b, vb)@201"
-            "Delete(foo)@203",
-            PrintContents(&b1));
+  b1.Append(b2);
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@202"
+      "Put(b, vb)@201"
+      "Delete(foo)@203",
+      PrintContents(&b1));
+}
+
+TEST(WriteBatchTest, ApproximateSize) {
+  WriteBatch batch;
+  size_t empty_size = batch.ApproximateSize();
+
+  batch.Put(Slice("foo"), Slice("bar"));
+  size_t one_key_size = batch.ApproximateSize();
+  ASSERT_LT(empty_size, one_key_size);
+
+  batch.Put(Slice("baz"), Slice("boo"));
+  size_t two_keys_size = batch.ApproximateSize();
+  ASSERT_LT(one_key_size, two_keys_size);
+
+  batch.Delete(Slice("box"));
+  size_t post_delete_size = batch.ApproximateSize();
+  ASSERT_LT(two_keys_size, post_delete_size);
 }

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
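The Append and ApproximateSize tests above exercise the two WriteBatch additions in this change. A short usage sketch against the public API (values are made up for illustration):

```c++
#include <cassert>
#include <cstddef>

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::WriteBatch updates;
  updates.Put("key1", "value1");
  updates.Delete("key2");

  leveldb::WriteBatch more;
  more.Put("key3", "value3");

  // Append() splices one batch onto another; ApproximateSize() grows
  // monotonically as operations are added, as the test above asserts.
  size_t before = updates.ApproximateSize();
  updates.Append(more);
  assert(updates.ApproximateSize() > before);
  return 0;
}
```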
@@ -90,9 +90,9 @@ div.bsql {
 <h4>Benchmark Source Code</h4>
 <p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p>
 <ul>
-  <li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li>
-  <li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li>
-  <li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li>
+  <li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li>
+  <li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li>
+  <li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li>
 </ul>

 <h4>Custom Build Specifications</h4>

doc/impl.md (14 lines changed)
@@ -64,13 +64,15 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).

 ## Level 0

-When the log file grows above a certain size (1MB by default):
-Create a brand new memtable and log file and direct future updates here
+When the log file grows above a certain size (4MB by default):
+Create a brand new memtable and log file and direct future updates here.

 In the background:
-Write the contents of the previous memtable to an sstable
-Discard the memtable
-Delete the old log file and the old memtable
-Add the new sstable to the young (level-0) level.
+
+1. Write the contents of the previous memtable to an sstable.
+2. Discard the memtable.
+3. Delete the old log file and the old memtable.
+4. Add the new sstable to the young (level-0) level.

 ## Compactions

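For reference, the "4MB by default" threshold in the corrected text corresponds to the memtable size knob in leveldb's Options; the field name (write_buffer_size) is an assumption here and does not appear in this hunk. A minimal tuning sketch with an illustrative database path:

```c++
#include "leveldb/db.h"
#include "leveldb/options.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  // Assumption for this sketch: write_buffer_size is the setting behind the
  // "certain size (4MB by default)" threshold described above. Raising it
  // trades memory for fewer level-0 conversions.
  options.write_buffer_size = 8 * 1024 * 1024;  // 8MB memtable/log threshold

  leveldb::DB* db = nullptr;
  leveldb::Status status =
      leveldb::DB::Open(options, "/tmp/leveldb_demo", &db);  // demo path
  if (status.ok()) {
    delete db;
  }
  return 0;
}
```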
@@ -338,19 +338,19 @@ options.compression = leveldb::kNoCompression;
 ### Cache

 The contents of the database are stored in a set of files in the filesystem and
-each file stores a sequence of compressed blocks. If options.cache is non-NULL,
-it is used to cache frequently used uncompressed block contents.
+each file stores a sequence of compressed blocks. If options.block_cache is
+non-NULL, it is used to cache frequently used uncompressed block contents.

 ```c++
 #include "leveldb/cache.h"

 leveldb::Options options;
-options.cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
 leveldb::DB* db;
 leveldb::DB::Open(options, name, &db);
 ... use the db ...
 delete db
-delete options.cache;
+delete options.block_cache;
 ```

 Note that the cache holds uncompressed data, and therefore it should be sized
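The renamed block_cache option pairs naturally with ReadOptions::fill_cache when scanning: bulk reads can skip populating the cache so hot keys stay resident. A brief sketch of that combination (the database path is illustrative; fill_cache is part of leveldb's public ReadOptions but is not shown in this hunk):

```c++
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/options.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache

  leveldb::DB* db = nullptr;
  leveldb::Status status =
      leveldb::DB::Open(options, "/tmp/leveldb_cache_demo", &db);  // demo path
  if (status.ok()) {
    // For bulk scans, skip filling the block cache so hot keys stay cached.
    leveldb::ReadOptions read_options;
    read_options.fill_cache = false;
    leveldb::Iterator* it = db->NewIterator(read_options);
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
      // ... process it->key() / it->value() ...
    }
    delete it;
    delete db;
  }
  // The cache must outlive the DB that uses it.
  delete options.block_cache;
  return 0;
}
```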
@@ -4,14 +4,18 @@

 #include "helpers/memenv/memenv.h"

+#include <string.h>
+
+#include <limits>
+#include <map>
+#include <string>
+#include <vector>
+
 #include "leveldb/env.h"
 #include "leveldb/status.h"
 #include "port/port.h"
+#include "port/thread_annotations.h"
 #include "util/mutexlock.h"
-#include <map>
-#include <string.h>
-#include <string>
-#include <vector>

 namespace leveldb {

@@ -23,6 +27,10 @@ class FileState {
   // and the caller must call Ref() at least once.
   FileState() : refs_(0), size_(0) {}

+  // No copying allowed.
+  FileState(const FileState&) = delete;
+  FileState& operator=(const FileState&) = delete;
+
   // Increase the reference count.
   void Ref() {
     MutexLock lock(&refs_mutex_);
@@ -47,9 +55,22 @@ class FileState {
     }
   }

-  uint64_t Size() const { return size_; }
+  uint64_t Size() const {
+    MutexLock lock(&blocks_mutex_);
+    return size_;
+  }
+
+  void Truncate() {
+    MutexLock lock(&blocks_mutex_);
+    for (char*& block : blocks_) {
+      delete[] block;
+    }
+    blocks_.clear();
+    size_ = 0;
+  }

   Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+    MutexLock lock(&blocks_mutex_);
     if (offset > size_) {
       return Status::IOError("Offset greater than file size.");
     }
@@ -62,16 +83,9 @@ class FileState {
       return Status::OK();
     }

-    assert(offset / kBlockSize <= SIZE_MAX);
+    assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
     size_t block = static_cast<size_t>(offset / kBlockSize);
     size_t block_offset = offset % kBlockSize;

-    if (n <= kBlockSize - block_offset) {
-      // The requested bytes are all in the first block.
-      *result = Slice(blocks_[block] + block_offset, n);
-      return Status::OK();
-    }
-
     size_t bytes_to_copy = n;
     char* dst = scratch;

@@ -96,6 +110,7 @@ class FileState {
     const char* src = data.data();
     size_t src_len = data.size();

+    MutexLock lock(&blocks_mutex_);
     while (src_len > 0) {
       size_t avail;
       size_t offset = size_ % kBlockSize;
@@ -122,28 +137,17 @@ class FileState {
   }

  private:
-  // Private since only Unref() should be used to delete it.
-  ~FileState() {
-    for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end();
-         ++i) {
-      delete [] *i;
-    }
-  }
-
-  // No copying allowed.
-  FileState(const FileState&);
-  void operator=(const FileState&);
+  enum { kBlockSize = 8 * 1024 };
+
+  // Private since only Unref() should be used to delete it.
+  ~FileState() { Truncate(); }

   port::Mutex refs_mutex_;
-  int refs_;  // Protected by refs_mutex_;
+  int refs_ GUARDED_BY(refs_mutex_);

-  // The following fields are not protected by any mutex. They are only mutable
-  // while the file is being written, and concurrent access is not allowed
-  // to writable files.
-  std::vector<char*> blocks_;
-  uint64_t size_;
-
-  enum { kBlockSize = 8 * 1024 };
+  mutable port::Mutex blocks_mutex_;
+  std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
+  uint64_t size_ GUARDED_BY(blocks_mutex_);
 };

 class SequentialFileImpl : public SequentialFile {
@@ -152,9 +156,7 @@ class SequentialFileImpl : public SequentialFile {
     file_->Ref();
   }

-  ~SequentialFileImpl() {
-    file_->Unref();
-  }
+  ~SequentialFileImpl() { file_->Unref(); }

   virtual Status Read(size_t n, Slice* result, char* scratch) {
     Status s = file_->Read(pos_, n, result, scratch);
@@ -183,13 +185,9 @@ class SequentialFileImpl : public SequentialFile {

 class RandomAccessFileImpl : public RandomAccessFile {
  public:
-  explicit RandomAccessFileImpl(FileState* file) : file_(file) {
-    file_->Ref();
-  }
+  explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }

-  ~RandomAccessFileImpl() {
-    file_->Unref();
-  }
+  ~RandomAccessFileImpl() { file_->Unref(); }

   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const {
@ -202,17 +200,11 @@ class RandomAccessFileImpl : public RandomAccessFile {
|
|||||||
|
|
||||||
class WritableFileImpl : public WritableFile {
|
class WritableFileImpl : public WritableFile {
|
||||||
public:
|
public:
|
||||||
WritableFileImpl(FileState* file) : file_(file) {
|
WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
|
||||||
file_->Ref();
|
|
||||||
}
|
|
||||||
|
|
||||||
~WritableFileImpl() {
|
~WritableFileImpl() { file_->Unref(); }
|
||||||
file_->Unref();
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Status Append(const Slice& data) {
|
virtual Status Append(const Slice& data) { return file_->Append(data); }
|
||||||
return file_->Append(data);
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Status Close() { return Status::OK(); }
|
virtual Status Close() { return Status::OK(); }
|
||||||
virtual Status Flush() { return Status::OK(); }
|
virtual Status Flush() { return Status::OK(); }
|
||||||
@ -224,25 +216,25 @@ class WritableFileImpl : public WritableFile {
|
|||||||
|
|
||||||
class NoOpLogger : public Logger {
|
class NoOpLogger : public Logger {
|
||||||
public:
|
public:
|
||||||
virtual void Logv(const char* format, va_list ap) { }
|
virtual void Logv(const char* format, va_list ap) {}
|
||||||
};
|
};
|
||||||
|
|
||||||
class InMemoryEnv : public EnvWrapper {
|
class InMemoryEnv : public EnvWrapper {
|
||||||
public:
|
public:
|
||||||
explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
|
explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
|
||||||
|
|
||||||
virtual ~InMemoryEnv() {
|
~InMemoryEnv() override {
|
||||||
for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
|
for (const auto& kvp : file_map_) {
|
||||||
i->second->Unref();
|
kvp.second->Unref();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Partial implementation of the Env interface.
|
// Partial implementation of the Env interface.
|
||||||
virtual Status NewSequentialFile(const std::string& fname,
|
Status NewSequentialFile(const std::string& fname,
|
||||||
SequentialFile** result) {
|
SequentialFile** result) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(fname) == file_map_.end()) {
|
if (file_map_.find(fname) == file_map_.end()) {
|
||||||
*result = NULL;
|
*result = nullptr;
|
||||||
return Status::IOError(fname, "File not found");
|
return Status::IOError(fname, "File not found");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -250,11 +242,11 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status NewRandomAccessFile(const std::string& fname,
|
Status NewRandomAccessFile(const std::string& fname,
|
||||||
RandomAccessFile** result) {
|
RandomAccessFile** result) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(fname) == file_map_.end()) {
|
if (file_map_.find(fname) == file_map_.end()) {
|
||||||
*result = NULL;
|
*result = nullptr;
|
||||||
return Status::IOError(fname, "File not found");
|
return Status::IOError(fname, "File not found");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -262,27 +254,32 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status NewWritableFile(const std::string& fname,
|
Status NewWritableFile(const std::string& fname,
|
||||||
WritableFile** result) {
|
WritableFile** result) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(fname) != file_map_.end()) {
|
FileSystem::iterator it = file_map_.find(fname);
|
||||||
DeleteFileInternal(fname);
|
|
||||||
}
|
|
||||||
|
|
||||||
FileState* file = new FileState();
|
FileState* file;
|
||||||
file->Ref();
|
if (it == file_map_.end()) {
|
||||||
file_map_[fname] = file;
|
// File is not currently open.
|
||||||
|
file = new FileState();
|
||||||
|
file->Ref();
|
||||||
|
file_map_[fname] = file;
|
||||||
|
} else {
|
||||||
|
file = it->second;
|
||||||
|
file->Truncate();
|
||||||
|
}
|
||||||
|
|
||||||
*result = new WritableFileImpl(file);
|
*result = new WritableFileImpl(file);
|
||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status NewAppendableFile(const std::string& fname,
|
Status NewAppendableFile(const std::string& fname,
|
||||||
WritableFile** result) {
|
WritableFile** result) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
FileState** sptr = &file_map_[fname];
|
FileState** sptr = &file_map_[fname];
|
||||||
FileState* file = *sptr;
|
FileState* file = *sptr;
|
||||||
if (file == NULL) {
|
if (file == nullptr) {
|
||||||
file = new FileState();
|
file = new FileState();
|
||||||
file->Ref();
|
file->Ref();
|
||||||
}
|
}
|
||||||
@ -290,18 +287,18 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual bool FileExists(const std::string& fname) {
|
bool FileExists(const std::string& fname) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
return file_map_.find(fname) != file_map_.end();
|
return file_map_.find(fname) != file_map_.end();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status GetChildren(const std::string& dir,
|
Status GetChildren(const std::string& dir,
|
||||||
std::vector<std::string>* result) {
|
std::vector<std::string>* result) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
result->clear();
|
result->clear();
|
||||||
|
|
||||||
for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
|
for (const auto& kvp : file_map_) {
|
||||||
const std::string& filename = i->first;
|
const std::string& filename = kvp.first;
|
||||||
|
|
||||||
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
|
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
|
||||||
Slice(filename).starts_with(Slice(dir))) {
|
Slice(filename).starts_with(Slice(dir))) {
|
||||||
@ -312,7 +309,8 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
void DeleteFileInternal(const std::string& fname) {
|
void DeleteFileInternal(const std::string& fname)
|
||||||
|
EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
|
||||||
if (file_map_.find(fname) == file_map_.end()) {
|
if (file_map_.find(fname) == file_map_.end()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -321,7 +319,7 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
file_map_.erase(fname);
|
file_map_.erase(fname);
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status DeleteFile(const std::string& fname) {
|
Status DeleteFile(const std::string& fname) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(fname) == file_map_.end()) {
|
if (file_map_.find(fname) == file_map_.end()) {
|
||||||
return Status::IOError(fname, "File not found");
|
return Status::IOError(fname, "File not found");
|
||||||
@ -331,15 +329,11 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status CreateDir(const std::string& dirname) {
|
Status CreateDir(const std::string& dirname) override { return Status::OK(); }
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Status DeleteDir(const std::string& dirname) {
|
Status DeleteDir(const std::string& dirname) override { return Status::OK(); }
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
|
Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(fname) == file_map_.end()) {
|
if (file_map_.find(fname) == file_map_.end()) {
|
||||||
return Status::IOError(fname, "File not found");
|
return Status::IOError(fname, "File not found");
|
||||||
@ -349,8 +343,8 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status RenameFile(const std::string& src,
|
Status RenameFile(const std::string& src,
|
||||||
const std::string& target) {
|
const std::string& target) override {
|
||||||
MutexLock lock(&mutex_);
|
MutexLock lock(&mutex_);
|
||||||
if (file_map_.find(src) == file_map_.end()) {
|
if (file_map_.find(src) == file_map_.end()) {
|
||||||
return Status::IOError(src, "File not found");
|
return Status::IOError(src, "File not found");
|
||||||
@ -362,22 +356,22 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status LockFile(const std::string& fname, FileLock** lock) {
|
Status LockFile(const std::string& fname, FileLock** lock) override {
|
||||||
*lock = new FileLock;
|
*lock = new FileLock;
|
||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status UnlockFile(FileLock* lock) {
|
Status UnlockFile(FileLock* lock) override {
|
||||||
delete lock;
|
delete lock;
|
||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status GetTestDirectory(std::string* path) {
|
Status GetTestDirectory(std::string* path) override {
|
||||||
*path = "/test";
|
*path = "/test";
|
||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual Status NewLogger(const std::string& fname, Logger** result) {
|
Status NewLogger(const std::string& fname, Logger** result) override {
|
||||||
*result = new NoOpLogger;
|
*result = new NoOpLogger;
|
||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
@ -385,14 +379,13 @@ class InMemoryEnv : public EnvWrapper {
|
|||||||
private:
|
private:
|
||||||
// Map from filenames to FileState objects, representing a simple file system.
|
// Map from filenames to FileState objects, representing a simple file system.
|
||||||
typedef std::map<std::string, FileState*> FileSystem;
|
typedef std::map<std::string, FileState*> FileSystem;
|
||||||
|
|
||||||
port::Mutex mutex_;
|
port::Mutex mutex_;
|
||||||
FileSystem file_map_; // Protected by mutex_.
|
FileSystem file_map_ GUARDED_BY(mutex_);
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
Env* NewMemEnv(Env* base_env) {
|
Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
|
||||||
return new InMemoryEnv(base_env);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
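The memenv.cc hunks above replace the hand-written copy suppression and the `// Protected by ...` comments with Clang thread-safety annotations (`GUARDED_BY`, `EXCLUSIVE_LOCKS_REQUIRED`) pulled in through "port/thread_annotations.h". The sketch below shows the same pattern outside leveldb, assuming Clang with `-Wthread-safety`; the `Mutex` wrapper and `Counter` class are illustrative stand-ins, not leveldb code.

```cpp
#include <mutex>

// Capability-annotated mutex wrapper; leveldb's port::Mutex plays this role.
class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability())) { mu_.lock(); }
  void Unlock() __attribute__((release_capability())) { mu_.unlock(); }

 private:
  std::mutex mu_;
};

#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define EXCLUSIVE_LOCKS_REQUIRED(x) __attribute__((exclusive_locks_required(x)))

class Counter {
 public:
  void Increment() {
    mu_.Lock();
    IncrementLocked();  // OK: mu_ is held here.
    mu_.Unlock();
  }

 private:
  // The analyzer rejects callers that do not already hold mu_.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }

  Mutex mu_;
  int value_ GUARDED_BY(mu_) = 0;  // Every access must happen under mu_.
};

int main() {
  Counter c;
  c.Increment();
  return 0;
}
```

The payoff in the patch above is that prose like "Protected by refs_mutex_" becomes a contract the compiler checks instead of a comment the reader has to trust.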
@@ -5,6 +5,8 @@
 #ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
 #define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_

+#include "leveldb/export.h"
+
 namespace leveldb {

 class Env;
@@ -13,7 +15,7 @@ class Env;
 // all non-file-storage tasks to base_env. The caller must delete the result
 // when it is no longer needed.
 // *base_env must remain live while the result is in use.
-Env* NewMemEnv(Env* base_env);
+LEVELDB_EXPORT Env* NewMemEnv(Env* base_env);

 } // namespace leveldb

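The helpers/memenv/memenv.h hunk above only adds `LEVELDB_EXPORT`, but the surrounding comment spells out the ownership rules: `*base_env` must outlive the result and the caller deletes the result. A sketch of the usual wiring, mirroring the DBTest case further down; the `/ram/testdb` name is arbitrary since nothing touches disk:

```cpp
#include <cassert>
#include <string>

#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"

int main() {
  // The in-memory Env wraps the default Env, which must stay alive.
  leveldb::Env* mem_env = leveldb::NewMemEnv(leveldb::Env::Default());

  leveldb::Options options;
  options.create_if_missing = true;
  options.env = mem_env;  // All files for this DB now live in memory.

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/ram/testdb", &db);
  assert(s.ok());

  s = db->Put(leveldb::WriteOptions(), "key", "value");
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value");

  delete db;       // Close the database first...
  delete mem_env;  // ...then the Env it was using.
  return 0;
}
```

Pointing `options.env` at the memory Env is mainly useful for unit tests and for staging data that will be thrown away.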
|
@ -4,25 +4,22 @@
|
|||||||
|
|
||||||
#include "helpers/memenv/memenv.h"
|
#include "helpers/memenv/memenv.h"
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
#include "db/db_impl.h"
|
#include "db/db_impl.h"
|
||||||
#include "leveldb/db.h"
|
#include "leveldb/db.h"
|
||||||
#include "leveldb/env.h"
|
#include "leveldb/env.h"
|
||||||
#include "util/testharness.h"
|
#include "util/testharness.h"
|
||||||
#include <string>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class MemEnvTest {
|
class MemEnvTest {
|
||||||
public:
|
public:
|
||||||
Env* env_;
|
MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
|
||||||
|
~MemEnvTest() { delete env_; }
|
||||||
|
|
||||||
MemEnvTest()
|
Env* env_;
|
||||||
: env_(NewMemEnv(Env::Default())) {
|
|
||||||
}
|
|
||||||
~MemEnvTest() {
|
|
||||||
delete env_;
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
TEST(MemEnvTest, Basics) {
|
TEST(MemEnvTest, Basics) {
|
||||||
@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
|
|||||||
|
|
||||||
// Read sequentially.
|
// Read sequentially.
|
||||||
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
|
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
|
||||||
ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
|
ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
|
||||||
ASSERT_EQ(0, result.compare("hello"));
|
ASSERT_EQ(0, result.compare("hello"));
|
||||||
ASSERT_OK(seq_file->Skip(1));
|
ASSERT_OK(seq_file->Skip(1));
|
||||||
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
|
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
|
||||||
ASSERT_EQ(0, result.compare("world"));
|
ASSERT_EQ(0, result.compare("world"));
|
||||||
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
|
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
|
||||||
ASSERT_EQ(0, result.size());
|
ASSERT_EQ(0, result.size());
|
||||||
ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
|
ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
|
||||||
ASSERT_OK(seq_file->Read(1000, &result, scratch));
|
ASSERT_OK(seq_file->Read(1000, &result, scratch));
|
||||||
ASSERT_EQ(0, result.size());
|
ASSERT_EQ(0, result.size());
|
||||||
delete seq_file;
|
delete seq_file;
|
||||||
|
|
||||||
// Random reads.
|
// Random reads.
|
||||||
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
|
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
|
||||||
ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
|
ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
|
||||||
ASSERT_EQ(0, result.compare("world"));
|
ASSERT_EQ(0, result.compare("world"));
|
||||||
ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
|
ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
|
||||||
ASSERT_EQ(0, result.compare("hello"));
|
ASSERT_EQ(0, result.compare("hello"));
|
||||||
ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
|
ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
|
||||||
ASSERT_EQ(0, result.compare("d"));
|
ASSERT_EQ(0, result.compare("d"));
|
||||||
|
|
||||||
// Too high offset.
|
// Too high offset.
|
||||||
@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
|
|||||||
SequentialFile* seq_file;
|
SequentialFile* seq_file;
|
||||||
Slice result;
|
Slice result;
|
||||||
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
|
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
|
||||||
ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
|
ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
|
||||||
ASSERT_EQ(0, result.compare("foo"));
|
ASSERT_EQ(0, result.compare("foo"));
|
||||||
|
|
||||||
size_t read = 0;
|
size_t read = 0;
|
||||||
@ -188,7 +185,30 @@ TEST(MemEnvTest, LargeWrite) {
|
|||||||
}
|
}
|
||||||
ASSERT_TRUE(write_data == read_data);
|
ASSERT_TRUE(write_data == read_data);
|
||||||
delete seq_file;
|
delete seq_file;
|
||||||
delete [] scratch;
|
delete[] scratch;
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(MemEnvTest, OverwriteOpenFile) {
|
||||||
|
const char kWrite1Data[] = "Write #1 data";
|
||||||
|
const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
|
||||||
|
const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
|
||||||
|
|
||||||
|
ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
|
||||||
|
|
||||||
|
RandomAccessFile* rand_file;
|
||||||
|
ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
|
||||||
|
|
||||||
|
const char kWrite2Data[] = "Write #2 data";
|
||||||
|
ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
|
||||||
|
|
||||||
|
// Verify that overwriting an open file will result in the new file data
|
||||||
|
// being read from files opened before the write.
|
||||||
|
Slice result;
|
||||||
|
char scratch[kFileDataLen];
|
||||||
|
ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
|
||||||
|
ASSERT_EQ(0, result.compare(kWrite2Data));
|
||||||
|
|
||||||
|
delete rand_file;
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(MemEnvTest, DBTest) {
|
TEST(MemEnvTest, DBTest) {
|
||||||
@ -236,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
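The memenv_test.cc hunks above drive the Env file API directly (NewWritableFile, NewSequentialFile, plus the new overwrite-while-open case). Stripped of the test harness, the same round trip looks roughly like the sketch below; it reuses the in-memory Env, and the `/dir/f` path is taken from the test:

```cpp
#include <cassert>
#include <string>

#include "helpers/memenv/memenv.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

int main() {
  leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());

  // Write a file.
  leveldb::WritableFile* wfile = nullptr;
  assert(env->CreateDir("/dir").ok());
  assert(env->NewWritableFile("/dir/f", &wfile).ok());
  assert(wfile->Append("hello world").ok());
  assert(wfile->Close().ok());
  delete wfile;

  // Read it back sequentially.
  leveldb::SequentialFile* rfile = nullptr;
  assert(env->NewSequentialFile("/dir/f", &rfile).ok());
  char scratch[64];
  leveldb::Slice result;
  assert(rfile->Read(5, &result, scratch).ok());  // Reads "hello".
  assert(result.compare("hello") == 0);
  delete rfile;

  delete env;
  return 0;
}
```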
|
@ -48,225 +48,205 @@ extern "C" {
|
|||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
|
|
||||||
/* Exported types */
|
/* Exported types */
|
||||||
|
|
||||||
typedef struct leveldb_t leveldb_t;
|
typedef struct leveldb_t leveldb_t;
|
||||||
typedef struct leveldb_cache_t leveldb_cache_t;
|
typedef struct leveldb_cache_t leveldb_cache_t;
|
||||||
typedef struct leveldb_comparator_t leveldb_comparator_t;
|
typedef struct leveldb_comparator_t leveldb_comparator_t;
|
||||||
typedef struct leveldb_env_t leveldb_env_t;
|
typedef struct leveldb_env_t leveldb_env_t;
|
||||||
typedef struct leveldb_filelock_t leveldb_filelock_t;
|
typedef struct leveldb_filelock_t leveldb_filelock_t;
|
||||||
typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
|
typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
|
||||||
typedef struct leveldb_iterator_t leveldb_iterator_t;
|
typedef struct leveldb_iterator_t leveldb_iterator_t;
|
||||||
typedef struct leveldb_logger_t leveldb_logger_t;
|
typedef struct leveldb_logger_t leveldb_logger_t;
|
||||||
typedef struct leveldb_options_t leveldb_options_t;
|
typedef struct leveldb_options_t leveldb_options_t;
|
||||||
typedef struct leveldb_randomfile_t leveldb_randomfile_t;
|
typedef struct leveldb_randomfile_t leveldb_randomfile_t;
|
||||||
typedef struct leveldb_readoptions_t leveldb_readoptions_t;
|
typedef struct leveldb_readoptions_t leveldb_readoptions_t;
|
||||||
typedef struct leveldb_seqfile_t leveldb_seqfile_t;
|
typedef struct leveldb_seqfile_t leveldb_seqfile_t;
|
||||||
typedef struct leveldb_snapshot_t leveldb_snapshot_t;
|
typedef struct leveldb_snapshot_t leveldb_snapshot_t;
|
||||||
typedef struct leveldb_writablefile_t leveldb_writablefile_t;
|
typedef struct leveldb_writablefile_t leveldb_writablefile_t;
|
||||||
typedef struct leveldb_writebatch_t leveldb_writebatch_t;
|
typedef struct leveldb_writebatch_t leveldb_writebatch_t;
|
||||||
typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
|
typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
|
||||||
|
|
||||||
/* DB operations */
|
/* DB operations */
|
||||||
|
|
||||||
extern leveldb_t* leveldb_open(
|
LEVELDB_EXPORT leveldb_t* leveldb_open(const leveldb_options_t* options,
|
||||||
const leveldb_options_t* options,
|
const char* name, char** errptr);
|
||||||
const char* name,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
extern void leveldb_close(leveldb_t* db);
|
LEVELDB_EXPORT void leveldb_close(leveldb_t* db);
|
||||||
|
|
||||||
extern void leveldb_put(
|
LEVELDB_EXPORT void leveldb_put(leveldb_t* db,
|
||||||
leveldb_t* db,
|
const leveldb_writeoptions_t* options,
|
||||||
const leveldb_writeoptions_t* options,
|
const char* key, size_t keylen, const char* val,
|
||||||
const char* key, size_t keylen,
|
size_t vallen, char** errptr);
|
||||||
const char* val, size_t vallen,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
extern void leveldb_delete(
|
LEVELDB_EXPORT void leveldb_delete(leveldb_t* db,
|
||||||
leveldb_t* db,
|
const leveldb_writeoptions_t* options,
|
||||||
const leveldb_writeoptions_t* options,
|
const char* key, size_t keylen,
|
||||||
const char* key, size_t keylen,
|
char** errptr);
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
extern void leveldb_write(
|
LEVELDB_EXPORT void leveldb_write(leveldb_t* db,
|
||||||
leveldb_t* db,
|
const leveldb_writeoptions_t* options,
|
||||||
const leveldb_writeoptions_t* options,
|
leveldb_writebatch_t* batch, char** errptr);
|
||||||
leveldb_writebatch_t* batch,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
/* Returns NULL if not found. A malloc()ed array otherwise.
|
/* Returns NULL if not found. A malloc()ed array otherwise.
|
||||||
Stores the length of the array in *vallen. */
|
Stores the length of the array in *vallen. */
|
||||||
extern char* leveldb_get(
|
LEVELDB_EXPORT char* leveldb_get(leveldb_t* db,
|
||||||
leveldb_t* db,
|
const leveldb_readoptions_t* options,
|
||||||
const leveldb_readoptions_t* options,
|
const char* key, size_t keylen, size_t* vallen,
|
||||||
const char* key, size_t keylen,
|
char** errptr);
|
||||||
size_t* vallen,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
extern leveldb_iterator_t* leveldb_create_iterator(
|
LEVELDB_EXPORT leveldb_iterator_t* leveldb_create_iterator(
|
||||||
leveldb_t* db,
|
leveldb_t* db, const leveldb_readoptions_t* options);
|
||||||
const leveldb_readoptions_t* options);
|
|
||||||
|
|
||||||
extern const leveldb_snapshot_t* leveldb_create_snapshot(
|
LEVELDB_EXPORT const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db);
|
||||||
leveldb_t* db);
|
|
||||||
|
|
||||||
extern void leveldb_release_snapshot(
|
LEVELDB_EXPORT void leveldb_release_snapshot(
|
||||||
leveldb_t* db,
|
leveldb_t* db, const leveldb_snapshot_t* snapshot);
|
||||||
const leveldb_snapshot_t* snapshot);
|
|
||||||
|
|
||||||
/* Returns NULL if property name is unknown.
|
/* Returns NULL if property name is unknown.
|
||||||
Else returns a pointer to a malloc()-ed null-terminated value. */
|
Else returns a pointer to a malloc()-ed null-terminated value. */
|
||||||
extern char* leveldb_property_value(
|
LEVELDB_EXPORT char* leveldb_property_value(leveldb_t* db,
|
||||||
leveldb_t* db,
|
const char* propname);
|
||||||
const char* propname);
|
|
||||||
|
|
||||||
extern void leveldb_approximate_sizes(
|
LEVELDB_EXPORT void leveldb_approximate_sizes(
|
||||||
leveldb_t* db,
|
leveldb_t* db, int num_ranges, const char* const* range_start_key,
|
||||||
int num_ranges,
|
const size_t* range_start_key_len, const char* const* range_limit_key,
|
||||||
const char* const* range_start_key, const size_t* range_start_key_len,
|
const size_t* range_limit_key_len, uint64_t* sizes);
|
||||||
const char* const* range_limit_key, const size_t* range_limit_key_len,
|
|
||||||
uint64_t* sizes);
|
|
||||||
|
|
||||||
extern void leveldb_compact_range(
|
LEVELDB_EXPORT void leveldb_compact_range(leveldb_t* db, const char* start_key,
|
||||||
leveldb_t* db,
|
size_t start_key_len,
|
||||||
const char* start_key, size_t start_key_len,
|
const char* limit_key,
|
||||||
const char* limit_key, size_t limit_key_len);
|
size_t limit_key_len);
|
||||||
|
|
||||||
/* Management operations */
|
/* Management operations */
|
||||||
|
|
||||||
extern void leveldb_destroy_db(
|
LEVELDB_EXPORT void leveldb_destroy_db(const leveldb_options_t* options,
|
||||||
const leveldb_options_t* options,
|
const char* name, char** errptr);
|
||||||
const char* name,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
extern void leveldb_repair_db(
|
LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options,
|
||||||
const leveldb_options_t* options,
|
const char* name, char** errptr);
|
||||||
const char* name,
|
|
||||||
char** errptr);
|
|
||||||
|
|
||||||
/* Iterator */
|
/* Iterator */
|
||||||
|
|
||||||
extern void leveldb_iter_destroy(leveldb_iterator_t*);
|
LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*);
|
||||||
extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
|
LEVELDB_EXPORT unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
|
||||||
extern void leveldb_iter_seek_to_first(leveldb_iterator_t*);
|
LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*);
|
||||||
extern void leveldb_iter_seek_to_last(leveldb_iterator_t*);
|
LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*);
|
||||||
extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen);
|
LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k,
|
||||||
extern void leveldb_iter_next(leveldb_iterator_t*);
|
size_t klen);
|
||||||
extern void leveldb_iter_prev(leveldb_iterator_t*);
|
LEVELDB_EXPORT void leveldb_iter_next(leveldb_iterator_t*);
|
||||||
extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen);
|
LEVELDB_EXPORT void leveldb_iter_prev(leveldb_iterator_t*);
|
||||||
extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen);
|
LEVELDB_EXPORT const char* leveldb_iter_key(const leveldb_iterator_t*,
|
||||||
extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr);
|
size_t* klen);
|
||||||
|
LEVELDB_EXPORT const char* leveldb_iter_value(const leveldb_iterator_t*,
|
||||||
|
size_t* vlen);
|
||||||
|
LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*,
|
||||||
|
char** errptr);
|
||||||
|
|
||||||
/* Write batch */
|
/* Write batch */
|
||||||
|
|
||||||
extern leveldb_writebatch_t* leveldb_writebatch_create();
|
LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create(void);
|
||||||
extern void leveldb_writebatch_destroy(leveldb_writebatch_t*);
|
LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*);
|
||||||
extern void leveldb_writebatch_clear(leveldb_writebatch_t*);
|
LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*);
|
||||||
extern void leveldb_writebatch_put(
|
LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
|
||||||
leveldb_writebatch_t*,
|
const char* key, size_t klen,
|
||||||
const char* key, size_t klen,
|
const char* val, size_t vlen);
|
||||||
const char* val, size_t vlen);
|
LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*,
|
||||||
extern void leveldb_writebatch_delete(
|
const char* key, size_t klen);
|
||||||
leveldb_writebatch_t*,
|
LEVELDB_EXPORT void leveldb_writebatch_iterate(
|
||||||
const char* key, size_t klen);
|
const leveldb_writebatch_t*, void* state,
|
||||||
extern void leveldb_writebatch_iterate(
|
|
||||||
leveldb_writebatch_t*,
|
|
||||||
void* state,
|
|
||||||
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
|
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
|
||||||
void (*deleted)(void*, const char* k, size_t klen));
|
void (*deleted)(void*, const char* k, size_t klen));
|
||||||
|
LEVELDB_EXPORT void leveldb_writebatch_append(
|
||||||
|
leveldb_writebatch_t* destination, const leveldb_writebatch_t* source);
|
||||||
|
|
||||||
/* Options */
|
/* Options */
|
||||||
|
|
||||||
extern leveldb_options_t* leveldb_options_create();
|
LEVELDB_EXPORT leveldb_options_t* leveldb_options_create(void);
|
||||||
extern void leveldb_options_destroy(leveldb_options_t*);
|
LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*);
|
||||||
extern void leveldb_options_set_comparator(
|
LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
|
||||||
leveldb_options_t*,
|
leveldb_comparator_t*);
|
||||||
leveldb_comparator_t*);
|
LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*,
|
||||||
extern void leveldb_options_set_filter_policy(
|
leveldb_filterpolicy_t*);
|
||||||
leveldb_options_t*,
|
LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*,
|
||||||
leveldb_filterpolicy_t*);
|
unsigned char);
|
||||||
extern void leveldb_options_set_create_if_missing(
|
LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*,
|
||||||
leveldb_options_t*, unsigned char);
|
unsigned char);
|
||||||
extern void leveldb_options_set_error_if_exists(
|
LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*,
|
||||||
leveldb_options_t*, unsigned char);
|
unsigned char);
|
||||||
extern void leveldb_options_set_paranoid_checks(
|
LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
|
||||||
leveldb_options_t*, unsigned char);
|
LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*,
|
||||||
extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
|
leveldb_logger_t*);
|
||||||
extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*);
|
LEVELDB_EXPORT void leveldb_options_set_write_buffer_size(leveldb_options_t*,
|
||||||
extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t);
|
size_t);
|
||||||
extern void leveldb_options_set_max_open_files(leveldb_options_t*, int);
|
LEVELDB_EXPORT void leveldb_options_set_max_open_files(leveldb_options_t*, int);
|
||||||
extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*);
|
LEVELDB_EXPORT void leveldb_options_set_cache(leveldb_options_t*,
|
||||||
extern void leveldb_options_set_block_size(leveldb_options_t*, size_t);
|
leveldb_cache_t*);
|
||||||
extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int);
|
LEVELDB_EXPORT void leveldb_options_set_block_size(leveldb_options_t*, size_t);
|
||||||
|
LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
|
||||||
|
leveldb_options_t*, int);
|
||||||
|
LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
|
||||||
|
size_t);
|
||||||
|
|
||||||
enum {
|
enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
|
||||||
leveldb_no_compression = 0,
|
LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
|
||||||
leveldb_snappy_compression = 1
|
|
||||||
};
|
|
||||||
extern void leveldb_options_set_compression(leveldb_options_t*, int);
|
|
||||||
|
|
||||||
/* Comparator */
|
/* Comparator */
|
||||||
|
|
||||||
extern leveldb_comparator_t* leveldb_comparator_create(
|
LEVELDB_EXPORT leveldb_comparator_t* leveldb_comparator_create(
|
||||||
void* state,
|
void* state, void (*destructor)(void*),
|
||||||
void (*destructor)(void*),
|
int (*compare)(void*, const char* a, size_t alen, const char* b,
|
||||||
int (*compare)(
|
size_t blen),
|
||||||
void*,
|
|
||||||
const char* a, size_t alen,
|
|
||||||
const char* b, size_t blen),
|
|
||||||
const char* (*name)(void*));
|
const char* (*name)(void*));
|
||||||
extern void leveldb_comparator_destroy(leveldb_comparator_t*);
|
LEVELDB_EXPORT void leveldb_comparator_destroy(leveldb_comparator_t*);
|
||||||
|
|
||||||
/* Filter policy */
|
/* Filter policy */
|
||||||
|
|
||||||
extern leveldb_filterpolicy_t* leveldb_filterpolicy_create(
|
LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create(
|
||||||
void* state,
|
void* state, void (*destructor)(void*),
|
||||||
void (*destructor)(void*),
|
char* (*create_filter)(void*, const char* const* key_array,
|
||||||
char* (*create_filter)(
|
const size_t* key_length_array, int num_keys,
|
||||||
void*,
|
size_t* filter_length),
|
||||||
const char* const* key_array, const size_t* key_length_array,
|
unsigned char (*key_may_match)(void*, const char* key, size_t length,
|
||||||
int num_keys,
|
const char* filter, size_t filter_length),
|
||||||
size_t* filter_length),
|
|
||||||
unsigned char (*key_may_match)(
|
|
||||||
void*,
|
|
||||||
const char* key, size_t length,
|
|
||||||
const char* filter, size_t filter_length),
|
|
||||||
const char* (*name)(void*));
|
const char* (*name)(void*));
|
||||||
extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
|
LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
|
||||||
|
|
||||||
extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
|
LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
|
||||||
int bits_per_key);
|
int bits_per_key);
|
||||||
|
|
||||||
/* Read options */
|
/* Read options */
|
||||||
|
|
||||||
extern leveldb_readoptions_t* leveldb_readoptions_create();
|
LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create(void);
|
||||||
extern void leveldb_readoptions_destroy(leveldb_readoptions_t*);
|
LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
|
||||||
extern void leveldb_readoptions_set_verify_checksums(
|
LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
|
||||||
leveldb_readoptions_t*,
|
|
||||||
unsigned char);
|
|
||||||
extern void leveldb_readoptions_set_fill_cache(
|
|
||||||
leveldb_readoptions_t*, unsigned char);
|
leveldb_readoptions_t*, unsigned char);
|
||||||
extern void leveldb_readoptions_set_snapshot(
|
LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*,
|
||||||
leveldb_readoptions_t*,
|
unsigned char);
|
||||||
const leveldb_snapshot_t*);
|
LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
|
||||||
|
const leveldb_snapshot_t*);
|
||||||
|
|
||||||
/* Write options */
|
/* Write options */
|
||||||
|
|
||||||
extern leveldb_writeoptions_t* leveldb_writeoptions_create();
|
LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void);
|
||||||
extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
|
LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
|
||||||
extern void leveldb_writeoptions_set_sync(
|
LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
|
||||||
leveldb_writeoptions_t*, unsigned char);
|
unsigned char);
|
||||||
|
|
||||||
/* Cache */
|
/* Cache */
|
||||||
|
|
||||||
extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
|
LEVELDB_EXPORT leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
|
||||||
extern void leveldb_cache_destroy(leveldb_cache_t* cache);
|
LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache);
|
||||||
|
|
||||||
/* Env */
|
/* Env */
|
||||||
|
|
||||||
extern leveldb_env_t* leveldb_create_default_env();
|
LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env(void);
|
||||||
extern void leveldb_env_destroy(leveldb_env_t*);
|
LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*);
|
||||||
|
|
||||||
|
/* If not NULL, the returned buffer must be released using leveldb_free(). */
|
||||||
|
LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*);
|
||||||
|
|
||||||
/* Utility */
|
/* Utility */
|
||||||
|
|
||||||
@ -275,16 +255,16 @@ extern void leveldb_env_destroy(leveldb_env_t*);
|
|||||||
in this file. Note that in certain cases (typically on Windows), you
|
in this file. Note that in certain cases (typically on Windows), you
|
||||||
may need to call this routine instead of free(ptr) to dispose of
|
may need to call this routine instead of free(ptr) to dispose of
|
||||||
malloc()-ed memory returned by this library. */
|
malloc()-ed memory returned by this library. */
|
||||||
extern void leveldb_free(void* ptr);
|
LEVELDB_EXPORT void leveldb_free(void* ptr);
|
||||||
|
|
||||||
/* Return the major version number for this release. */
|
/* Return the major version number for this release. */
|
||||||
extern int leveldb_major_version();
|
LEVELDB_EXPORT int leveldb_major_version(void);
|
||||||
|
|
||||||
/* Return the minor version number for this release. */
|
/* Return the minor version number for this release. */
|
||||||
extern int leveldb_minor_version();
|
LEVELDB_EXPORT int leveldb_minor_version(void);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
} /* end extern "C" */
|
} /* end extern "C" */
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
|
#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
|
||||||
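The include/leveldb/c.h hunk above is mechanical: every prototype gains `LEVELDB_EXPORT` and is reflowed, and empty parameter lists become `(void)`; the functions themselves are unchanged. For orientation, a minimal round trip through the C bindings might look like the sketch below (the `/tmp/leveldb_c_demo` path is illustrative, and only the open call gets real error handling):

```cpp
// Valid as C or C++; link against the leveldb library.
#include <stdio.h>

#include "leveldb/c.h"

int main(void) {
  char* err = NULL;

  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);

  leveldb_t* db = leveldb_open(options, "/tmp/leveldb_c_demo", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);
    return 1;
  }

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_put(db, woptions, "key", 3, "value", 5, &err);  // err ignored for brevity.

  leveldb_readoptions_t* roptions = leveldb_readoptions_create();
  size_t vallen = 0;
  char* val = leveldb_get(db, roptions, "key", 3, &vallen, &err);
  if (val != NULL) {
    printf("key -> %.*s\n", (int)vallen, val);
    leveldb_free(val);  // get() results are malloc()ed; release with leveldb_free.
  }

  leveldb_readoptions_destroy(roptions);
  leveldb_writeoptions_destroy(woptions);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
```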
@@ -19,26 +19,31 @@
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_

 #include <stdint.h>
+
+#include "leveldb/export.h"
 #include "leveldb/slice.h"

 namespace leveldb {

-class Cache;
+class LEVELDB_EXPORT Cache;

 // Create a new cache with a fixed size capacity. This implementation
 // of Cache uses a least-recently-used eviction policy.
-extern Cache* NewLRUCache(size_t capacity);
+LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity);

-class Cache {
+class LEVELDB_EXPORT Cache {
  public:
-  Cache() { }
+  Cache() = default;

+  Cache(const Cache&) = delete;
+  Cache& operator=(const Cache&) = delete;
+
   // Destroys all existing entries by calling the "deleter"
   // function that was passed to the constructor.
   virtual ~Cache();

   // Opaque handle to an entry stored in the cache.
-  struct Handle { };
+  struct Handle {};

   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
@@ -52,7 +57,7 @@ class Cache {
   virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                          void (*deleter)(const Slice& key, void* value)) = 0;

-  // If the cache has no mapping for "key", returns NULL.
+  // If the cache has no mapping for "key", returns nullptr.
   //
   // Else return a handle that corresponds to the mapping. The caller
   // must call this->Release(handle) when the returned mapping is no
@@ -99,10 +104,6 @@ class Cache {

   struct Rep;
   Rep* rep_;
-
-  // No copying allowed
-  Cache(const Cache&);
-  void operator=(const Cache&);
 };

 } // namespace leveldb
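The cache.h hunk above documents `Insert` and the Release-after-Lookup contract that `options.block_cache` relies on. Used directly, the lifecycle looks roughly like the sketch below; the string payload and charge are arbitrary, and `Lookup()`/`Value()` come from the same class just outside the lines shown in this hunk:

```cpp
#include <cassert>
#include <string>

#include "leveldb/cache.h"
#include "leveldb/slice.h"

// Deleter the cache invokes when an entry is evicted or its refcount drops to zero.
static void DeleteString(const leveldb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}

int main() {
  leveldb::Cache* cache = leveldb::NewLRUCache(1024);  // Capacity is a charge budget.

  std::string* value = new std::string("hello");
  leveldb::Cache::Handle* h =
      cache->Insert("greeting", value, value->size(), &DeleteString);
  cache->Release(h);  // Drop our reference; the entry stays cached.

  h = cache->Lookup("greeting");
  if (h != nullptr) {
    assert(*static_cast<std::string*>(cache->Value(h)) == "hello");
    cache->Release(h);  // Every successful Lookup needs a matching Release.
  }

  delete cache;  // Destroys remaining entries via the deleter.
  return 0;
}
```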
@@ -7,6 +7,8 @@

 #include <string>

+#include "leveldb/export.h"
+
 namespace leveldb {

 class Slice;
@@ -15,7 +17,7 @@ class Slice;
 // used as keys in an sstable or a database. A Comparator implementation
 // must be thread-safe since leveldb may invoke its methods concurrently
 // from multiple threads.
-class Comparator {
+class LEVELDB_EXPORT Comparator {
  public:
   virtual ~Comparator();

@@ -43,9 +45,8 @@ class Comparator {
   // If *start < limit, changes *start to a short string in [start,limit).
   // Simple comparator implementations may return with *start unchanged,
   // i.e., an implementation of this method that does nothing is correct.
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const = 0;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const = 0;

   // Changes *key to a short string >= *key.
   // Simple comparator implementations may return with *key unchanged,
@@ -56,7 +57,7 @@ class Comparator {
 // Return a builtin comparator that uses lexicographic byte-wise
 // ordering. The result remains the property of this module and
 // must not be deleted.
-extern const Comparator* BytewiseComparator();
+LEVELDB_EXPORT const Comparator* BytewiseComparator();

 } // namespace leveldb

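The comparator.h hunk above changes exports and formatting only, but this is the interface users implement for custom key ordering. Below is a sketch of a minimal subclass; the reverse-bytewise ordering and the name string are illustrative, and, as the header comment notes, the no-op key-shortening overrides are legal:

```cpp
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/slice.h"

// Orders keys in reverse bytewise order; mostly a vehicle for showing which
// methods a Comparator subclass must provide.
class ReverseBytewiseComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    return -leveldb::BytewiseComparator()->Compare(a, b);
  }

  // The name is persisted with the database; changing it (or the ordering)
  // makes existing databases unreadable with this comparator.
  const char* Name() const override { return "demo.ReverseBytewiseComparator"; }

  // Doing nothing here is correct; these only let leveldb shorten index keys.
  void FindShortestSeparator(std::string* /*start*/,
                             const leveldb::Slice& /*limit*/) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};
```

A comparator is attached through `options.comparator` when opening the database and must outlive the DB; since the `Name()` string is checked on open, an existing database cannot silently switch comparators.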
|
@ -7,14 +7,16 @@
|
|||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/iterator.h"
|
#include "leveldb/iterator.h"
|
||||||
#include "leveldb/options.h"
|
#include "leveldb/options.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
// Update Makefile if you change these
|
// Update CMakeLists.txt if you change these
|
||||||
static const int kMajorVersion = 1;
|
static const int kMajorVersion = 1;
|
||||||
static const int kMinorVersion = 20;
|
static const int kMinorVersion = 22;
|
||||||
|
|
||||||
struct Options;
|
struct Options;
|
||||||
struct ReadOptions;
|
struct ReadOptions;
|
||||||
@ -24,42 +26,44 @@ class WriteBatch;
|
|||||||
// Abstract handle to particular state of a DB.
|
// Abstract handle to particular state of a DB.
|
||||||
// A Snapshot is an immutable object and can therefore be safely
|
// A Snapshot is an immutable object and can therefore be safely
|
||||||
// accessed from multiple threads without any external synchronization.
|
// accessed from multiple threads without any external synchronization.
|
||||||
class Snapshot {
|
class LEVELDB_EXPORT Snapshot {
|
||||||
protected:
|
protected:
|
||||||
virtual ~Snapshot();
|
virtual ~Snapshot();
|
||||||
};
|
};
|
||||||
|
|
||||||
// A range of keys
|
// A range of keys
|
||||||
struct Range {
|
struct LEVELDB_EXPORT Range {
|
||||||
Slice start; // Included in the range
|
Range() = default;
|
||||||
Slice limit; // Not included in the range
|
Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
|
||||||
|
|
||||||
Range() { }
|
Slice start; // Included in the range
|
||||||
Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
|
Slice limit; // Not included in the range
|
||||||
};
|
};
|
||||||
|
|
||||||
// A DB is a persistent ordered map from keys to values.
|
// A DB is a persistent ordered map from keys to values.
|
||||||
// A DB is safe for concurrent access from multiple threads without
|
// A DB is safe for concurrent access from multiple threads without
|
||||||
// any external synchronization.
|
// any external synchronization.
|
||||||
class DB {
|
class LEVELDB_EXPORT DB {
|
||||||
public:
|
public:
|
||||||
// Open the database with the specified "name".
|
// Open the database with the specified "name".
|
||||||
// Stores a pointer to a heap-allocated database in *dbptr and returns
|
// Stores a pointer to a heap-allocated database in *dbptr and returns
|
||||||
// OK on success.
|
// OK on success.
|
||||||
// Stores NULL in *dbptr and returns a non-OK status on error.
|
// Stores nullptr in *dbptr and returns a non-OK status on error.
|
||||||
// Caller should delete *dbptr when it is no longer needed.
|
// Caller should delete *dbptr when it is no longer needed.
|
||||||
static Status Open(const Options& options,
|
static Status Open(const Options& options, const std::string& name,
|
||||||
const std::string& name,
|
|
||||||
DB** dbptr);
|
DB** dbptr);
|
||||||
|
|
||||||
DB() { }
|
DB() = default;
|
||||||
|
|
||||||
|
DB(const DB&) = delete;
|
||||||
|
DB& operator=(const DB&) = delete;
|
||||||
|
|
||||||
virtual ~DB();
|
virtual ~DB();
|
||||||
|
|
||||||
// Set the database entry for "key" to "value". Returns OK on success,
|
// Set the database entry for "key" to "value". Returns OK on success,
|
||||||
// and a non-OK status on error.
|
// and a non-OK status on error.
|
||||||
// Note: consider setting options.sync = true.
|
// Note: consider setting options.sync = true.
|
||||||
virtual Status Put(const WriteOptions& options,
|
virtual Status Put(const WriteOptions& options, const Slice& key,
|
||||||
const Slice& key,
|
|
||||||
const Slice& value) = 0;
|
const Slice& value) = 0;
|
||||||
|
|
||||||
// Remove the database entry (if any) for "key". Returns OK on
|
// Remove the database entry (if any) for "key". Returns OK on
|
||||||
@ -80,8 +84,8 @@ class DB {
|
|||||||
// a status for which Status::IsNotFound() returns true.
|
// a status for which Status::IsNotFound() returns true.
|
||||||
//
|
//
|
||||||
// May return some other Status on an error.
|
// May return some other Status on an error.
|
||||||
virtual Status Get(const ReadOptions& options,
|
virtual Status Get(const ReadOptions& options, const Slice& key,
|
||||||
const Slice& key, std::string* value) = 0;
|
std::string* value) = 0;
|
||||||
|
|
||||||
// Return a heap-allocated iterator over the contents of the database.
|
// Return a heap-allocated iterator over the contents of the database.
|
||||||
// The result of NewIterator() is initially invalid (caller must
|
// The result of NewIterator() is initially invalid (caller must
|
||||||
@ -136,27 +140,27 @@ class DB {
|
|||||||
// needed to access the data. This operation should typically only
|
// needed to access the data. This operation should typically only
|
||||||
// be invoked by users who understand the underlying implementation.
|
// be invoked by users who understand the underlying implementation.
|
||||||
//
|
//
|
||||||
// begin==NULL is treated as a key before all keys in the database.
|
// begin==nullptr is treated as a key before all keys in the database.
|
||||||
// end==NULL is treated as a key after all keys in the database.
|
// end==nullptr is treated as a key after all keys in the database.
|
||||||
// Therefore the following call will compact the entire database:
|
// Therefore the following call will compact the entire database:
|
||||||
// db->CompactRange(NULL, NULL);
|
// db->CompactRange(nullptr, nullptr);
|
||||||
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
|
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
DB(const DB&);
|
|
||||||
void operator=(const DB&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Destroy the contents of the specified database.
|
// Destroy the contents of the specified database.
|
||||||
// Be very careful using this method.
|
// Be very careful using this method.
|
||||||
Status DestroyDB(const std::string& name, const Options& options);
|
//
|
||||||
|
// Note: For backwards compatibility, if DestroyDB is unable to list the
|
||||||
|
// database files, Status::OK() will still be returned masking this failure.
|
||||||
|
LEVELDB_EXPORT Status DestroyDB(const std::string& name,
|
||||||
|
const Options& options);
|
||||||
|
|
||||||
// If a DB cannot be opened, you may attempt to call this method to
|
// If a DB cannot be opened, you may attempt to call this method to
|
||||||
// resurrect as much of the contents of the database as possible.
|
// resurrect as much of the contents of the database as possible.
|
||||||
// Some data may be lost, so be careful when calling this function
|
// Some data may be lost, so be careful when calling this function
|
||||||
// on a database that contains important information.
|
// on a database that contains important information.
|
||||||
Status RepairDB(const std::string& dbname, const Options& options);
|
LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
|
||||||
|
const Options& options);
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
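The db.h hunk above mostly swaps NULL for nullptr in the comments and adds `LEVELDB_EXPORT`, but two of the calls it documents are worth seeing in context: whole-database compaction via `CompactRange(nullptr, nullptr)` and `DestroyDB`. A sketch of both, with an illustrative `/tmp/compactdb` path:

```cpp
#include <cassert>

#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/compactdb", &db);
  assert(s.ok());

  // ... load and delete data ...

  // nullptr begin/end bounds cover all keys, so this compacts the whole DB,
  // reclaiming space from deleted or overwritten entries.
  db->CompactRange(nullptr, nullptr);
  delete db;

  // DestroyDB removes the database files entirely; per the new comment above,
  // it may still return OK if it cannot list the files.
  s = leveldb::DestroyDB("/tmp/compactdb", leveldb::Options());
  return s.ok() ? 0 : 1;
}
```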
@@ -6,7 +6,9 @@
 #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_

 #include <string>
+
 #include "leveldb/env.h"
+#include "leveldb/export.h"
 #include "leveldb/status.h"

 namespace leveldb {
@@ -18,7 +20,8 @@ namespace leveldb {
 //
 // Returns a non-OK result if fname does not name a leveldb storage
 // file, or if the file cannot be read.
-Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname,
+                               WritableFile* dst);

 } // namespace leveldb

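`DumpFile`, declared in the dumpfile.h hunk above, writes a human-readable description of any leveldb file (log, table, or MANIFEST) to a `WritableFile`; the `leveldbutil` tool is essentially a wrapper around it. Below is a sketch of a standalone dumper; the file paths come from the command line, and nothing here is specific to this patch beyond the new export macro:

```cpp
#include <cstdio>

#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"

int main(int argc, char** argv) {
  if (argc != 3) {
    std::fprintf(stderr, "usage: %s <leveldb-file> <output-file>\n", argv[0]);
    return 1;
  }

  leveldb::Env* env = leveldb::Env::Default();
  leveldb::WritableFile* out = nullptr;
  leveldb::Status s = env->NewWritableFile(argv[2], &out);
  if (s.ok()) {
    // Writes a description of argv[1] (log, table, or MANIFEST) into argv[2].
    s = leveldb::DumpFile(env, argv[1], out);
    out->Close();
    delete out;
  }
  if (!s.ok()) {
    std::fprintf(stderr, "%s\n", s.ToString().c_str());
    return 1;
  }
  return 0;
}
```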
|
@ -13,12 +13,36 @@
|
|||||||
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
|
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
|
||||||
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
|
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
|
||||||
|
|
||||||
#include <string>
|
|
||||||
#include <vector>
|
|
||||||
#include <stdarg.h>
|
#include <stdarg.h>
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
// The leveldb::Env class below contains a DeleteFile method.
|
||||||
|
// At the same time, <windows.h>, a fairly popular header
|
||||||
|
// file for Windows applications, defines a DeleteFile macro.
|
||||||
|
//
|
||||||
|
// Without any intervention on our part, the result of this
|
||||||
|
// unfortunate coincidence is that the name of the
|
||||||
|
// leveldb::Env::DeleteFile method seen by the compiler depends on
|
||||||
|
// whether <windows.h> was included before or after the LevelDB
|
||||||
|
// headers.
|
||||||
|
//
|
||||||
|
// To avoid headaches, we undefined DeleteFile (if defined) and
|
||||||
|
// redefine it at the bottom of this file. This way <windows.h>
|
||||||
|
// can be included before this file (or not at all) and the
|
||||||
|
// exported method will always be leveldb::Env::DeleteFile.
|
||||||
|
#if defined(DeleteFile)
|
||||||
|
#undef DeleteFile
|
||||||
|
#define LEVELDB_DELETEFILE_UNDEFINED
|
||||||
|
#endif // defined(DeleteFile)
|
||||||
|
#endif // defined(_WIN32)
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class FileLock;
|
class FileLock;
|
||||||
@ -28,9 +52,13 @@ class SequentialFile;
|
|||||||
class Slice;
|
class Slice;
|
||||||
class WritableFile;
|
class WritableFile;
|
||||||
|
|
||||||
class Env {
|
class LEVELDB_EXPORT Env {
|
||||||
public:
|
public:
|
||||||
Env() { }
|
Env() = default;
|
||||||
|
|
||||||
|
Env(const Env&) = delete;
|
||||||
|
Env& operator=(const Env&) = delete;
|
||||||
|
|
||||||
virtual ~Env();
|
virtual ~Env();
|
||||||
|
|
||||||
// Return a default environment suitable for the current operating
|
// Return a default environment suitable for the current operating
|
||||||
@ -40,20 +68,22 @@ class Env {
|
|||||||
// The result of Default() belongs to leveldb and must never be deleted.
|
// The result of Default() belongs to leveldb and must never be deleted.
|
||||||
static Env* Default();
|
static Env* Default();
|
||||||
|
|
||||||
// Create a brand new sequentially-readable file with the specified name.
|
// Create an object that sequentially reads the file with the specified name.
|
||||||
// On success, stores a pointer to the new file in *result and returns OK.
|
// On success, stores a pointer to the new file in *result and returns OK.
|
||||||
// On failure stores NULL in *result and returns non-OK. If the file does
|
// On failure stores nullptr in *result and returns non-OK. If the file does
|
||||||
// not exist, returns a non-OK status.
|
// not exist, returns a non-OK status. Implementations should return a
|
||||||
|
// NotFound status when the file does not exist.
|
||||||
//
|
//
|
||||||
// The returned file will only be accessed by one thread at a time.
|
// The returned file will only be accessed by one thread at a time.
|
||||||
virtual Status NewSequentialFile(const std::string& fname,
|
virtual Status NewSequentialFile(const std::string& fname,
|
||||||
SequentialFile** result) = 0;
|
SequentialFile** result) = 0;
|
||||||
|
|
||||||
// Create a brand new random access read-only file with the
|
// Create an object supporting random-access reads from the file with the
|
||||||
// specified name. On success, stores a pointer to the new file in
|
// specified name. On success, stores a pointer to the new file in
|
||||||
// *result and returns OK. On failure stores NULL in *result and
|
// *result and returns OK. On failure stores nullptr in *result and
|
||||||
// returns non-OK. If the file does not exist, returns a non-OK
|
// returns non-OK. If the file does not exist, returns a non-OK
|
||||||
// status.
|
// status. Implementations should return a NotFound status when the file does
|
||||||
|
// not exist.
|
||||||
//
|
//
|
||||||
// The returned file may be concurrently accessed by multiple threads.
|
// The returned file may be concurrently accessed by multiple threads.
|
||||||
virtual Status NewRandomAccessFile(const std::string& fname,
|
virtual Status NewRandomAccessFile(const std::string& fname,
|
||||||
@ -62,7 +92,7 @@ class Env {
|
|||||||
// Create an object that writes to a new file with the specified
|
// Create an object that writes to a new file with the specified
|
||||||
// name. Deletes any existing file with the same name and creates a
|
// name. Deletes any existing file with the same name and creates a
|
||||||
// new file. On success, stores a pointer to the new file in
|
// new file. On success, stores a pointer to the new file in
|
||||||
// *result and returns OK. On failure stores NULL in *result and
|
// *result and returns OK. On failure stores nullptr in *result and
|
||||||
// returns non-OK.
|
// returns non-OK.
|
||||||
//
|
//
|
||||||
// The returned file will only be accessed by one thread at a time.
|
// The returned file will only be accessed by one thread at a time.
|
||||||
@ -72,7 +102,7 @@ class Env {
|
|||||||
// Create an object that either appends to an existing file, or
|
// Create an object that either appends to an existing file, or
|
||||||
// writes to a new file (if the file does not exist to begin with).
|
// writes to a new file (if the file does not exist to begin with).
|
||||||
// On success, stores a pointer to the new file in *result and
|
// On success, stores a pointer to the new file in *result and
|
||||||
// returns OK. On failure stores NULL in *result and returns
|
// returns OK. On failure stores nullptr in *result and returns
|
||||||
// non-OK.
|
// non-OK.
|
||||||
//
|
//
|
||||||
// The returned file will only be accessed by one thread at a time.
|
// The returned file will only be accessed by one thread at a time.
|
||||||
@ -110,7 +140,7 @@ class Env {
|
|||||||
const std::string& target) = 0;
|
const std::string& target) = 0;
|
||||||
|
|
||||||
// Lock the specified file. Used to prevent concurrent access to
|
// Lock the specified file. Used to prevent concurrent access to
|
||||||
// the same db by multiple processes. On failure, stores NULL in
|
// the same db by multiple processes. On failure, stores nullptr in
|
||||||
// *lock and returns non-OK.
|
// *lock and returns non-OK.
|
||||||
//
|
//
|
||||||
// On success, stores a pointer to the object that represents the
|
// On success, stores a pointer to the object that represents the
|
||||||
@ -136,16 +166,14 @@ class Env {
|
|||||||
// added to the same Env may run concurrently in different threads.
|
// added to the same Env may run concurrently in different threads.
|
||||||
// I.e., the caller may not assume that background work items are
|
// I.e., the caller may not assume that background work items are
|
||||||
// serialized.
|
// serialized.
|
||||||
virtual void Schedule(
|
virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
|
||||||
void (*function)(void* arg),
|
|
||||||
void* arg) = 0;
|
|
||||||
|
|
||||||
// Start a new thread, invoking "function(arg)" within the new thread.
|
// Start a new thread, invoking "function(arg)" within the new thread.
|
||||||
// When "function(arg)" returns, the thread will be destroyed.
|
// When "function(arg)" returns, the thread will be destroyed.
|
||||||
virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
|
virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
|
||||||
|
|
||||||
// *path is set to a temporary directory that can be used for testing. It may
|
// *path is set to a temporary directory that can be used for testing. It may
|
||||||
// or many not have just been created. The directory may or may not differ
|
// or may not have just been created. The directory may or may not differ
|
||||||
// between runs of the same process, but subsequent calls will return the
|
// between runs of the same process, but subsequent calls will return the
|
||||||
// same directory.
|
// same directory.
|
||||||
virtual Status GetTestDirectory(std::string* path) = 0;
|
virtual Status GetTestDirectory(std::string* path) = 0;
|
||||||
@ -159,17 +187,16 @@ class Env {
|
|||||||
|
|
||||||
// Sleep/delay the thread for the prescribed number of micro-seconds.
|
// Sleep/delay the thread for the prescribed number of micro-seconds.
|
||||||
virtual void SleepForMicroseconds(int micros) = 0;
|
virtual void SleepForMicroseconds(int micros) = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
Env(const Env&);
|
|
||||||
void operator=(const Env&);
|
|
||||||
};
|
};
|
||||||
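As a usage sketch (not part of the patch; the /tmp path and the payload are arbitrary assumptions), the Env returned by Default() can be exercised end to end together with the ReadFileToString helper declared later in this header:

// Minimal sketch of driving the Env interface through the default
// implementation. Error handling is compressed into asserts.
#include <cassert>
#include <string>

#include "leveldb/env.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();  // owned by leveldb, never delete

  leveldb::WritableFile* file = nullptr;
  leveldb::Status s = env->NewWritableFile("/tmp/leveldb_env_demo", &file);
  assert(s.ok());
  assert(file->Append("hello, env").ok());
  assert(file->Sync().ok());
  assert(file->Close().ok());
  delete file;

  std::string contents;
  s = leveldb::ReadFileToString(env, "/tmp/leveldb_env_demo", &contents);
  assert(s.ok() && contents == "hello, env");
  return 0;
}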
|
|
||||||
// A file abstraction for reading sequentially through a file
|
// A file abstraction for reading sequentially through a file
|
||||||
class SequentialFile {
|
class LEVELDB_EXPORT SequentialFile {
|
||||||
public:
|
public:
|
||||||
SequentialFile() { }
|
SequentialFile() = default;
|
||||||
|
|
||||||
|
SequentialFile(const SequentialFile&) = delete;
|
||||||
|
SequentialFile& operator=(const SequentialFile&) = delete;
|
||||||
|
|
||||||
virtual ~SequentialFile();
|
virtual ~SequentialFile();
|
||||||
|
|
||||||
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
|
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
|
||||||
@ -190,17 +217,16 @@ class SequentialFile {
|
|||||||
//
|
//
|
||||||
// REQUIRES: External synchronization
|
// REQUIRES: External synchronization
|
||||||
virtual Status Skip(uint64_t n) = 0;
|
virtual Status Skip(uint64_t n) = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
SequentialFile(const SequentialFile&);
|
|
||||||
void operator=(const SequentialFile&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// A file abstraction for randomly reading the contents of a file.
|
// A file abstraction for randomly reading the contents of a file.
|
||||||
class RandomAccessFile {
|
class LEVELDB_EXPORT RandomAccessFile {
|
||||||
public:
|
public:
|
||||||
RandomAccessFile() { }
|
RandomAccessFile() = default;
|
||||||
|
|
||||||
|
RandomAccessFile(const RandomAccessFile&) = delete;
|
||||||
|
RandomAccessFile& operator=(const RandomAccessFile&) = delete;
|
||||||
|
|
||||||
virtual ~RandomAccessFile();
|
virtual ~RandomAccessFile();
|
||||||
|
|
||||||
// Read up to "n" bytes from the file starting at "offset".
|
// Read up to "n" bytes from the file starting at "offset".
|
||||||
@ -214,138 +240,148 @@ class RandomAccessFile {
|
|||||||
// Safe for concurrent use by multiple threads.
|
// Safe for concurrent use by multiple threads.
|
||||||
virtual Status Read(uint64_t offset, size_t n, Slice* result,
|
virtual Status Read(uint64_t offset, size_t n, Slice* result,
|
||||||
char* scratch) const = 0;
|
char* scratch) const = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
RandomAccessFile(const RandomAccessFile&);
|
|
||||||
void operator=(const RandomAccessFile&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// A file abstraction for sequential writing. The implementation
|
// A file abstraction for sequential writing. The implementation
|
||||||
// must provide buffering since callers may append small fragments
|
// must provide buffering since callers may append small fragments
|
||||||
// at a time to the file.
|
// at a time to the file.
|
||||||
class WritableFile {
|
class LEVELDB_EXPORT WritableFile {
|
||||||
public:
|
public:
|
||||||
WritableFile() { }
|
WritableFile() = default;
|
||||||
|
|
||||||
|
WritableFile(const WritableFile&) = delete;
|
||||||
|
WritableFile& operator=(const WritableFile&) = delete;
|
||||||
|
|
||||||
virtual ~WritableFile();
|
virtual ~WritableFile();
|
||||||
|
|
||||||
virtual Status Append(const Slice& data) = 0;
|
virtual Status Append(const Slice& data) = 0;
|
||||||
virtual Status Close() = 0;
|
virtual Status Close() = 0;
|
||||||
virtual Status Flush() = 0;
|
virtual Status Flush() = 0;
|
||||||
virtual Status Sync() = 0;
|
virtual Status Sync() = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
WritableFile(const WritableFile&);
|
|
||||||
void operator=(const WritableFile&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// An interface for writing log messages.
|
// An interface for writing log messages.
|
||||||
class Logger {
|
class LEVELDB_EXPORT Logger {
|
||||||
public:
|
public:
|
||||||
Logger() { }
|
Logger() = default;
|
||||||
|
|
||||||
|
Logger(const Logger&) = delete;
|
||||||
|
Logger& operator=(const Logger&) = delete;
|
||||||
|
|
||||||
virtual ~Logger();
|
virtual ~Logger();
|
||||||
|
|
||||||
// Write an entry to the log file with the specified format.
|
// Write an entry to the log file with the specified format.
|
||||||
virtual void Logv(const char* format, va_list ap) = 0;
|
virtual void Logv(const char* format, va_list ap) = 0;
|
||||||
|
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
Logger(const Logger&);
|
|
||||||
void operator=(const Logger&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
// Identifies a locked file.
|
// Identifies a locked file.
|
||||||
class FileLock {
|
class LEVELDB_EXPORT FileLock {
|
||||||
public:
|
public:
|
||||||
FileLock() { }
|
FileLock() = default;
|
||||||
|
|
||||||
|
FileLock(const FileLock&) = delete;
|
||||||
|
FileLock& operator=(const FileLock&) = delete;
|
||||||
|
|
||||||
virtual ~FileLock();
|
virtual ~FileLock();
|
||||||
private:
|
|
||||||
// No copying allowed
|
|
||||||
FileLock(const FileLock&);
|
|
||||||
void operator=(const FileLock&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Log the specified data to *info_log if info_log is non-NULL.
|
// Log the specified data to *info_log if info_log is non-null.
|
||||||
extern void Log(Logger* info_log, const char* format, ...)
|
void Log(Logger* info_log, const char* format, ...)
|
||||||
# if defined(__GNUC__) || defined(__clang__)
|
#if defined(__GNUC__) || defined(__clang__)
|
||||||
__attribute__((__format__ (__printf__, 2, 3)))
|
__attribute__((__format__(__printf__, 2, 3)))
|
||||||
# endif
|
#endif
|
||||||
;
|
;
|
||||||
|
|
||||||
// A utility routine: write "data" to the named file.
|
// A utility routine: write "data" to the named file.
|
||||||
extern Status WriteStringToFile(Env* env, const Slice& data,
|
LEVELDB_EXPORT Status WriteStringToFile(Env* env, const Slice& data,
|
||||||
const std::string& fname);
|
const std::string& fname);
|
||||||
|
|
||||||
// A utility routine: read contents of named file into *data
|
// A utility routine: read contents of named file into *data
|
||||||
extern Status ReadFileToString(Env* env, const std::string& fname,
|
LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
|
||||||
std::string* data);
|
std::string* data);
|
||||||
|
|
||||||
// An implementation of Env that forwards all calls to another Env.
|
// An implementation of Env that forwards all calls to another Env.
|
||||||
// May be useful to clients who wish to override just part of the
|
// May be useful to clients who wish to override just part of the
|
||||||
// functionality of another Env.
|
// functionality of another Env.
|
||||||
class EnvWrapper : public Env {
|
class LEVELDB_EXPORT EnvWrapper : public Env {
|
||||||
public:
|
public:
|
||||||
// Initialize an EnvWrapper that delegates all calls to *t
|
// Initialize an EnvWrapper that delegates all calls to *t.
|
||||||
explicit EnvWrapper(Env* t) : target_(t) { }
|
explicit EnvWrapper(Env* t) : target_(t) {}
|
||||||
virtual ~EnvWrapper();
|
virtual ~EnvWrapper();
|
||||||
|
|
||||||
// Return the target to which this Env forwards all calls
|
// Return the target to which this Env forwards all calls.
|
||||||
Env* target() const { return target_; }
|
Env* target() const { return target_; }
|
||||||
|
|
||||||
// The following text is boilerplate that forwards all methods to target()
|
// The following text is boilerplate that forwards all methods to target().
|
||||||
Status NewSequentialFile(const std::string& f, SequentialFile** r) {
|
Status NewSequentialFile(const std::string& f, SequentialFile** r) override {
|
||||||
return target_->NewSequentialFile(f, r);
|
return target_->NewSequentialFile(f, r);
|
||||||
}
|
}
|
||||||
Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
|
Status NewRandomAccessFile(const std::string& f,
|
||||||
|
RandomAccessFile** r) override {
|
||||||
return target_->NewRandomAccessFile(f, r);
|
return target_->NewRandomAccessFile(f, r);
|
||||||
}
|
}
|
||||||
Status NewWritableFile(const std::string& f, WritableFile** r) {
|
Status NewWritableFile(const std::string& f, WritableFile** r) override {
|
||||||
return target_->NewWritableFile(f, r);
|
return target_->NewWritableFile(f, r);
|
||||||
}
|
}
|
||||||
Status NewAppendableFile(const std::string& f, WritableFile** r) {
|
Status NewAppendableFile(const std::string& f, WritableFile** r) override {
|
||||||
return target_->NewAppendableFile(f, r);
|
return target_->NewAppendableFile(f, r);
|
||||||
}
|
}
|
||||||
bool FileExists(const std::string& f) { return target_->FileExists(f); }
|
bool FileExists(const std::string& f) override {
|
||||||
Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
|
return target_->FileExists(f);
|
||||||
|
}
|
||||||
|
Status GetChildren(const std::string& dir,
|
||||||
|
std::vector<std::string>* r) override {
|
||||||
return target_->GetChildren(dir, r);
|
return target_->GetChildren(dir, r);
|
||||||
}
|
}
|
||||||
Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); }
|
Status DeleteFile(const std::string& f) override {
|
||||||
Status CreateDir(const std::string& d) { return target_->CreateDir(d); }
|
return target_->DeleteFile(f);
|
||||||
Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); }
|
}
|
||||||
Status GetFileSize(const std::string& f, uint64_t* s) {
|
Status CreateDir(const std::string& d) override {
|
||||||
|
return target_->CreateDir(d);
|
||||||
|
}
|
||||||
|
Status DeleteDir(const std::string& d) override {
|
||||||
|
return target_->DeleteDir(d);
|
||||||
|
}
|
||||||
|
Status GetFileSize(const std::string& f, uint64_t* s) override {
|
||||||
return target_->GetFileSize(f, s);
|
return target_->GetFileSize(f, s);
|
||||||
}
|
}
|
||||||
Status RenameFile(const std::string& s, const std::string& t) {
|
Status RenameFile(const std::string& s, const std::string& t) override {
|
||||||
return target_->RenameFile(s, t);
|
return target_->RenameFile(s, t);
|
||||||
}
|
}
|
||||||
Status LockFile(const std::string& f, FileLock** l) {
|
Status LockFile(const std::string& f, FileLock** l) override {
|
||||||
return target_->LockFile(f, l);
|
return target_->LockFile(f, l);
|
||||||
}
|
}
|
||||||
Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); }
|
Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); }
|
||||||
void Schedule(void (*f)(void*), void* a) {
|
void Schedule(void (*f)(void*), void* a) override {
|
||||||
return target_->Schedule(f, a);
|
return target_->Schedule(f, a);
|
||||||
}
|
}
|
||||||
void StartThread(void (*f)(void*), void* a) {
|
void StartThread(void (*f)(void*), void* a) override {
|
||||||
return target_->StartThread(f, a);
|
return target_->StartThread(f, a);
|
||||||
}
|
}
|
||||||
virtual Status GetTestDirectory(std::string* path) {
|
Status GetTestDirectory(std::string* path) override {
|
||||||
return target_->GetTestDirectory(path);
|
return target_->GetTestDirectory(path);
|
||||||
}
|
}
|
||||||
virtual Status NewLogger(const std::string& fname, Logger** result) {
|
Status NewLogger(const std::string& fname, Logger** result) override {
|
||||||
return target_->NewLogger(fname, result);
|
return target_->NewLogger(fname, result);
|
||||||
}
|
}
|
||||||
uint64_t NowMicros() {
|
uint64_t NowMicros() override { return target_->NowMicros(); }
|
||||||
return target_->NowMicros();
|
void SleepForMicroseconds(int micros) override {
|
||||||
}
|
|
||||||
void SleepForMicroseconds(int micros) {
|
|
||||||
target_->SleepForMicroseconds(micros);
|
target_->SleepForMicroseconds(micros);
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Env* target_;
|
Env* target_;
|
||||||
};
|
};
|
||||||
|
|
||||||
}  // namespace leveldb

// Redefine DeleteFile if necessary.
#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
#if defined(UNICODE)
#define DeleteFile DeleteFileW
#else
#define DeleteFile DeleteFileA
#endif  // defined(UNICODE)
#endif  // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)

#endif  // STORAGE_LEVELDB_INCLUDE_ENV_H_
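A hedged sketch of the pattern EnvWrapper exists for: override one method, forward everything else to target(). CountingEnv is a made-up name, not a LevelDB class; such a wrapper would typically be installed by assigning its address to Options::env before DB::Open.

// Sketch: counts WritableFile creations, delegates the rest to the base Env.
#include <atomic>
#include <string>

#include "leveldb/env.h"

class CountingEnv : public leveldb::EnvWrapper {
 public:
  explicit CountingEnv(leveldb::Env* base) : leveldb::EnvWrapper(base) {}

  leveldb::Status NewWritableFile(const std::string& fname,
                                  leveldb::WritableFile** result) override {
    writable_files_created_.fetch_add(1);
    return target()->NewWritableFile(fname, result);
  }

  int writable_files_created() const { return writable_files_created_.load(); }

 private:
  std::atomic<int> writable_files_created_{0};
};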
33
include/leveldb/export.h
Normal file
@ -0,0 +1,33 @@
// Copyright (c) 2017 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_
#define STORAGE_LEVELDB_INCLUDE_EXPORT_H_

#if !defined(LEVELDB_EXPORT)

#if defined(LEVELDB_SHARED_LIBRARY)
#if defined(_WIN32)

#if defined(LEVELDB_COMPILE_LIBRARY)
#define LEVELDB_EXPORT __declspec(dllexport)
#else
#define LEVELDB_EXPORT __declspec(dllimport)
#endif  // defined(LEVELDB_COMPILE_LIBRARY)

#else  // defined(_WIN32)
#if defined(LEVELDB_COMPILE_LIBRARY)
#define LEVELDB_EXPORT __attribute__((visibility("default")))
#else
#define LEVELDB_EXPORT
#endif
#endif  // defined(_WIN32)

#else  // defined(LEVELDB_SHARED_LIBRARY)
#define LEVELDB_EXPORT
#endif

#endif  // !defined(LEVELDB_EXPORT)

#endif  // STORAGE_LEVELDB_INCLUDE_EXPORT_H_
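A sketch of how the macro is consumed by the headers changed in this patch (DemoHandle is a stand-in, not a LevelDB type). Presumably the build system defines LEVELDB_SHARED_LIBRARY for shared-library builds and additionally LEVELDB_COMPILE_LIBRARY while compiling leveldb itself, which selects the export/import/visibility branch above; in a static build the macro expands to nothing and the annotation is harmless.

// Sketch only: annotating a public type the same way the LevelDB headers do.
#include "leveldb/export.h"

class LEVELDB_EXPORT DemoHandle {
 public:
  int value() const { return 42; }
};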
@ -18,11 +18,13 @@
|
|||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Slice;
|
class Slice;
|
||||||
|
|
||||||
class FilterPolicy {
|
class LEVELDB_EXPORT FilterPolicy {
|
||||||
public:
|
public:
|
||||||
virtual ~FilterPolicy();
|
virtual ~FilterPolicy();
|
||||||
|
|
||||||
@ -38,8 +40,8 @@ class FilterPolicy {
|
|||||||
//
|
//
|
||||||
// Warning: do not change the initial contents of *dst. Instead,
|
// Warning: do not change the initial contents of *dst. Instead,
|
||||||
// append the newly constructed filter to *dst.
|
// append the newly constructed filter to *dst.
|
||||||
virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
|
virtual void CreateFilter(const Slice* keys, int n,
|
||||||
const = 0;
|
std::string* dst) const = 0;
|
||||||
|
|
||||||
// "filter" contains the data appended by a preceding call to
|
// "filter" contains the data appended by a preceding call to
|
||||||
// CreateFilter() on this class. This method must return true if
|
// CreateFilter() on this class. This method must return true if
|
||||||
@ -63,8 +65,8 @@ class FilterPolicy {
|
|||||||
// ignores trailing spaces, it would be incorrect to use a
|
// ignores trailing spaces, it would be incorrect to use a
|
||||||
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
|
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
|
||||||
// trailing spaces in keys.
|
// trailing spaces in keys.
|
||||||
extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
|
LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
|
||||||
|
|
||||||
}
|
} // namespace leveldb
|
||||||
|
|
||||||
#endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
|
#endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
|
||||||
|
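A minimal sketch of wiring NewBloomFilterPolicy() into a database, as the comment above recommends. The path and the 10 bits-per-key figure are arbitrary choices, and the caller keeps ownership of the policy object, so it is deleted only after the database is closed.

// Sketch: enabling the Bloom filter policy for a database.
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  options.filter_policy = leveldb::NewBloomFilterPolicy(10);  // ~10 bits per key

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb_bloom_demo", &db);

  // ... reads now consult the per-table filters before touching disk ...

  delete db;                     // close the database first
  delete options.filter_policy;  // the caller owns the policy object
  return s.ok() ? 0 : 1;
}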
@ -15,14 +15,19 @@
|
|||||||
#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
|
#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
|
||||||
#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
|
#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/slice.h"
|
#include "leveldb/slice.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Iterator {
|
class LEVELDB_EXPORT Iterator {
|
||||||
public:
|
public:
|
||||||
Iterator();
|
Iterator();
|
||||||
|
|
||||||
|
Iterator(const Iterator&) = delete;
|
||||||
|
Iterator& operator=(const Iterator&) = delete;
|
||||||
|
|
||||||
virtual ~Iterator();
|
virtual ~Iterator();
|
||||||
|
|
||||||
// An iterator is either positioned at a key/value pair, or
|
// An iterator is either positioned at a key/value pair, or
|
||||||
@ -72,28 +77,35 @@ class Iterator {
|
|||||||
//
|
//
|
||||||
// Note that unlike all of the preceding methods, this method is
|
// Note that unlike all of the preceding methods, this method is
|
||||||
// not abstract and therefore clients should not override it.
|
// not abstract and therefore clients should not override it.
|
||||||
typedef void (*CleanupFunction)(void* arg1, void* arg2);
|
using CleanupFunction = void (*)(void* arg1, void* arg2);
|
||||||
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
|
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
struct Cleanup {
|
// Cleanup functions are stored in a single-linked list.
|
||||||
|
// The list's head node is inlined in the iterator.
|
||||||
|
struct CleanupNode {
|
||||||
|
// True if the node is not used. Only head nodes might be unused.
|
||||||
|
bool IsEmpty() const { return function == nullptr; }
|
||||||
|
// Invokes the cleanup function.
|
||||||
|
void Run() {
|
||||||
|
assert(function != nullptr);
|
||||||
|
(*function)(arg1, arg2);
|
||||||
|
}
|
||||||
|
|
||||||
|
// The head node is used if the function pointer is not null.
|
||||||
CleanupFunction function;
|
CleanupFunction function;
|
||||||
void* arg1;
|
void* arg1;
|
||||||
void* arg2;
|
void* arg2;
|
||||||
Cleanup* next;
|
CleanupNode* next;
|
||||||
};
|
};
|
||||||
Cleanup cleanup_;
|
CleanupNode cleanup_head_;
|
||||||
|
|
||||||
// No copying allowed
|
|
||||||
Iterator(const Iterator&);
|
|
||||||
void operator=(const Iterator&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Return an empty iterator (yields nothing).
|
// Return an empty iterator (yields nothing).
|
||||||
extern Iterator* NewEmptyIterator();
|
LEVELDB_EXPORT Iterator* NewEmptyIterator();
|
||||||
|
|
||||||
// Return an empty iterator with the specified status.
|
// Return an empty iterator with the specified status.
|
||||||
extern Iterator* NewErrorIterator(const Status& status);
|
LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status);
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
|
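A sketch of RegisterCleanup() from client code (FreeBuffer and the scratch string are hypothetical helpers, not LevelDB APIs): the registered callback runs when the iterator is destroyed, which is handy for tying an auxiliary allocation to the iterator's lifetime.

// Sketch: freeing a side buffer when the iterator that uses it goes away.
#include <string>

#include "leveldb/db.h"
#include "leveldb/iterator.h"

namespace {

void FreeBuffer(void* arg1, void* /*arg2*/) {
  delete static_cast<std::string*>(arg1);
}

leveldb::Iterator* NewIteratorWithScratch(leveldb::DB* db) {
  auto* scratch = new std::string;  // must live as long as the iterator
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  it->RegisterCleanup(FreeBuffer, scratch, nullptr);
  return it;  // deleting the iterator later also frees the scratch buffer
}

}  // namespace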
@ -7,6 +7,8 @@
|
|||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Cache;
|
class Cache;
|
||||||
@ -23,12 +25,15 @@ class Snapshot;
|
|||||||
enum CompressionType {
|
enum CompressionType {
|
||||||
// NOTE: do not change the values of existing entries, as these are
|
// NOTE: do not change the values of existing entries, as these are
|
||||||
// part of the persistent format on disk.
|
// part of the persistent format on disk.
|
||||||
kNoCompression = 0x0,
|
kNoCompression = 0x0,
|
||||||
kSnappyCompression = 0x1
|
kSnappyCompression = 0x1
|
||||||
};
|
};
|
||||||
|
|
||||||
// Options to control the behavior of a database (passed to DB::Open)
|
// Options to control the behavior of a database (passed to DB::Open)
|
||||||
struct Options {
|
struct LEVELDB_EXPORT Options {
|
||||||
|
// Create an Options object with default values for all fields.
|
||||||
|
Options();
|
||||||
|
|
||||||
// -------------------
|
// -------------------
|
||||||
// Parameters that affect behavior
|
// Parameters that affect behavior
|
||||||
|
|
||||||
@ -41,20 +46,17 @@ struct Options {
|
|||||||
const Comparator* comparator;
|
const Comparator* comparator;
|
||||||
|
|
||||||
// If true, the database will be created if it is missing.
|
// If true, the database will be created if it is missing.
|
||||||
// Default: false
|
bool create_if_missing = false;
|
||||||
bool create_if_missing;
|
|
||||||
|
|
||||||
// If true, an error is raised if the database already exists.
|
// If true, an error is raised if the database already exists.
|
||||||
// Default: false
|
bool error_if_exists = false;
|
||||||
bool error_if_exists;
|
|
||||||
|
|
||||||
// If true, the implementation will do aggressive checking of the
|
// If true, the implementation will do aggressive checking of the
|
||||||
// data it is processing and will stop early if it detects any
|
// data it is processing and will stop early if it detects any
|
||||||
// errors. This may have unforeseen ramifications: for example, a
|
// errors. This may have unforeseen ramifications: for example, a
|
||||||
// corruption of one DB entry may cause a large number of entries to
|
// corruption of one DB entry may cause a large number of entries to
|
||||||
// become unreadable or for the entire DB to become unopenable.
|
// become unreadable or for the entire DB to become unopenable.
|
||||||
// Default: false
|
bool paranoid_checks = false;
|
||||||
bool paranoid_checks;
|
|
||||||
|
|
||||||
// Use the specified object to interact with the environment,
|
// Use the specified object to interact with the environment,
|
||||||
// e.g. to read/write files, schedule background work, etc.
|
// e.g. to read/write files, schedule background work, etc.
|
||||||
@ -62,10 +64,9 @@ struct Options {
|
|||||||
Env* env;
|
Env* env;
|
||||||
|
|
||||||
// Any internal progress/error information generated by the db will
|
// Any internal progress/error information generated by the db will
|
||||||
// be written to info_log if it is non-NULL, or to a file stored
|
// be written to info_log if it is non-null, or to a file stored
|
||||||
// in the same directory as the DB contents if info_log is NULL.
|
// in the same directory as the DB contents if info_log is null.
|
||||||
// Default: NULL
|
Logger* info_log = nullptr;
|
||||||
Logger* info_log;
|
|
||||||
|
|
||||||
// -------------------
|
// -------------------
|
||||||
// Parameters that affect performance
|
// Parameters that affect performance
|
||||||
@ -78,39 +79,30 @@ struct Options {
|
|||||||
// so you may wish to adjust this parameter to control memory usage.
|
// so you may wish to adjust this parameter to control memory usage.
|
||||||
// Also, a larger write buffer will result in a longer recovery time
|
// Also, a larger write buffer will result in a longer recovery time
|
||||||
// the next time the database is opened.
|
// the next time the database is opened.
|
||||||
//
|
size_t write_buffer_size = 4 * 1024 * 1024;
|
||||||
// Default: 4MB
|
|
||||||
size_t write_buffer_size;
|
|
||||||
|
|
||||||
// Number of open files that can be used by the DB. You may need to
|
// Number of open files that can be used by the DB. You may need to
|
||||||
// increase this if your database has a large working set (budget
|
// increase this if your database has a large working set (budget
|
||||||
// one open file per 2MB of working set).
|
// one open file per 2MB of working set).
|
||||||
//
|
int max_open_files = 1000;
|
||||||
// Default: 1000
|
|
||||||
int max_open_files;
|
|
||||||
|
|
||||||
// Control over blocks (user data is stored in a set of blocks, and
|
// Control over blocks (user data is stored in a set of blocks, and
|
||||||
// a block is the unit of reading from disk).
|
// a block is the unit of reading from disk).
|
||||||
|
|
||||||
// If non-NULL, use the specified cache for blocks.
|
// If non-null, use the specified cache for blocks.
|
||||||
// If NULL, leveldb will automatically create and use an 8MB internal cache.
|
// If null, leveldb will automatically create and use an 8MB internal cache.
|
||||||
// Default: NULL
|
Cache* block_cache = nullptr;
|
||||||
Cache* block_cache;
|
|
||||||
|
|
||||||
// Approximate size of user data packed per block. Note that the
|
// Approximate size of user data packed per block. Note that the
|
||||||
// block size specified here corresponds to uncompressed data. The
|
// block size specified here corresponds to uncompressed data. The
|
||||||
// actual size of the unit read from disk may be smaller if
|
// actual size of the unit read from disk may be smaller if
|
||||||
// compression is enabled. This parameter can be changed dynamically.
|
// compression is enabled. This parameter can be changed dynamically.
|
||||||
//
|
size_t block_size = 4 * 1024;
|
||||||
// Default: 4K
|
|
||||||
size_t block_size;
|
|
||||||
|
|
||||||
// Number of keys between restart points for delta encoding of keys.
|
// Number of keys between restart points for delta encoding of keys.
|
||||||
// This parameter can be changed dynamically. Most clients should
|
// This parameter can be changed dynamically. Most clients should
|
||||||
// leave this parameter alone.
|
// leave this parameter alone.
|
||||||
//
|
int block_restart_interval = 16;
|
||||||
// Default: 16
|
|
||||||
int block_restart_interval;
|
|
||||||
|
|
||||||
// Leveldb will write up to this amount of bytes to a file before
|
// Leveldb will write up to this amount of bytes to a file before
|
||||||
// switching to a new one.
|
// switching to a new one.
|
||||||
@ -120,9 +112,7 @@ struct Options {
|
|||||||
// compactions and hence longer latency/performance hiccups.
|
// compactions and hence longer latency/performance hiccups.
|
||||||
// Another reason to increase this parameter might be when you are
|
// Another reason to increase this parameter might be when you are
|
||||||
// initially populating a large database.
|
// initially populating a large database.
|
||||||
//
|
size_t max_file_size = 2 * 1024 * 1024;
|
||||||
// Default: 2MB
|
|
||||||
size_t max_file_size;
|
|
||||||
|
|
||||||
// Compress blocks using the specified compression algorithm. This
|
// Compress blocks using the specified compression algorithm. This
|
||||||
// parameter can be changed dynamically.
|
// parameter can be changed dynamically.
|
||||||
@ -138,53 +128,43 @@ struct Options {
|
|||||||
// worth switching to kNoCompression. Even if the input data is
|
// worth switching to kNoCompression. Even if the input data is
|
||||||
// incompressible, the kSnappyCompression implementation will
|
// incompressible, the kSnappyCompression implementation will
|
||||||
// efficiently detect that and will switch to uncompressed mode.
|
// efficiently detect that and will switch to uncompressed mode.
|
||||||
CompressionType compression;
|
CompressionType compression = kSnappyCompression;
|
||||||
|
|
||||||
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
|
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
|
||||||
// when a database is opened. This can significantly speed up open.
|
// when a database is opened. This can significantly speed up open.
|
||||||
//
|
//
|
||||||
// Default: currently false, but may become true later.
|
// Default: currently false, but may become true later.
|
||||||
bool reuse_logs;
|
bool reuse_logs = false;
|
||||||
|
|
||||||
// If non-NULL, use the specified filter policy to reduce disk reads.
|
// If non-null, use the specified filter policy to reduce disk reads.
|
||||||
// Many applications will benefit from passing the result of
|
// Many applications will benefit from passing the result of
|
||||||
// NewBloomFilterPolicy() here.
|
// NewBloomFilterPolicy() here.
|
||||||
//
|
const FilterPolicy* filter_policy = nullptr;
|
||||||
// Default: NULL
|
|
||||||
const FilterPolicy* filter_policy;
|
|
||||||
|
|
||||||
// Create an Options object with default values for all fields.
|
|
||||||
Options();
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Options that control read operations
|
// Options that control read operations
|
||||||
struct ReadOptions {
|
struct LEVELDB_EXPORT ReadOptions {
|
||||||
|
ReadOptions() = default;
|
||||||
|
|
||||||
// If true, all data read from underlying storage will be
|
// If true, all data read from underlying storage will be
|
||||||
// verified against corresponding checksums.
|
// verified against corresponding checksums.
|
||||||
// Default: false
|
bool verify_checksums = false;
|
||||||
bool verify_checksums;
|
|
||||||
|
|
||||||
// Should the data read for this iteration be cached in memory?
|
// Should the data read for this iteration be cached in memory?
|
||||||
// Callers may wish to set this field to false for bulk scans.
|
// Callers may wish to set this field to false for bulk scans.
|
||||||
// Default: true
|
bool fill_cache = true;
|
||||||
bool fill_cache;
|
|
||||||
|
|
||||||
// If "snapshot" is non-NULL, read as of the supplied snapshot
|
// If "snapshot" is non-null, read as of the supplied snapshot
|
||||||
// (which must belong to the DB that is being read and which must
|
// (which must belong to the DB that is being read and which must
|
||||||
// not have been released). If "snapshot" is NULL, use an implicit
|
// not have been released). If "snapshot" is null, use an implicit
|
||||||
// snapshot of the state at the beginning of this read operation.
|
// snapshot of the state at the beginning of this read operation.
|
||||||
// Default: NULL
|
const Snapshot* snapshot = nullptr;
|
||||||
const Snapshot* snapshot;
|
|
||||||
|
|
||||||
ReadOptions()
|
|
||||||
: verify_checksums(false),
|
|
||||||
fill_cache(true),
|
|
||||||
snapshot(NULL) {
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Options that control write operations
|
// Options that control write operations
|
||||||
struct WriteOptions {
|
struct LEVELDB_EXPORT WriteOptions {
|
||||||
|
WriteOptions() = default;
|
||||||
|
|
||||||
// If true, the write will be flushed from the operating system
|
// If true, the write will be flushed from the operating system
|
||||||
// buffer cache (by calling WritableFile::Sync()) before the write
|
// buffer cache (by calling WritableFile::Sync()) before the write
|
||||||
// is considered complete. If this flag is true, writes will be
|
// is considered complete. If this flag is true, writes will be
|
||||||
@ -199,13 +179,7 @@ struct WriteOptions {
|
|||||||
// crash semantics as the "write()" system call. A DB write
|
// crash semantics as the "write()" system call. A DB write
|
||||||
// with sync==true has similar crash semantics to a "write()"
|
// with sync==true has similar crash semantics to a "write()"
|
||||||
// system call followed by "fsync()".
|
// system call followed by "fsync()".
|
||||||
//
|
bool sync = false;
|
||||||
// Default: false
|
|
||||||
bool sync;
|
|
||||||
|
|
||||||
WriteOptions()
|
|
||||||
: sync(false) {
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
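Because the defaults now live in the struct definitions themselves, a default-constructed options object is immediately usable; a short sketch, with the values in the comments mirroring the initializers shown above:

// Sketch: relying on the in-class default member initializers.
#include "leveldb/options.h"

int main() {
  leveldb::Options options;            // write_buffer_size == 4 * 1024 * 1024, etc.
  options.create_if_missing = true;    // override only what you need

  leveldb::ReadOptions read_options;   // verify_checksums == false, fill_cache == true
  leveldb::WriteOptions write_options; // sync == false
  write_options.sync = true;           // opt in to fsync-like durability

  (void)options;
  (void)read_options;
  (void)write_options;
  return 0;
}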
@ -18,23 +18,30 @@
|
|||||||
#include <assert.h>
|
#include <assert.h>
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Slice {
|
class LEVELDB_EXPORT Slice {
|
||||||
public:
|
public:
|
||||||
// Create an empty slice.
|
// Create an empty slice.
|
||||||
Slice() : data_(""), size_(0) { }
|
Slice() : data_(""), size_(0) {}
|
||||||
|
|
||||||
// Create a slice that refers to d[0,n-1].
|
// Create a slice that refers to d[0,n-1].
|
||||||
Slice(const char* d, size_t n) : data_(d), size_(n) { }
|
Slice(const char* d, size_t n) : data_(d), size_(n) {}
|
||||||
|
|
||||||
// Create a slice that refers to the contents of "s"
|
// Create a slice that refers to the contents of "s"
|
||||||
Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
|
Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
|
||||||
|
|
||||||
// Create a slice that refers to s[0,strlen(s)-1]
|
// Create a slice that refers to s[0,strlen(s)-1]
|
||||||
Slice(const char* s) : data_(s), size_(strlen(s)) { }
|
Slice(const char* s) : data_(s), size_(strlen(s)) {}
|
||||||
|
|
||||||
|
// Intentionally copyable.
|
||||||
|
Slice(const Slice&) = default;
|
||||||
|
Slice& operator=(const Slice&) = default;
|
||||||
|
|
||||||
// Return a pointer to the beginning of the referenced data
|
// Return a pointer to the beginning of the referenced data
|
||||||
const char* data() const { return data_; }
|
const char* data() const { return data_; }
|
||||||
@ -53,7 +60,10 @@ class Slice {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Change this slice to refer to an empty array
|
// Change this slice to refer to an empty array
|
||||||
void clear() { data_ = ""; size_ = 0; }
|
void clear() {
|
||||||
|
data_ = "";
|
||||||
|
size_ = 0;
|
||||||
|
}
|
||||||
|
|
||||||
// Drop the first "n" bytes from this slice.
|
// Drop the first "n" bytes from this slice.
|
||||||
void remove_prefix(size_t n) {
|
void remove_prefix(size_t n) {
|
||||||
@ -73,15 +83,12 @@ class Slice {
|
|||||||
|
|
||||||
// Return true iff "x" is a prefix of "*this"
|
// Return true iff "x" is a prefix of "*this"
|
||||||
bool starts_with(const Slice& x) const {
|
bool starts_with(const Slice& x) const {
|
||||||
return ((size_ >= x.size_) &&
|
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
|
||||||
(memcmp(data_, x.data_, x.size_) == 0));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const char* data_;
|
const char* data_;
|
||||||
size_t size_;
|
size_t size_;
|
||||||
|
|
||||||
// Intentionally copyable
|
|
||||||
};
|
};
|
||||||
|
|
||||||
inline bool operator==(const Slice& x, const Slice& y) {
|
inline bool operator==(const Slice& x, const Slice& y) {
|
||||||
@ -89,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
|
|||||||
(memcmp(x.data(), y.data(), x.size()) == 0));
|
(memcmp(x.data(), y.data(), x.size()) == 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
inline bool operator!=(const Slice& x, const Slice& y) {
|
inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
|
||||||
return !(x == y);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline int Slice::compare(const Slice& b) const {
|
inline int Slice::compare(const Slice& b) const {
|
||||||
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
|
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
|
||||||
int r = memcmp(data_, b.data_, min_len);
|
int r = memcmp(data_, b.data_, min_len);
|
||||||
if (r == 0) {
|
if (r == 0) {
|
||||||
if (size_ < b.size_) r = -1;
|
if (size_ < b.size_)
|
||||||
else if (size_ > b.size_) r = +1;
|
r = -1;
|
||||||
|
else if (size_ > b.size_)
|
||||||
|
r = +1;
|
||||||
}
|
}
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
|
|
||||||
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
|
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
|
||||||
|
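A small usage sketch of Slice's non-owning semantics; everything called here is part of the interface shown above, and the backing string is an arbitrary example. The referenced bytes must outlive the Slice.

// Sketch: Slice is a view, not an owner.
#include <cassert>
#include <string>

#include "leveldb/slice.h"

int main() {
  std::string backing = "level/db";
  leveldb::Slice s(backing);            // refers to backing's bytes

  assert(s.starts_with("level"));
  s.remove_prefix(6);                   // now refers to "db"
  assert(s == leveldb::Slice("db"));
  assert(s.compare("da") > 0);          // bytewise comparison
  return 0;
}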
@ -13,20 +13,25 @@
|
|||||||
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
|
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
|
||||||
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
|
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/slice.h"
|
#include "leveldb/slice.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Status {
|
class LEVELDB_EXPORT Status {
|
||||||
public:
|
public:
|
||||||
// Create a success status.
|
// Create a success status.
|
||||||
Status() : state_(NULL) { }
|
Status() noexcept : state_(nullptr) {}
|
||||||
~Status() { delete[] state_; }
|
~Status() { delete[] state_; }
|
||||||
|
|
||||||
// Copy the specified status.
|
Status(const Status& rhs);
|
||||||
Status(const Status& s);
|
Status& operator=(const Status& rhs);
|
||||||
void operator=(const Status& s);
|
|
||||||
|
Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
|
||||||
|
Status& operator=(Status&& rhs) noexcept;
|
||||||
|
|
||||||
// Return a success status.
|
// Return a success status.
|
||||||
static Status OK() { return Status(); }
|
static Status OK() { return Status(); }
|
||||||
@ -49,7 +54,7 @@ class Status {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns true iff the status indicates success.
|
// Returns true iff the status indicates success.
|
||||||
bool ok() const { return (state_ == NULL); }
|
bool ok() const { return (state_ == nullptr); }
|
||||||
|
|
||||||
// Returns true iff the status indicates a NotFound error.
|
// Returns true iff the status indicates a NotFound error.
|
||||||
bool IsNotFound() const { return code() == kNotFound; }
|
bool IsNotFound() const { return code() == kNotFound; }
|
||||||
@ -71,13 +76,6 @@ class Status {
|
|||||||
std::string ToString() const;
|
std::string ToString() const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
// OK status has a NULL state_. Otherwise, state_ is a new[] array
|
|
||||||
// of the following form:
|
|
||||||
// state_[0..3] == length of message
|
|
||||||
// state_[4] == code
|
|
||||||
// state_[5..] == message
|
|
||||||
const char* state_;
|
|
||||||
|
|
||||||
enum Code {
|
enum Code {
|
||||||
kOk = 0,
|
kOk = 0,
|
||||||
kNotFound = 1,
|
kNotFound = 1,
|
||||||
@ -88,23 +86,35 @@ class Status {
|
|||||||
};
|
};
|
||||||
|
|
||||||
Code code() const {
|
Code code() const {
|
||||||
return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
|
return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]);
|
||||||
}
|
}
|
||||||
|
|
||||||
Status(Code code, const Slice& msg, const Slice& msg2);
|
Status(Code code, const Slice& msg, const Slice& msg2);
|
||||||
static const char* CopyState(const char* s);
|
static const char* CopyState(const char* s);
|
||||||
|
|
||||||
|
// OK status has a null state_. Otherwise, state_ is a new[] array
|
||||||
|
// of the following form:
|
||||||
|
// state_[0..3] == length of message
|
||||||
|
// state_[4] == code
|
||||||
|
// state_[5..] == message
|
||||||
|
const char* state_;
|
||||||
};
|
};
|
||||||
|
|
||||||
inline Status::Status(const Status& s) {
|
inline Status::Status(const Status& rhs) {
|
||||||
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
|
state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
|
||||||
}
|
}
|
||||||
inline void Status::operator=(const Status& s) {
|
inline Status& Status::operator=(const Status& rhs) {
|
||||||
// The following condition catches both aliasing (when this == &s),
|
// The following condition catches both aliasing (when this == &rhs),
|
||||||
// and the common case where both s and *this are ok.
|
// and the common case where both rhs and *this are ok.
|
||||||
if (state_ != s.state_) {
|
if (state_ != rhs.state_) {
|
||||||
delete[] state_;
|
delete[] state_;
|
||||||
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
|
state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
|
||||||
}
|
}
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
inline Status& Status::operator=(Status&& rhs) noexcept {
|
||||||
|
std::swap(state_, rhs.state_);
|
||||||
|
return *this;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
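A sketch exercising the copy operations and the newly added move operations; the message text is arbitrary. Moving steals the heap-allocated state, while copying duplicates it.

// Sketch: Status copy vs. move after this change.
#include <cassert>
#include <utility>

#include "leveldb/status.h"

int main() {
  leveldb::Status not_found = leveldb::Status::NotFound("missing key");
  leveldb::Status copy = not_found;              // deep-copies the message
  leveldb::Status moved = std::move(not_found);  // steals the state array

  assert(copy.IsNotFound());
  assert(moved.IsNotFound());
  assert(moved.ToString() == copy.ToString());
  return 0;
}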
@ -6,6 +6,8 @@
|
|||||||
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
|
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/iterator.h"
|
#include "leveldb/iterator.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
@ -21,7 +23,7 @@ class TableCache;
|
|||||||
// A Table is a sorted map from strings to strings. Tables are
|
// A Table is a sorted map from strings to strings. Tables are
|
||||||
// immutable and persistent. A Table may be safely accessed from
|
// immutable and persistent. A Table may be safely accessed from
|
||||||
// multiple threads without external synchronization.
|
// multiple threads without external synchronization.
|
||||||
class Table {
|
class LEVELDB_EXPORT Table {
|
||||||
public:
|
public:
|
||||||
// Attempt to open the table that is stored in bytes [0..file_size)
|
// Attempt to open the table that is stored in bytes [0..file_size)
|
||||||
// of "file", and read the metadata entries necessary to allow
|
// of "file", and read the metadata entries necessary to allow
|
||||||
@ -30,15 +32,16 @@ class Table {
|
|||||||
// If successful, returns ok and sets "*table" to the newly opened
|
// If successful, returns ok and sets "*table" to the newly opened
|
||||||
// table. The client should delete "*table" when no longer needed.
|
// table. The client should delete "*table" when no longer needed.
|
||||||
// If there was an error while initializing the table, sets "*table"
|
// If there was an error while initializing the table, sets "*table"
|
||||||
// to NULL and returns a non-ok status. Does not take ownership of
|
// to nullptr and returns a non-ok status. Does not take ownership of
|
||||||
// "*source", but the client must ensure that "source" remains live
|
// "*source", but the client must ensure that "source" remains live
|
||||||
// for the duration of the returned table's lifetime.
|
// for the duration of the returned table's lifetime.
|
||||||
//
|
//
|
||||||
// *file must remain live while this Table is in use.
|
// *file must remain live while this Table is in use.
|
||||||
static Status Open(const Options& options,
|
static Status Open(const Options& options, RandomAccessFile* file,
|
||||||
RandomAccessFile* file,
|
uint64_t file_size, Table** table);
|
||||||
uint64_t file_size,
|
|
||||||
Table** table);
|
Table(const Table&) = delete;
|
||||||
|
Table& operator=(const Table&) = delete;
|
||||||
|
|
||||||
~Table();
|
~Table();
|
||||||
|
|
||||||
@ -56,28 +59,24 @@ class Table {
|
|||||||
uint64_t ApproximateOffsetOf(const Slice& key) const;
|
uint64_t ApproximateOffsetOf(const Slice& key) const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
friend class TableCache;
|
||||||
struct Rep;
|
struct Rep;
|
||||||
Rep* rep_;
|
|
||||||
|
|
||||||
explicit Table(Rep* rep) { rep_ = rep; }
|
|
||||||
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
|
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
|
||||||
|
|
||||||
|
explicit Table(Rep* rep) : rep_(rep) {}
|
||||||
|
|
||||||
// Calls (*handle_result)(arg, ...) with the entry found after a call
|
// Calls (*handle_result)(arg, ...) with the entry found after a call
|
||||||
// to Seek(key). May not make such a call if filter policy says
|
// to Seek(key). May not make such a call if filter policy says
|
||||||
// that key is not present.
|
// that key is not present.
|
||||||
friend class TableCache;
|
Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
|
||||||
Status InternalGet(
|
void (*handle_result)(void* arg, const Slice& k,
|
||||||
const ReadOptions&, const Slice& key,
|
const Slice& v));
|
||||||
void* arg,
|
|
||||||
void (*handle_result)(void* arg, const Slice& k, const Slice& v));
|
|
||||||
|
|
||||||
|
|
||||||
void ReadMeta(const Footer& footer);
|
void ReadMeta(const Footer& footer);
|
||||||
void ReadFilter(const Slice& filter_handle_value);
|
void ReadFilter(const Slice& filter_handle_value);
|
||||||
|
|
||||||
// No copying allowed
|
Rep* const rep_;
|
||||||
Table(const Table&);
|
|
||||||
void operator=(const Table&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
@ -14,6 +14,8 @@
|
|||||||
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
|
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/options.h"
|
#include "leveldb/options.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
|
|
||||||
@ -23,13 +25,16 @@ class BlockBuilder;
|
|||||||
class BlockHandle;
|
class BlockHandle;
|
||||||
class WritableFile;
|
class WritableFile;
|
||||||
|
|
||||||
class TableBuilder {
|
class LEVELDB_EXPORT TableBuilder {
|
||||||
public:
|
public:
|
||||||
// Create a builder that will store the contents of the table it is
|
// Create a builder that will store the contents of the table it is
|
||||||
// building in *file. Does not close the file. It is up to the
|
// building in *file. Does not close the file. It is up to the
|
||||||
// caller to close the file after calling Finish().
|
// caller to close the file after calling Finish().
|
||||||
TableBuilder(const Options& options, WritableFile* file);
|
TableBuilder(const Options& options, WritableFile* file);
|
||||||
|
|
||||||
|
TableBuilder(const TableBuilder&) = delete;
|
||||||
|
TableBuilder& operator=(const TableBuilder&) = delete;
|
||||||
|
|
||||||
// REQUIRES: Either Finish() or Abandon() has been called.
|
// REQUIRES: Either Finish() or Abandon() has been called.
|
||||||
~TableBuilder();
|
~TableBuilder();
|
||||||
|
|
||||||
@ -81,10 +86,6 @@ class TableBuilder {
|
|||||||
|
|
||||||
struct Rep;
|
struct Rep;
|
||||||
Rep* rep_;
|
Rep* rep_;
|
||||||
|
|
||||||
// No copying allowed
|
|
||||||
TableBuilder(const TableBuilder&);
|
|
||||||
void operator=(const TableBuilder&);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
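A rough sketch (not from the patch; the path and keys are arbitrary, and error handling is compressed into asserts) of the TableBuilder -> Table::Open round trip, using the reformatted signatures shown above. Keys must be added in sorted order, and the RandomAccessFile must outlive the Table it backs.

// Sketch: write a standalone sstable, then reopen it.
#include <cassert>
#include <cstdint>

#include "leveldb/env.h"
#include "leveldb/options.h"
#include "leveldb/table.h"
#include "leveldb/table_builder.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::Options options;

  leveldb::WritableFile* wfile = nullptr;
  assert(env->NewWritableFile("/tmp/leveldb_demo.ldb", &wfile).ok());
  leveldb::TableBuilder builder(options, wfile);
  builder.Add("aaa", "v1");  // keys added in increasing order
  builder.Add("bbb", "v2");
  assert(builder.Finish().ok());
  uint64_t size = builder.FileSize();
  wfile->Close();
  delete wfile;

  leveldb::RandomAccessFile* rfile = nullptr;
  assert(env->NewRandomAccessFile("/tmp/leveldb_demo.ldb", &rfile).ok());
  leveldb::Table* table = nullptr;
  assert(leveldb::Table::Open(options, rfile, size, &table).ok());
  delete table;
  delete rfile;  // "source" must remain live while the Table is in use
  return 0;
}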
@ -22,15 +22,29 @@
|
|||||||
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
|
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
|
||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Slice;
|
class Slice;
|
||||||
|
|
||||||
class WriteBatch {
|
class LEVELDB_EXPORT WriteBatch {
|
||||||
public:
|
public:
|
||||||
|
class LEVELDB_EXPORT Handler {
|
||||||
|
public:
|
||||||
|
virtual ~Handler();
|
||||||
|
virtual void Put(const Slice& key, const Slice& value) = 0;
|
||||||
|
virtual void Delete(const Slice& key) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
WriteBatch();
|
WriteBatch();
|
||||||
|
|
||||||
|
// Intentionally copyable.
|
||||||
|
WriteBatch(const WriteBatch&) = default;
|
||||||
|
WriteBatch& operator=(const WriteBatch&) = default;
|
||||||
|
|
||||||
~WriteBatch();
|
~WriteBatch();
|
||||||
|
|
||||||
// Store the mapping "key->value" in the database.
|
// Store the mapping "key->value" in the database.
|
||||||
@ -42,21 +56,26 @@ class WriteBatch {
|
|||||||
// Clear all updates buffered in this batch.
|
// Clear all updates buffered in this batch.
|
||||||
void Clear();
|
void Clear();
|
||||||
|
|
||||||
|
// The size of the database changes caused by this batch.
|
||||||
|
//
|
||||||
|
// This number is tied to implementation details, and may change across
|
||||||
|
// releases. It is intended for LevelDB usage metrics.
|
||||||
|
size_t ApproximateSize() const;
|
||||||
|
|
||||||
|
// Copies the operations in "source" to this batch.
|
||||||
|
//
|
||||||
|
// This runs in O(source size) time. However, the constant factor is better
|
||||||
|
// than calling Iterate() over the source batch with a Handler that replicates
|
||||||
|
// the operations into this batch.
|
||||||
|
void Append(const WriteBatch& source);
|
||||||
|
|
||||||
// Support for iterating over the contents of a batch.
|
// Support for iterating over the contents of a batch.
|
||||||
class Handler {
|
|
||||||
public:
|
|
||||||
virtual ~Handler();
|
|
||||||
virtual void Put(const Slice& key, const Slice& value) = 0;
|
|
||||||
virtual void Delete(const Slice& key) = 0;
|
|
||||||
};
|
|
||||||
Status Iterate(Handler* handler) const;
|
Status Iterate(Handler* handler) const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
friend class WriteBatchInternal;
|
friend class WriteBatchInternal;
|
||||||
|
|
||||||
std::string rep_; // See comment in write_batch.cc for the format of rep_
|
std::string rep_; // See comment in write_batch.cc for the format of rep_
|
||||||
|
|
||||||
// Intentionally copyable
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
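A sketch combining the existing Put/Delete calls with the newly documented Append() and ApproximateSize() members; the database path is an arbitrary assumption and error handling is abbreviated.

// Sketch: batching updates, merging a second batch, then committing.
#include <cassert>

#include "leveldb/db.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb_batch_demo", &db);
  assert(s.ok());

  leveldb::WriteBatch batch;
  batch.Put("k1", "v1");
  batch.Delete("k0");

  leveldb::WriteBatch more;
  more.Put("k2", "v2");
  batch.Append(more);                   // O(size of "more")

  assert(batch.ApproximateSize() > 0);  // size of the internal representation
  s = db->Write(leveldb::WriteOptions(), &batch);
  assert(s.ok());
  delete db;
  return 0;
}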
@ -3,9 +3,9 @@
|
|||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
// Test for issue 178: a manual compaction causes deleted data to reappear.
|
// Test for issue 178: a manual compaction causes deleted data to reappear.
|
||||||
|
#include <cstdlib>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
#include <cstdlib>
|
|
||||||
|
|
||||||
#include "leveldb/db.h"
|
#include "leveldb/db.h"
|
||||||
#include "leveldb/write_batch.h"
|
#include "leveldb/write_batch.h"
|
||||||
@ -21,11 +21,9 @@ std::string Key1(int i) {
|
|||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string Key2(int i) {
|
std::string Key2(int i) { return Key1(i) + "_xxx"; }
|
||||||
return Key1(i) + "_xxx";
|
|
||||||
}
|
|
||||||
|
|
||||||
class Issue178 { };
|
class Issue178 {};
|
||||||
|
|
||||||
TEST(Issue178, Test) {
|
TEST(Issue178, Test) {
|
||||||
// Get rid of any state from an old run.
|
// Get rid of any state from an old run.
|
||||||
@ -87,6 +85,4 @@ TEST(Issue178, Test) {
|
|||||||
|
|
||||||
} // anonymous namespace
|
} // anonymous namespace
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
@ -11,14 +11,14 @@
|
|||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
class Issue200 { };
|
class Issue200 {};
|
||||||
|
|
||||||
TEST(Issue200, Test) {
|
TEST(Issue200, Test) {
|
||||||
// Get rid of any state from an old run.
|
// Get rid of any state from an old run.
|
||||||
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
|
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
|
||||||
DestroyDB(dbpath, Options());
|
DestroyDB(dbpath, Options());
|
||||||
|
|
||||||
DB *db;
|
DB* db;
|
||||||
Options options;
|
Options options;
|
||||||
options.create_if_missing = true;
|
options.create_if_missing = true;
|
||||||
ASSERT_OK(DB::Open(options, dbpath, &db));
|
ASSERT_OK(DB::Open(options, dbpath, &db));
|
||||||
@ -31,7 +31,7 @@ TEST(Issue200, Test) {
|
|||||||
ASSERT_OK(db->Put(write_options, "5", "f"));
|
ASSERT_OK(db->Put(write_options, "5", "f"));
|
||||||
|
|
||||||
ReadOptions read_options;
|
ReadOptions read_options;
|
||||||
Iterator *iter = db->NewIterator(read_options);
|
Iterator* iter = db->NewIterator(read_options);
|
||||||
|
|
||||||
// Add an element that should not be reflected in the iterator.
|
// Add an element that should not be reflected in the iterator.
|
||||||
ASSERT_OK(db->Put(write_options, "25", "cd"));
|
ASSERT_OK(db->Put(write_options, "25", "cd"));
|
||||||
@ -54,6 +54,4 @@ TEST(Issue200, Test) {
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
128
issues/issue320_test.cc
Normal file
128
issues/issue320_test.cc
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <cstdlib>
|
||||||
|
#include <iostream>
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "leveldb/db.h"
|
||||||
|
#include "leveldb/write_batch.h"
|
||||||
|
#include "util/testharness.h"
|
||||||
|
|
||||||
|
namespace leveldb {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Creates a random number in the range of [0, max).
|
||||||
|
int GenerateRandomNumber(int max) { return std::rand() % max; }
|
||||||
|
|
||||||
|
std::string CreateRandomString(int32_t index) {
|
||||||
|
static const size_t len = 1024;
|
||||||
|
char bytes[len];
|
||||||
|
size_t i = 0;
|
||||||
|
while (i < 8) {
|
||||||
|
bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
|
||||||
|
++i;
|
||||||
|
}
|
||||||
|
while (i < sizeof(bytes)) {
|
||||||
|
bytes[i] = 'a' + GenerateRandomNumber(26);
|
||||||
|
++i;
|
||||||
|
}
|
||||||
|
return std::string(bytes, sizeof(bytes));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
class Issue320 {};
|
||||||
|
|
||||||
|
TEST(Issue320, Test) {
|
||||||
|
std::srand(0);
|
||||||
|
|
||||||
|
bool delete_before_put = false;
|
||||||
|
bool keep_snapshots = true;
|
||||||
|
|
||||||
|
std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map(
|
||||||
|
10000);
|
||||||
|
std::vector<Snapshot const*> snapshots(100, nullptr);
|
||||||
|
|
||||||
|
DB* db;
|
||||||
|
Options options;
|
||||||
|
options.create_if_missing = true;
|
||||||
|
|
||||||
|
std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
|
||||||
|
ASSERT_OK(DB::Open(options, dbpath, &db));
|
||||||
|
|
||||||
|
uint32_t target_size = 10000;
|
||||||
|
uint32_t num_items = 0;
|
||||||
|
uint32_t count = 0;
|
||||||
|
std::string key;
|
||||||
|
std::string value, old_value;
|
||||||
|
|
||||||
|
WriteOptions writeOptions;
|
||||||
|
ReadOptions readOptions;
|
||||||
|
while (count < 200000) {
|
||||||
|
if ((++count % 1000) == 0) {
|
||||||
|
std::cout << "count: " << count << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
int index = GenerateRandomNumber(test_map.size());
|
||||||
|
WriteBatch batch;
|
||||||
|
|
||||||
|
if (test_map[index] == nullptr) {
|
||||||
|
num_items++;
|
||||||
|
test_map[index].reset(new std::pair<std::string, std::string>(
|
||||||
|
CreateRandomString(index), CreateRandomString(index)));
|
||||||
|
batch.Put(test_map[index]->first, test_map[index]->second);
|
||||||
|
} else {
|
||||||
|
ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
|
||||||
|
if (old_value != test_map[index]->second) {
|
||||||
|
std::cout << "ERROR incorrect value returned by Get" << std::endl;
|
||||||
|
std::cout << " count=" << count << std::endl;
|
||||||
|
std::cout << " old value=" << old_value << std::endl;
|
||||||
|
std::cout << " test_map[index]->second=" << test_map[index]->second
|
||||||
|
<< std::endl;
|
||||||
|
std::cout << " test_map[index]->first=" << test_map[index]->first
|
||||||
|
<< std::endl;
|
||||||
|
std::cout << " index=" << index << std::endl;
|
||||||
|
ASSERT_EQ(old_value, test_map[index]->second);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
|
||||||
|
batch.Delete(test_map[index]->first);
|
||||||
|
test_map[index] = nullptr;
|
||||||
|
--num_items;
|
||||||
|
} else {
|
||||||
|
test_map[index]->second = CreateRandomString(index);
|
||||||
|
if (delete_before_put) batch.Delete(test_map[index]->first);
|
||||||
|
batch.Put(test_map[index]->first, test_map[index]->second);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ASSERT_OK(db->Write(writeOptions, &batch));
|
||||||
|
|
||||||
|
if (keep_snapshots && GenerateRandomNumber(10) == 0) {
|
||||||
|
int i = GenerateRandomNumber(snapshots.size());
|
||||||
|
if (snapshots[i] != nullptr) {
|
||||||
|
db->ReleaseSnapshot(snapshots[i]);
|
||||||
|
}
|
||||||
|
snapshots[i] = db->GetSnapshot();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (Snapshot const* snapshot : snapshots) {
|
||||||
|
if (snapshot) {
|
||||||
|
db->ReleaseSnapshot(snapshot);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
delete db;
|
||||||
|
DestroyDB(dbpath, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace leveldb
|
||||||
|
|
||||||
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
@ -5,6 +5,6 @@ Code in the rest of the package includes "port.h" from this directory.
 "port.h" in turn includes a platform specific "port_<platform>.h" file
 that provides the platform specific implementation.

-See port_posix.h for an example of what must be provided in a platform
+See port_stdcxx.h for an example of what must be provided in a platform
 specific header file.
@ -1,242 +0,0 @@
|
|||||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
||||||
|
|
||||||
// AtomicPointer provides storage for a lock-free pointer.
|
|
||||||
// Platform-dependent implementation of AtomicPointer:
|
|
||||||
// - If the platform provides a cheap barrier, we use it with raw pointers
|
|
||||||
// - If <atomic> is present (on newer versions of gcc, it is), we use
|
|
||||||
// a <atomic>-based AtomicPointer. However we prefer the memory
|
|
||||||
// barrier based version, because at least on a gcc 4.4 32-bit build
|
|
||||||
// on linux, we have encountered a buggy <atomic> implementation.
|
|
||||||
// Also, some <atomic> implementations are much slower than a memory-barrier
|
|
||||||
// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
|
|
||||||
// a barrier based acquire-load).
|
|
||||||
// This code is based on atomicops-internals-* in Google's perftools:
|
|
||||||
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
|
|
||||||
|
|
||||||
#ifndef PORT_ATOMIC_POINTER_H_
|
|
||||||
#define PORT_ATOMIC_POINTER_H_
|
|
||||||
|
|
||||||
#include <stdint.h>
|
|
||||||
#ifdef LEVELDB_ATOMIC_PRESENT
|
|
||||||
#include <atomic>
|
|
||||||
#endif
|
|
||||||
#ifdef OS_WIN
|
|
||||||
#include <windows.h>
|
|
||||||
#endif
|
|
||||||
#ifdef OS_MACOSX
|
|
||||||
#include <libkern/OSAtomic.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(_M_X64) || defined(__x86_64__)
|
|
||||||
#define ARCH_CPU_X86_FAMILY 1
|
|
||||||
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
|
|
||||||
#define ARCH_CPU_X86_FAMILY 1
|
|
||||||
#elif defined(__ARMEL__)
|
|
||||||
#define ARCH_CPU_ARM_FAMILY 1
|
|
||||||
#elif defined(__aarch64__)
|
|
||||||
#define ARCH_CPU_ARM64_FAMILY 1
|
|
||||||
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
|
|
||||||
#define ARCH_CPU_PPC_FAMILY 1
|
|
||||||
#elif defined(__mips__)
|
|
||||||
#define ARCH_CPU_MIPS_FAMILY 1
|
|
||||||
#endif
|
|
||||||
|
|
||||||
namespace leveldb {
|
|
||||||
namespace port {
|
|
||||||
|
|
||||||
// Define MemoryBarrier() if available
|
|
||||||
// Windows on x86
|
|
||||||
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
|
|
||||||
// windows.h already provides a MemoryBarrier(void) macro
|
|
||||||
// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// Mac OS
|
|
||||||
#elif defined(OS_MACOSX)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
OSMemoryBarrier();
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// Gcc on x86
|
|
||||||
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
|
|
||||||
// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
|
|
||||||
__asm__ __volatile__("" : : : "memory");
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// Sun Studio
|
|
||||||
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
|
|
||||||
// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
|
|
||||||
asm volatile("" : : : "memory");
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// ARM Linux
|
|
||||||
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
|
|
||||||
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
|
|
||||||
// The Linux ARM kernel provides a highly optimized device-specific memory
|
|
||||||
// barrier function at a fixed memory address that is mapped in every
|
|
||||||
// user-level process.
|
|
||||||
//
|
|
||||||
// This beats using CPU-specific instructions which are, on single-core
|
|
||||||
// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
|
|
||||||
// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
|
|
||||||
// shows that the extra function call cost is completely negligible on
|
|
||||||
// multi-core devices.
|
|
||||||
//
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
(*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// ARM64
|
|
||||||
#elif defined(ARCH_CPU_ARM64_FAMILY)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
asm volatile("dmb sy" : : : "memory");
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// PPC
|
|
||||||
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
// TODO for some powerpc expert: is there a cheaper suitable variant?
|
|
||||||
// Perhaps by having separate barriers for acquire and release ops.
|
|
||||||
asm volatile("sync" : : : "memory");
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
// MIPS
|
|
||||||
#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
|
|
||||||
inline void MemoryBarrier() {
|
|
||||||
__asm__ __volatile__("sync" : : : "memory");
|
|
||||||
}
|
|
||||||
#define LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// AtomicPointer built using platform-specific MemoryBarrier()
|
|
||||||
#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
|
|
||||||
class AtomicPointer {
|
|
||||||
private:
|
|
||||||
void* rep_;
|
|
||||||
public:
|
|
||||||
AtomicPointer() { }
|
|
||||||
explicit AtomicPointer(void* p) : rep_(p) {}
|
|
||||||
inline void* NoBarrier_Load() const { return rep_; }
|
|
||||||
inline void NoBarrier_Store(void* v) { rep_ = v; }
|
|
||||||
inline void* Acquire_Load() const {
|
|
||||||
void* result = rep_;
|
|
||||||
MemoryBarrier();
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
inline void Release_Store(void* v) {
|
|
||||||
MemoryBarrier();
|
|
||||||
rep_ = v;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// AtomicPointer based on <cstdatomic>
|
|
||||||
#elif defined(LEVELDB_ATOMIC_PRESENT)
|
|
||||||
class AtomicPointer {
|
|
||||||
private:
|
|
||||||
std::atomic<void*> rep_;
|
|
||||||
public:
|
|
||||||
AtomicPointer() { }
|
|
||||||
explicit AtomicPointer(void* v) : rep_(v) { }
|
|
||||||
inline void* Acquire_Load() const {
|
|
||||||
return rep_.load(std::memory_order_acquire);
|
|
||||||
}
|
|
||||||
inline void Release_Store(void* v) {
|
|
||||||
rep_.store(v, std::memory_order_release);
|
|
||||||
}
|
|
||||||
inline void* NoBarrier_Load() const {
|
|
||||||
return rep_.load(std::memory_order_relaxed);
|
|
||||||
}
|
|
||||||
inline void NoBarrier_Store(void* v) {
|
|
||||||
rep_.store(v, std::memory_order_relaxed);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Atomic pointer based on sparc memory barriers
|
|
||||||
#elif defined(__sparcv9) && defined(__GNUC__)
|
|
||||||
class AtomicPointer {
|
|
||||||
private:
|
|
||||||
void* rep_;
|
|
||||||
public:
|
|
||||||
AtomicPointer() { }
|
|
||||||
explicit AtomicPointer(void* v) : rep_(v) { }
|
|
||||||
inline void* Acquire_Load() const {
|
|
||||||
void* val;
|
|
||||||
__asm__ __volatile__ (
|
|
||||||
"ldx [%[rep_]], %[val] \n\t"
|
|
||||||
"membar #LoadLoad|#LoadStore \n\t"
|
|
||||||
: [val] "=r" (val)
|
|
||||||
: [rep_] "r" (&rep_)
|
|
||||||
: "memory");
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
inline void Release_Store(void* v) {
|
|
||||||
__asm__ __volatile__ (
|
|
||||||
"membar #LoadStore|#StoreStore \n\t"
|
|
||||||
"stx %[v], [%[rep_]] \n\t"
|
|
||||||
:
|
|
||||||
: [rep_] "r" (&rep_), [v] "r" (v)
|
|
||||||
: "memory");
|
|
||||||
}
|
|
||||||
inline void* NoBarrier_Load() const { return rep_; }
|
|
||||||
inline void NoBarrier_Store(void* v) { rep_ = v; }
|
|
||||||
};
|
|
||||||
|
|
||||||
// Atomic pointer based on ia64 acq/rel
|
|
||||||
#elif defined(__ia64) && defined(__GNUC__)
|
|
||||||
class AtomicPointer {
|
|
||||||
private:
|
|
||||||
void* rep_;
|
|
||||||
public:
|
|
||||||
AtomicPointer() { }
|
|
||||||
explicit AtomicPointer(void* v) : rep_(v) { }
|
|
||||||
inline void* Acquire_Load() const {
|
|
||||||
void* val ;
|
|
||||||
__asm__ __volatile__ (
|
|
||||||
"ld8.acq %[val] = [%[rep_]] \n\t"
|
|
||||||
: [val] "=r" (val)
|
|
||||||
: [rep_] "r" (&rep_)
|
|
||||||
: "memory"
|
|
||||||
);
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
inline void Release_Store(void* v) {
|
|
||||||
__asm__ __volatile__ (
|
|
||||||
"st8.rel [%[rep_]] = %[v] \n\t"
|
|
||||||
:
|
|
||||||
: [rep_] "r" (&rep_), [v] "r" (v)
|
|
||||||
: "memory"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
inline void* NoBarrier_Load() const { return rep_; }
|
|
||||||
inline void NoBarrier_Store(void* v) { rep_ = v; }
|
|
||||||
};
|
|
||||||
|
|
||||||
// We have neither MemoryBarrier(), nor <atomic>
|
|
||||||
#else
|
|
||||||
#error Please implement AtomicPointer for this platform.
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#undef LEVELDB_HAVE_MEMORY_BARRIER
|
|
||||||
#undef ARCH_CPU_X86_FAMILY
|
|
||||||
#undef ARCH_CPU_ARM_FAMILY
|
|
||||||
#undef ARCH_CPU_ARM64_FAMILY
|
|
||||||
#undef ARCH_CPU_PPC_FAMILY
|
|
||||||
|
|
||||||
} // namespace port
|
|
||||||
} // namespace leveldb
|
|
||||||
|
|
||||||
#endif // PORT_ATOMIC_POINTER_H_
|
|
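Note: the deleted atomic_pointer.h above hand-rolled acquire/release semantics with per-architecture memory barriers and inline assembly. The <atomic>-based branch of that same file already expressed the interface in standard C++; a condensed sketch of that replacement, for reference only:

#include <atomic>

class AtomicPointer {
 private:
  std::atomic<void*> rep_;

 public:
  AtomicPointer() = default;
  explicit AtomicPointer(void* v) : rep_(v) {}

  // Acquire/release orderings take the place of the explicit barriers
  // used by the assembly-based variants that were removed.
  void* Acquire_Load() const { return rep_.load(std::memory_order_acquire); }
  void Release_Store(void* v) { rep_.store(v, std::memory_order_release); }

  // Relaxed accesses make no ordering guarantees.
  void* NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); }
  void NoBarrier_Store(void* v) { rep_.store(v, std::memory_order_relaxed); }
};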
@ -10,10 +10,10 @@
 // Include the appropriate platform specific file below.  If you are
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
-#if defined(LEVELDB_PLATFORM_POSIX)
-# include "port/port_posix.h"
+#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
+#include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
-# include "port/port_chromium.h"
+#include "port/port_chromium.h"
 #endif

 #endif  // STORAGE_LEVELDB_PORT_PORT_H_
39
port/port_config.h.in
Normal file
@ -0,0 +1,39 @@
+// Copyright 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+
+// Define to 1 if you have a definition for fdatasync() in <unistd.h>.
+#if !defined(HAVE_FDATASYNC)
+#cmakedefine01 HAVE_FDATASYNC
+#endif  // !defined(HAVE_FDATASYNC)
+
+// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>.
+#if !defined(HAVE_FULLFSYNC)
+#cmakedefine01 HAVE_FULLFSYNC
+#endif  // !defined(HAVE_FULLFSYNC)
+
+// Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>.
+#if !defined(HAVE_O_CLOEXEC)
+#cmakedefine01 HAVE_O_CLOEXEC
+#endif  // !defined(HAVE_O_CLOEXEC)
+
+// Define to 1 if you have Google CRC32C.
+#if !defined(HAVE_CRC32C)
+#cmakedefine01 HAVE_CRC32C
+#endif  // !defined(HAVE_CRC32C)
+
+// Define to 1 if you have Google Snappy.
+#if !defined(HAVE_SNAPPY)
+#cmakedefine01 HAVE_SNAPPY
+#endif  // !defined(HAVE_SNAPPY)
+
+// Define to 1 if your processor stores words with the most significant byte
+// first (like Motorola and SPARC, unlike Intel and VAX).
+#if !defined(LEVELDB_IS_BIG_ENDIAN)
+#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
+#endif  // !defined(LEVELDB_IS_BIG_ENDIAN)
+
+#endif  // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
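Note: port_config.h.in above is a CMake template; each #cmakedefine01 line becomes a #define of 0 or 1 in the generated port/port_config.h, so downstream code branches on the value rather than on whether the macro exists. A hedged sketch of how such a flag might be consumed (SyncFd and the fallback logic are illustrative, not part of this change):

#include <unistd.h>

#include "port/port_config.h"  // generated; defines HAVE_FDATASYNC as 0 or 1

// Illustrative helper: flush a file descriptor's data to stable storage,
// preferring fdatasync() when the configure step found it.
inline int SyncFd(int fd) {
#if HAVE_FDATASYNC
  return ::fdatasync(fd);
#else
  return ::fsync(fd);  // portable fallback when fdatasync() is unavailable
#endif
}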
@ -10,6 +10,8 @@
 #ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
 #define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_

+#include "port/thread_annotations.h"
+
 namespace leveldb {
 namespace port {

@ -23,23 +25,23 @@ static const bool kLittleEndian = true /* or some other expression */;
 // ------------------ Threading -------------------

 // A Mutex represents an exclusive lock.
-class Mutex {
+class LOCKABLE Mutex {
  public:
   Mutex();
   ~Mutex();

   // Lock the mutex.  Waits until other lockers have exited.
   // Will deadlock if the mutex is already locked by this thread.
-  void Lock();
+  void Lock() EXCLUSIVE_LOCK_FUNCTION();

   // Unlock the mutex.
   // REQUIRES: This mutex was locked by this thread.
-  void Unlock();
+  void Unlock() UNLOCK_FUNCTION();

   // Optionally crash if this thread does not hold this mutex.
   // The implementation must be fast, especially if NDEBUG is
   // defined.  The implementation is allowed to skip all checks.
-  void AssertHeld();
+  void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
 };

 class CondVar {
@ -60,57 +62,18 @@ class CondVar {
   void SignallAll();
 };

-// Thread-safe initialization.
-// Used as follows:
-//      static port::OnceType init_control = LEVELDB_ONCE_INIT;
-//      static void Initializer() { ... do something ...; }
-//      ...
-//      port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// A type that holds a pointer that can be read or written atomically
-// (i.e., without word-tearing.)
-class AtomicPointer {
- private:
-  intptr_t rep_;
- public:
-  // Initialize to arbitrary value
-  AtomicPointer();
-
-  // Initialize to hold v
-  explicit AtomicPointer(void* v) : rep_(v) { }
-
-  // Read and return the stored pointer with the guarantee that no
-  // later memory access (read or write) by this thread can be
-  // reordered ahead of this read.
-  void* Acquire_Load() const;
-
-  // Set v as the stored pointer with the guarantee that no earlier
-  // memory access (read or write) by this thread can be reordered
-  // after this store.
-  void Release_Store(void* v);
-
-  // Read the stored pointer with no ordering guarantees.
-  void* NoBarrier_Load() const;
-
-  // Set va as the stored pointer with no ordering guarantees.
-  void NoBarrier_Store(void* v);
-};
-
 // ------------------ Compression -------------------

 // Store the snappy compression of "input[0,input_length-1]" in *output.
 // Returns false if snappy is not supported by this port.
-extern bool Snappy_Compress(const char* input, size_t input_length,
+bool Snappy_Compress(const char* input, size_t input_length,
                      std::string* output);

 // If input[0,input_length-1] looks like a valid snappy compressed
 // buffer, store the size of the uncompressed data in *result and
 // return true.  Else return false.
-extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
                                   size_t* result);

 // Attempt to snappy uncompress input[0,input_length-1] into *output.
 // Returns true if successful, false if the input is invalid lightweight
@ -119,15 +82,15 @@ extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
 // REQUIRES: at least the first "n" bytes of output[] must be writable
 // where "n" is the result of a successful call to
 // Snappy_GetUncompressedLength.
-extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
+bool Snappy_Uncompress(const char* input_data, size_t input_length,
                        char* output);

 // ------------------ Miscellaneous -------------------

 // If heap profiling is not supported, returns false.
 // Else repeatedly calls (*func)(arg, data, n) and then returns true.
 // The concatenation of all "data[0,n-1]" fragments is the heap profile.
-extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
+bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);

 // Extend the CRC to include the first n bytes of buf.
 //
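Note: the hunk above drops OnceType/InitOnce and AtomicPointer from the port interface; C++11 standard facilities cover both (see the std::atomic sketch earlier). As a hedged illustration rather than repository code, one-time initialization maps onto std::call_once:

#include <mutex>

static std::once_flag init_flag;

static void Initializer() {
  // ... one-time setup ...
}

void EnsureInitialized() {
  // Equivalent in spirit to port::InitOnce(&init_control, &Initializer):
  // Initializer() runs exactly once even with concurrent callers.
  std::call_once(init_flag, Initializer);
}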
@ -1,53 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <string.h>
-
-namespace leveldb {
-namespace port {
-
-static void PthreadCall(const char* label, int result) {
-  if (result != 0) {
-    fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
-    abort();
-  }
-}
-
-Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); }
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); }
-
-void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); }
-
-CondVar::CondVar(Mutex* mu)
-    : mu_(mu) {
-    PthreadCall("init cv", pthread_cond_init(&cv_, NULL));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
-  PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-}
-
-void CondVar::Signal() {
-  PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
-  PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
-  PthreadCall("once", pthread_once(once, initializer));
-}
-
-}  // namespace port
-}  // namespace leveldb
@ -1,156 +0,0 @@
|
|||||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
||||||
//
|
|
||||||
// See port_example.h for documentation for the following types/functions.
|
|
||||||
|
|
||||||
#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
|
|
||||||
#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
|
|
||||||
|
|
||||||
#undef PLATFORM_IS_LITTLE_ENDIAN
|
|
||||||
#if defined(OS_MACOSX)
|
|
||||||
#include <machine/endian.h>
|
|
||||||
#if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN \
|
|
||||||
(__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
|
|
||||||
#endif
|
|
||||||
#elif defined(OS_SOLARIS)
|
|
||||||
#include <sys/isa_defs.h>
|
|
||||||
#ifdef _LITTLE_ENDIAN
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN true
|
|
||||||
#else
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN false
|
|
||||||
#endif
|
|
||||||
#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
|
|
||||||
defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
|
|
||||||
#include <sys/types.h>
|
|
||||||
#include <sys/endian.h>
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
|
|
||||||
#elif defined(OS_HPUX)
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN false
|
|
||||||
#elif defined(OS_ANDROID)
|
|
||||||
// Due to a bug in the NDK x86 <sys/endian.h> definition,
|
|
||||||
// _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
|
|
||||||
// See http://code.google.com/p/android/issues/detail?id=39824
|
|
||||||
#include <endian.h>
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
|
|
||||||
#else
|
|
||||||
#include <endian.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <pthread.h>
|
|
||||||
#ifdef SNAPPY
|
|
||||||
#include <snappy.h>
|
|
||||||
#endif
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <string>
|
|
||||||
#include "port/atomic_pointer.h"
|
|
||||||
|
|
||||||
#ifndef PLATFORM_IS_LITTLE_ENDIAN
|
|
||||||
#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
|
|
||||||
defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
|
|
||||||
defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
|
|
||||||
// Use fread/fwrite/fflush on platforms without _unlocked variants
|
|
||||||
#define fread_unlocked fread
|
|
||||||
#define fwrite_unlocked fwrite
|
|
||||||
#define fflush_unlocked fflush
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(OS_MACOSX) || defined(OS_FREEBSD) ||\
|
|
||||||
defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
|
|
||||||
// Use fsync() on platforms without fdatasync()
|
|
||||||
#define fdatasync fsync
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(OS_ANDROID) && __ANDROID_API__ < 9
|
|
||||||
// fdatasync() was only introduced in API level 9 on Android. Use fsync()
|
|
||||||
// when targetting older platforms.
|
|
||||||
#define fdatasync fsync
|
|
||||||
#endif
|
|
||||||
|
|
||||||
namespace leveldb {
|
|
||||||
namespace port {
|
|
||||||
|
|
||||||
static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
|
|
||||||
#undef PLATFORM_IS_LITTLE_ENDIAN
|
|
||||||
|
|
||||||
class CondVar;
|
|
||||||
|
|
||||||
class Mutex {
|
|
||||||
public:
|
|
||||||
Mutex();
|
|
||||||
~Mutex();
|
|
||||||
|
|
||||||
void Lock();
|
|
||||||
void Unlock();
|
|
||||||
void AssertHeld() { }
|
|
||||||
|
|
||||||
private:
|
|
||||||
friend class CondVar;
|
|
||||||
pthread_mutex_t mu_;
|
|
||||||
|
|
||||||
// No copying
|
|
||||||
Mutex(const Mutex&);
|
|
||||||
void operator=(const Mutex&);
|
|
||||||
};
|
|
||||||
|
|
||||||
class CondVar {
|
|
||||||
public:
|
|
||||||
explicit CondVar(Mutex* mu);
|
|
||||||
~CondVar();
|
|
||||||
void Wait();
|
|
||||||
void Signal();
|
|
||||||
void SignalAll();
|
|
||||||
private:
|
|
||||||
pthread_cond_t cv_;
|
|
||||||
Mutex* mu_;
|
|
||||||
};
|
|
||||||
|
|
||||||
typedef pthread_once_t OnceType;
|
|
||||||
#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
|
|
||||||
extern void InitOnce(OnceType* once, void (*initializer)());
|
|
||||||
|
|
||||||
inline bool Snappy_Compress(const char* input, size_t length,
|
|
||||||
::std::string* output) {
|
|
||||||
#ifdef SNAPPY
|
|
||||||
output->resize(snappy::MaxCompressedLength(length));
|
|
||||||
size_t outlen;
|
|
||||||
snappy::RawCompress(input, length, &(*output)[0], &outlen);
|
|
||||||
output->resize(outlen);
|
|
||||||
return true;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
|
|
||||||
size_t* result) {
|
|
||||||
#ifdef SNAPPY
|
|
||||||
return snappy::GetUncompressedLength(input, length, result);
|
|
||||||
#else
|
|
||||||
return false;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool Snappy_Uncompress(const char* input, size_t length,
|
|
||||||
char* output) {
|
|
||||||
#ifdef SNAPPY
|
|
||||||
return snappy::RawUncompress(input, length, output);
|
|
||||||
#else
|
|
||||||
return false;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
|
|
||||||
|
|
||||||
} // namespace port
|
|
||||||
} // namespace leveldb
|
|
||||||
|
|
||||||
#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
|
|
@ -1,129 +0,0 @@
|
|||||||
// Copyright 2016 The LevelDB Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
||||||
//
|
|
||||||
// A portable implementation of crc32c, optimized to handle
|
|
||||||
// four bytes at a time.
|
|
||||||
//
|
|
||||||
// In a separate source file to allow this accelerated CRC32C function to be
|
|
||||||
// compiled with the appropriate compiler flags to enable x86 SSE 4.2
|
|
||||||
// instructions.
|
|
||||||
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include "port/port.h"
|
|
||||||
|
|
||||||
#if defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
|
||||||
#include <intrin.h>
|
|
||||||
#elif defined(__GNUC__) && defined(__SSE4_2__)
|
|
||||||
#include <nmmintrin.h>
|
|
||||||
#include <cpuid.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
|
|
||||||
namespace leveldb {
|
|
||||||
namespace port {
|
|
||||||
|
|
||||||
#if defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
|
|
||||||
// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
|
|
||||||
static inline uint32_t LE_LOAD32(const uint8_t *p) {
|
|
||||||
// SSE is x86 only, so ensured that |p| is always little-endian.
|
|
||||||
uint32_t word;
|
|
||||||
memcpy(&word, p, sizeof(word));
|
|
||||||
return word;
|
|
||||||
}
|
|
||||||
|
|
||||||
#if defined(_M_X64) || defined(__x86_64__) // LE_LOAD64 is only used on x64.
|
|
||||||
|
|
||||||
// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
|
|
||||||
static inline uint64_t LE_LOAD64(const uint8_t *p) {
|
|
||||||
uint64_t dword;
|
|
||||||
memcpy(&dword, p, sizeof(dword));
|
|
||||||
return dword;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif // defined(_M_X64) || defined(__x86_64__)
|
|
||||||
|
|
||||||
static inline bool HaveSSE42() {
|
|
||||||
#if defined(_MSC_VER)
|
|
||||||
int cpu_info[4];
|
|
||||||
__cpuid(cpu_info, 1);
|
|
||||||
return (cpu_info[2] & (1 << 20)) != 0;
|
|
||||||
#elif defined(__GNUC__)
|
|
||||||
unsigned int eax, ebx, ecx, edx;
|
|
||||||
__get_cpuid(1, &eax, &ebx, &ecx, &edx);
|
|
||||||
return (ecx & (1 << 20)) != 0;
|
|
||||||
#else
|
|
||||||
return false;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
|
|
||||||
// For further improvements see Intel publication at:
|
|
||||||
// http://download.intel.com/design/intarch/papers/323405.pdf
|
|
||||||
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
|
|
||||||
#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
return 0;
|
|
||||||
#else
|
|
||||||
static bool have = HaveSSE42();
|
|
||||||
if (!have) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
|
|
||||||
const uint8_t *e = p + size;
|
|
||||||
uint32_t l = crc ^ 0xffffffffu;
|
|
||||||
|
|
||||||
#define STEP1 do { \
|
|
||||||
l = _mm_crc32_u8(l, *p++); \
|
|
||||||
} while (0)
|
|
||||||
#define STEP4 do { \
|
|
||||||
l = _mm_crc32_u32(l, LE_LOAD32(p)); \
|
|
||||||
p += 4; \
|
|
||||||
} while (0)
|
|
||||||
#define STEP8 do { \
|
|
||||||
l = _mm_crc32_u64(l, LE_LOAD64(p)); \
|
|
||||||
p += 8; \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
if (size > 16) {
|
|
||||||
// Process unaligned bytes
|
|
||||||
for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
|
|
||||||
STEP1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// _mm_crc32_u64 is only available on x64.
|
|
||||||
#if defined(_M_X64) || defined(__x86_64__)
|
|
||||||
// Process 8 bytes at a time
|
|
||||||
while ((e-p) >= 8) {
|
|
||||||
STEP8;
|
|
||||||
}
|
|
||||||
// Process 4 bytes at a time
|
|
||||||
if ((e-p) >= 4) {
|
|
||||||
STEP4;
|
|
||||||
}
|
|
||||||
#else // !(defined(_M_X64) || defined(__x86_64__))
|
|
||||||
// Process 4 bytes at a time
|
|
||||||
while ((e-p) >= 4) {
|
|
||||||
STEP4;
|
|
||||||
}
|
|
||||||
#endif // defined(_M_X64) || defined(__x86_64__)
|
|
||||||
}
|
|
||||||
// Process the last few bytes
|
|
||||||
while (p != e) {
|
|
||||||
STEP1;
|
|
||||||
}
|
|
||||||
#undef STEP8
|
|
||||||
#undef STEP4
|
|
||||||
#undef STEP1
|
|
||||||
return l ^ 0xffffffffu;
|
|
||||||
#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace port
|
|
||||||
} // namespace leveldb
|
|
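Note: with port_posix_sse.cc deleted, hardware-accelerated CRC32C comes from the external google/crc32c library when HAVE_CRC32C is configured; the new port_stdcxx.h below forwards AcceleratedCRC32C to ::crc32c::Extend. A small sketch of that call, assuming the crc32c library is installed:

#include <cstddef>
#include <cstdint>

#include <crc32c/crc32c.h>  // google/crc32c, assumed available

// Extends a running CRC32C over buf[0, size), like the deleted
// AcceleratedCRC32C but leaving SSE4.2 / ARM CRC detection to the library.
uint32_t ExtendCrc32c(uint32_t crc, const char* buf, size_t size) {
  return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
}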
153
port/port_stdcxx.h
Normal file
153
port/port_stdcxx.h
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
|
#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
|
||||||
|
#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
|
||||||
|
|
||||||
|
// port/port_config.h availability is automatically detected via __has_include
|
||||||
|
// in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the
|
||||||
|
// configuration detection.
|
||||||
|
#if defined(LEVELDB_HAS_PORT_CONFIG_H)
|
||||||
|
|
||||||
|
#if LEVELDB_HAS_PORT_CONFIG_H
|
||||||
|
#include "port/port_config.h"
|
||||||
|
#endif // LEVELDB_HAS_PORT_CONFIG_H
|
||||||
|
|
||||||
|
#elif defined(__has_include)
|
||||||
|
|
||||||
|
#if __has_include("port/port_config.h")
|
||||||
|
#include "port/port_config.h"
|
||||||
|
#endif // __has_include("port/port_config.h")
|
||||||
|
|
||||||
|
#endif // defined(LEVELDB_HAS_PORT_CONFIG_H)
|
||||||
|
|
||||||
|
#if HAVE_CRC32C
|
||||||
|
#include <crc32c/crc32c.h>
|
||||||
|
#endif // HAVE_CRC32C
|
||||||
|
#if HAVE_SNAPPY
|
||||||
|
#include <snappy.h>
|
||||||
|
#endif // HAVE_SNAPPY
|
||||||
|
|
||||||
|
#include <cassert>
|
||||||
|
#include <condition_variable> // NOLINT
|
||||||
|
#include <cstddef>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <mutex> // NOLINT
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "port/thread_annotations.h"
|
||||||
|
|
||||||
|
namespace leveldb {
|
||||||
|
namespace port {
|
||||||
|
|
||||||
|
static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
|
||||||
|
|
||||||
|
class CondVar;
|
||||||
|
|
||||||
|
// Thinly wraps std::mutex.
|
||||||
|
class LOCKABLE Mutex {
|
||||||
|
public:
|
||||||
|
Mutex() = default;
|
||||||
|
~Mutex() = default;
|
||||||
|
|
||||||
|
Mutex(const Mutex&) = delete;
|
||||||
|
Mutex& operator=(const Mutex&) = delete;
|
||||||
|
|
||||||
|
void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
|
||||||
|
void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
|
||||||
|
void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
|
||||||
|
|
||||||
|
private:
|
||||||
|
friend class CondVar;
|
||||||
|
std::mutex mu_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Thinly wraps std::condition_variable.
|
||||||
|
class CondVar {
|
||||||
|
public:
|
||||||
|
explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); }
|
||||||
|
~CondVar() = default;
|
||||||
|
|
||||||
|
CondVar(const CondVar&) = delete;
|
||||||
|
CondVar& operator=(const CondVar&) = delete;
|
||||||
|
|
||||||
|
void Wait() {
|
||||||
|
std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
|
||||||
|
cv_.wait(lock);
|
||||||
|
lock.release();
|
||||||
|
}
|
||||||
|
void Signal() { cv_.notify_one(); }
|
||||||
|
void SignalAll() { cv_.notify_all(); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::condition_variable cv_;
|
||||||
|
Mutex* const mu_;
|
||||||
|
};
|
||||||
|
|
||||||
|
inline bool Snappy_Compress(const char* input, size_t length,
|
||||||
|
std::string* output) {
|
||||||
|
#if HAVE_SNAPPY
|
||||||
|
output->resize(snappy::MaxCompressedLength(length));
|
||||||
|
size_t outlen;
|
||||||
|
snappy::RawCompress(input, length, &(*output)[0], &outlen);
|
||||||
|
output->resize(outlen);
|
||||||
|
return true;
|
||||||
|
#else
|
||||||
|
// Silence compiler warnings about unused arguments.
|
||||||
|
(void)input;
|
||||||
|
(void)length;
|
||||||
|
(void)output;
|
||||||
|
#endif // HAVE_SNAPPY
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
|
||||||
|
size_t* result) {
|
||||||
|
#if HAVE_SNAPPY
|
||||||
|
return snappy::GetUncompressedLength(input, length, result);
|
||||||
|
#else
|
||||||
|
// Silence compiler warnings about unused arguments.
|
||||||
|
(void)input;
|
||||||
|
(void)length;
|
||||||
|
(void)result;
|
||||||
|
return false;
|
||||||
|
#endif // HAVE_SNAPPY
|
||||||
|
}
|
||||||
|
|
||||||
|
inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
|
||||||
|
#if HAVE_SNAPPY
|
||||||
|
return snappy::RawUncompress(input, length, output);
|
||||||
|
#else
|
||||||
|
// Silence compiler warnings about unused arguments.
|
||||||
|
(void)input;
|
||||||
|
(void)length;
|
||||||
|
(void)output;
|
||||||
|
return false;
|
||||||
|
#endif // HAVE_SNAPPY
|
||||||
|
}
|
||||||
|
|
||||||
|
inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
|
||||||
|
// Silence compiler warnings about unused arguments.
|
||||||
|
(void)func;
|
||||||
|
(void)arg;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
|
||||||
|
#if HAVE_CRC32C
|
||||||
|
return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
|
||||||
|
#else
|
||||||
|
// Silence compiler warnings about unused arguments.
|
||||||
|
(void)crc;
|
||||||
|
(void)buf;
|
||||||
|
(void)size;
|
||||||
|
return 0;
|
||||||
|
#endif // HAVE_CRC32C
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace port
|
||||||
|
} // namespace leveldb
|
||||||
|
|
||||||
|
#endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
|
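Note: the new port_stdcxx.h above wraps std::mutex and std::condition_variable behind the existing port::Mutex/port::CondVar interface; CondVar::Wait adopts the externally held lock, waits, and then releases ownership back to the caller. A minimal usage sketch, assuming the header compiles in your configuration (it expects the generated port_config.h or equivalent feature macros):

#include "port/port_stdcxx.h"

leveldb::port::Mutex mu;
leveldb::port::CondVar cv(&mu);
bool ready = false;

void Producer() {
  mu.Lock();
  ready = true;
  cv.Signal();  // wake one waiter
  mu.Unlock();
}

void Consumer() {
  mu.Lock();
  while (!ready) {  // re-check the predicate; waits can wake spuriously
    cv.Wait();      // releases mu while blocked, reacquires before returning
  }
  mu.Unlock();
}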
@ -5,56 +5,104 @@
 #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
 #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_

-// Some environments provide custom macros to aid in static thread-safety
-// analysis.  Provide empty definitions of such macros unless they are already
-// defined.
+// Use Clang's thread safety analysis annotations when available. In other
+// environments, the macros receive empty definitions.
+// Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html

+#if !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#if defined(__clang__)
+
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+#endif  // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#ifndef GUARDED_BY
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#endif
+
+#ifndef PT_GUARDED_BY
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#endif
+
+#ifndef ACQUIRED_AFTER
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#endif
+
+#ifndef ACQUIRED_BEFORE
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+#endif
+
 #ifndef EXCLUSIVE_LOCKS_REQUIRED
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
 #endif

 #ifndef SHARED_LOCKS_REQUIRED
-#define SHARED_LOCKS_REQUIRED(...)
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
 #endif

 #ifndef LOCKS_EXCLUDED
-#define LOCKS_EXCLUDED(...)
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
 #endif

 #ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x)
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
 #endif

 #ifndef LOCKABLE
-#define LOCKABLE
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
 #endif

 #ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
 #endif

 #ifndef EXCLUSIVE_LOCK_FUNCTION
-#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
 #endif

 #ifndef SHARED_LOCK_FUNCTION
-#define SHARED_LOCK_FUNCTION(...)
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
 #endif

 #ifndef EXCLUSIVE_TRYLOCK_FUNCTION
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
 #endif

 #ifndef SHARED_TRYLOCK_FUNCTION
-#define SHARED_TRYLOCK_FUNCTION(...)
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
 #endif

 #ifndef UNLOCK_FUNCTION
-#define UNLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
 #endif

 #ifndef NO_THREAD_SAFETY_ANALYSIS
-#define NO_THREAD_SAFETY_ANALYSIS
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+#endif
+
+#ifndef ASSERT_EXCLUSIVE_LOCK
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+#endif
+
+#ifndef ASSERT_SHARED_LOCK
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
 #endif

 #endif  // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
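Note: these macros only have teeth when compiling with Clang's -Wthread-safety; in other environments they expand to nothing, as before. A hedged example of how they are typically attached to a class (Counter is illustrative and assumes a configured port::Mutex):

#include "port/port.h"
#include "port/thread_annotations.h"

class Counter {
 public:
  Counter() : value_(0) {}

  void Increment() LOCKS_EXCLUDED(mu_) {
    mu_.Lock();    // Lock() is annotated EXCLUSIVE_LOCK_FUNCTION()
    ++value_;      // OK: mu_ is held, satisfying GUARDED_BY(mu_)
    mu_.Unlock();  // Unlock() is annotated UNLOCK_FUNCTION()
  }

 private:
  leveldb::port::Mutex mu_;
  int value_ GUARDED_BY(mu_);  // analysis flags any access without mu_ held
};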
@ -1,24 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-typedef signed long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-
-#endif  // STORAGE_LEVELDB_PORT_WIN_STDINT_H_
@ -6,8 +6,9 @@
 #include "table/block.h"

-#include <vector>
 #include <algorithm>
+#include <vector>
+
 #include "leveldb/comparator.h"
 #include "table/format.h"
 #include "util/coding.h"
@ -27,7 +28,7 @@ Block::Block(const BlockContents& contents)
   if (size_ < sizeof(uint32_t)) {
     size_ = 0;  // Error marker
   } else {
-    size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+    size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
     if (NumRestarts() > max_restarts_allowed) {
       // The size is too small for NumRestarts()
       size_ = 0;
@ -48,13 +49,12 @@ Block::~Block() {
 // and the length of the value in "*shared", "*non_shared", and
 // "*value_length", respectively.  Will not dereference past "limit".
 //
-// If any errors are detected, returns NULL.  Otherwise, returns a
+// If any errors are detected, returns nullptr.  Otherwise, returns a
 // pointer to the key delta (just past the three decoded values).
 static inline const char* DecodeEntry(const char* p, const char* limit,
-                                      uint32_t* shared,
-                                      uint32_t* non_shared,
+                                      uint32_t* shared, uint32_t* non_shared,
                                       uint32_t* value_length) {
-  if (limit - p < 3) return NULL;
+  if (limit - p < 3) return nullptr;
   *shared = reinterpret_cast<const unsigned char*>(p)[0];
   *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
   *value_length = reinterpret_cast<const unsigned char*>(p)[2];
@ -62,13 +62,13 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
     // Fast path: all three values are encoded in one byte each
     p += 3;
   } else {
-    if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL;
-    if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL;
-    if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL;
+    if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
+    if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+    if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
   }

   if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
-    return NULL;
+    return nullptr;
   }
   return p;
 }
@ -76,9 +76,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
 class Block::Iter : public Iterator {
  private:
   const Comparator* const comparator_;
   const char* const data_;       // underlying block contents
   uint32_t const restarts_;      // Offset of restart array (list of fixed32)
   uint32_t const num_restarts_;  // Number of uint32_t entries in restart array

   // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
   uint32_t current_;
@ -112,9 +112,7 @@ class Block::Iter : public Iterator {
   }

  public:
-  Iter(const Comparator* comparator,
-       const char* data,
-       uint32_t restarts,
+  Iter(const Comparator* comparator, const char* data, uint32_t restarts,
        uint32_t num_restarts)
       : comparator_(comparator),
         data_(data),
@ -171,10 +169,10 @@ class Block::Iter : public Iterator {
       uint32_t mid = (left + right + 1) / 2;
       uint32_t region_offset = GetRestartPoint(mid);
       uint32_t shared, non_shared, value_length;
-      const char* key_ptr = DecodeEntry(data_ + region_offset,
-                                        data_ + restarts_,
-                                        &shared, &non_shared, &value_length);
-      if (key_ptr == NULL || (shared != 0)) {
+      const char* key_ptr =
+          DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+                      &non_shared, &value_length);
+      if (key_ptr == nullptr || (shared != 0)) {
         CorruptionError();
         return;
       }
@ -237,7 +235,7 @@ class Block::Iter : public Iterator {
     // Decode next entry
     uint32_t shared, non_shared, value_length;
     p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
-    if (p == NULL || key_.size() < shared) {
+    if (p == nullptr || key_.size() < shared) {
       CorruptionError();
       return false;
     } else {
@ -253,7 +251,7 @@ class Block::Iter : public Iterator {
   }
 };

-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
   if (size_ < sizeof(uint32_t)) {
     return NewErrorIterator(Status::Corruption("bad block contents"));
   }
@ -261,7 +259,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
   if (num_restarts == 0) {
     return NewEmptyIterator();
   } else {
-    return new Iter(cmp, data_, restart_offset_, num_restarts);
+    return new Iter(comparator, data_, restart_offset_, num_restarts);
   }
 }
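Note: DecodeEntry above parses the per-entry header that BlockBuilder writes: three varint32 values (shared key-prefix length, non-shared key length, value length) followed by the key delta and the value bytes. A hedged sketch of the matching encoder, using the PutVarint32 helper from util/coding.h; the function name AppendBlockEntry is illustrative:

#include <string>

#include "leveldb/slice.h"
#include "util/coding.h"

// Appends one block entry to *dst:
//   varint32 shared | varint32 non_shared | varint32 value_length
//   | key[shared..] | value
void AppendBlockEntry(std::string* dst, uint32_t shared,
                      const leveldb::Slice& key, const leveldb::Slice& value) {
  leveldb::PutVarint32(dst, shared);
  leveldb::PutVarint32(dst, static_cast<uint32_t>(key.size() - shared));
  leveldb::PutVarint32(dst, static_cast<uint32_t>(value.size()));
  dst->append(key.data() + shared, key.size() - shared);
  dst->append(value.data(), value.size());
}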
@ -7,6 +7,7 @@
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/iterator.h"

 namespace leveldb {
@ -19,24 +20,23 @@ class Block {
   // Initialize the block with the specified contents.
   explicit Block(const BlockContents& contents);

+  Block(const Block&) = delete;
+  Block& operator=(const Block&) = delete;
+
   ~Block();

   size_t size() const { return size_; }
   Iterator* NewIterator(const Comparator* comparator);

  private:
+  class Iter;
+
   uint32_t NumRestarts() const;

   const char* data_;
   size_t size_;
   uint32_t restart_offset_;  // Offset in data_ of restart array
   bool owned_;               // Block owns data_[]
-
-  // No copying allowed
-  Block(const Block&);
-  void operator=(const Block&);
-
-  class Iter;
 };

 }  // namespace leveldb
@ -28,36 +28,35 @@
 #include "table/block_builder.h"

-#include <algorithm>
 #include <assert.h>
+
+#include <algorithm>
+
 #include "leveldb/comparator.h"
-#include "leveldb/table_builder.h"
+#include "leveldb/options.h"
 #include "util/coding.h"

 namespace leveldb {

 BlockBuilder::BlockBuilder(const Options* options)
-    : options_(options),
-      restarts_(),
-      counter_(0),
-      finished_(false) {
+    : options_(options), restarts_(), counter_(0), finished_(false) {
   assert(options->block_restart_interval >= 1);
   restarts_.push_back(0);  // First restart point is at offset 0
 }

 void BlockBuilder::Reset() {
   buffer_.clear();
   restarts_.clear();
   restarts_.push_back(0);  // First restart point is at offset 0
   counter_ = 0;
   finished_ = false;
   last_key_.clear();
 }

 size_t BlockBuilder::CurrentSizeEstimate() const {
   return (buffer_.size() +                       // Raw data buffer
           restarts_.size() * sizeof(uint32_t) +  // Restart array
           sizeof(uint32_t));                     // Restart array length
 }

 Slice BlockBuilder::Finish() {
@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
   Slice last_key_piece(last_key_);
   assert(!finished_);
   assert(counter_ <= options_->block_restart_interval);
   assert(buffer_.empty()  // No values yet?
          || options_->comparator->Compare(key, last_key_piece) > 0);
   size_t shared = 0;
   if (counter_ < options_->block_restart_interval) {
@@ -5,9 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 
+#include <stdint.h>
+
 #include <vector>
 
-#include <stdint.h>
 #include "leveldb/slice.h"
 
 namespace leveldb {
@@ -18,6 +19,9 @@ class BlockBuilder {
  public:
   explicit BlockBuilder(const Options* options);
 
+  BlockBuilder(const BlockBuilder&) = delete;
+  BlockBuilder& operator=(const BlockBuilder&) = delete;
+
   // Reset the contents as if the BlockBuilder was just constructed.
   void Reset();
 
@@ -35,21 +39,15 @@ class BlockBuilder {
   size_t CurrentSizeEstimate() const;
 
   // Return true iff no entries have been added since the last Reset()
-  bool empty() const {
-    return buffer_.empty();
-  }
+  bool empty() const { return buffer_.empty(); }
 
  private:
   const Options* options_;
   std::string buffer_;              // Destination buffer
   std::vector<uint32_t> restarts_;  // Restart points
   int counter_;                     // Number of entries emitted since restart
   bool finished_;                   // Has Finish() been called?
   std::string last_key_;
-
-  // No copying allowed
-  BlockBuilder(const BlockBuilder&);
-  void operator=(const BlockBuilder&);
 };
 
 }  // namespace leveldb
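For reference, CurrentSizeEstimate() in the BlockBuilder hunks above adds up the raw entry bytes, four bytes per restart offset, and four bytes for the restart count. A standalone sketch with hypothetical numbers (the helper name is made up, only the arithmetic mirrors the diff):

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Mirrors the arithmetic in BlockBuilder::CurrentSizeEstimate().
static size_t EstimateBlockSize(const std::string& buffer,
                                const std::vector<uint32_t>& restarts) {
  return buffer.size() +                       // raw data buffer
         restarts.size() * sizeof(uint32_t) +  // restart array
         sizeof(uint32_t);                     // restart array length
}

int main() {
  std::string buffer(1000, 'x');                   // hypothetical entry bytes
  std::vector<uint32_t> restarts = {0, 400, 800};  // hypothetical restart offsets
  std::printf("%zu\n", EstimateBlockSize(buffer, restarts));  // 1000 + 12 + 4 = 1016
  return 0;
}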
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
 static const size_t kFilterBase = 1 << kFilterBaseLg;
 
 FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
-    : policy_(policy) {
-}
+    : policy_(policy) {}
 
 void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
   uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
   tmp_keys_.resize(num_keys);
   for (size_t i = 0; i < num_keys; i++) {
     const char* base = keys_.data() + start_[i];
-    size_t length = start_[i+1] - start_[i];
+    size_t length = start_[i + 1] - start_[i];
     tmp_keys_[i] = Slice(base, length);
   }
 
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
 
 FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                      const Slice& contents)
-    : policy_(policy),
-      data_(NULL),
-      offset_(NULL),
-      num_(0),
-      base_lg_(0) {
+    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
   size_t n = contents.size();
   if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
-  base_lg_ = contents[n-1];
+  base_lg_ = contents[n - 1];
   uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
   if (last_word > n - 5) return;
   data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
 bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   uint64_t index = block_offset >> base_lg_;
   if (index < num_) {
-    uint32_t start = DecodeFixed32(offset_ + index*4);
-    uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+    uint32_t start = DecodeFixed32(offset_ + index * 4);
+    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
     if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
       Slice filter = Slice(data_ + start, limit - start);
       return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   return true;  // Errors are treated as potential matches
 }
 
-}
+}  // namespace leveldb
@@ -11,8 +11,10 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/slice.h"
 #include "util/hash.h"
 
@@ -30,6 +32,9 @@ class FilterBlockBuilder {
  public:
   explicit FilterBlockBuilder(const FilterPolicy*);
 
+  FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+  FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
+
   void StartBlock(uint64_t block_offset);
   void AddKey(const Slice& key);
   Slice Finish();
@@ -38,20 +43,16 @@ class FilterBlockBuilder {
   void GenerateFilter();
 
   const FilterPolicy* policy_;
   std::string keys_;             // Flattened key contents
   std::vector<size_t> start_;    // Starting index in keys_ of each key
   std::string result_;           // Filter data computed so far
   std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
-
-  // No copying allowed
-  FilterBlockBuilder(const FilterBlockBuilder&);
-  void operator=(const FilterBlockBuilder&);
 };
 
 class FilterBlockReader {
  public:
   // REQUIRES: "contents" and *policy must stay live while *this is live.
   FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
   bool KeyMayMatch(uint64_t block_offset, const Slice& key);
 
@@ -63,6 +64,6 @@ class FilterBlockReader {
   size_t base_lg_;  // Encoding parameter (see kFilterBaseLg in .cc file)
 };
 
-}
+}  // namespace leveldb
 
 #endif  // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
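As a reading aid for FilterBlockReader::KeyMayMatch() above: with kFilterBaseLg == 11, every 2048 bytes of data-block offset map to one filter, and filter i occupies bytes [offset[i], offset[i+1]) of the filter data. The sketch below is not the library code; the decode helper assumes a little-endian host and the function names are invented for this note.

#include <cstdint>
#include <cstring>

// Simplified stand-in for util/coding's DecodeFixed32 (little-endian host only).
static uint32_t DecodeFixed32LE(const char* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

// Locate the filter covering `block_offset`, as KeyMayMatch() does:
// index = block_offset >> base_lg, then read [start, limit) from the
// fixed32 offset array (e.g. block_offset 3100 with base_lg 11 gives index 1).
static bool FilterRangeForOffset(uint64_t block_offset, const char* offset_array,
                                 uint64_t num_filters, uint32_t base_lg,
                                 uint32_t* start, uint32_t* limit) {
  uint64_t index = block_offset >> base_lg;
  if (index >= num_filters) return false;
  *start = DecodeFixed32LE(offset_array + index * 4);
  *limit = DecodeFixed32LE(offset_array + index * 4 + 4);
  return true;
}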
@@ -16,9 +16,7 @@ namespace leveldb {
 // For testing: emit an array with one hash value per key
 class TestHashFilter : public FilterPolicy {
  public:
-  virtual const char* Name() const {
-    return "TestHashFilter";
-  }
+  virtual const char* Name() const { return "TestHashFilter"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     for (int i = 0; i < n; i++) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
   ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
 }
 
 TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
   // Check first filter
   ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
   ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
 
   // Check second filter
   ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
 
   // Check third filter (empty)
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
 
   // Check last filter
   ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
 }
 
 Status BlockHandle::DecodeFrom(Slice* input) {
-  if (GetVarint64(input, &offset_) &&
-      GetVarint64(input, &size_)) {
+  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
     return Status::OK();
   } else {
     return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
   return result;
 }
 
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result) {
   result->data = Slice();
   result->cachable = false;
   result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
   }
 
   // Check the crc of the type and the block contents
   const char* data = contents.data();  // Pointer to where Read put the data
   if (options.verify_checksums) {
     const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
     const uint32_t actual = crc32c::Value(data, n + 1);
@@ -5,8 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
 #define STORAGE_LEVELDB_TABLE_FORMAT_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "leveldb/table_builder.h"
@@ -21,6 +23,9 @@ struct ReadOptions;
 // block or a meta block.
 class BlockHandle {
  public:
+  // Maximum encoding length of a BlockHandle
+  enum { kMaxEncodedLength = 10 + 10 };
+
   BlockHandle();
 
   // The offset of the block in the file.
@@ -34,9 +39,6 @@ class BlockHandle {
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
 
-  // Maximum encoding length of a BlockHandle
-  enum { kMaxEncodedLength = 10 + 10 };
-
  private:
   uint64_t offset_;
   uint64_t size_;
@@ -46,30 +48,24 @@ class BlockHandle {
 // end of every table file.
 class Footer {
  public:
-  Footer() { }
+  // Encoded length of a Footer. Note that the serialization of a
+  // Footer will always occupy exactly this many bytes. It consists
+  // of two block handles and a magic number.
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
+
+  Footer() = default;
 
   // The block handle for the metaindex block of the table
   const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
   void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
 
   // The block handle for the index block of the table
-  const BlockHandle& index_handle() const {
-    return index_handle_;
-  }
-  void set_index_handle(const BlockHandle& h) {
-    index_handle_ = h;
-  }
+  const BlockHandle& index_handle() const { return index_handle_; }
+  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
 
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
 
-  // Encoded length of a Footer. Note that the serialization of a
-  // Footer will always occupy exactly this many bytes. It consists
-  // of two block handles and a magic number.
-  enum {
-    kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
-  };
-
  private:
   BlockHandle metaindex_handle_;
   BlockHandle index_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
 
 // Read the block identified by "handle" from "file". On failure
 // return non-OK. On success fill *result and return OK.
-extern Status ReadBlock(RandomAccessFile* file,
-                        const ReadOptions& options,
-                        const BlockHandle& handle,
-                        BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result);
 
 // Implementation details follow. Clients should ignore,
 
 inline BlockHandle::BlockHandle()
-    : offset_(~static_cast<uint64_t>(0)),
-      size_(~static_cast<uint64_t>(0)) {
-}
+    : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
 
 }  // namespace leveldb
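The constants relocated in the BlockHandle and Footer hunks above encode a small piece of arithmetic: a varint64 needs at most 10 bytes, a handle stores two varints (offset and size), and a footer is two padded handles plus an 8-byte magic number. A quick standalone check, with names made up for this note:

#include <cstdint>
#include <cstdio>

enum : uint32_t {
  kMaxVarint64Bytes = 10,                     // worst-case varint64 encoding
  kMaxEncodedHandle = 2 * kMaxVarint64Bytes,  // offset + size -> 20 bytes
  kEncodedFooter = 2 * kMaxEncodedHandle + 8  // two handles + magic -> 48 bytes
};

int main() {
  std::printf("handle <= %u bytes, footer == %u bytes\n",
              kMaxEncodedHandle, kEncodedFooter);  // 20 and 48
  return 0;
}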
Some files were not shown because too many files have changed in this diff.