From dd598676cd655dc2a2aaef47715ce18175d4a550 Mon Sep 17 00:00:00 2001
From: Wankai Zhang <wankaizhang@gmail.com>
Date: Thu, 29 Jan 2015 14:15:31 +0800
Subject: [PATCH 001/181] fix block_builder header file dependency

---
 table/block_builder.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/table/block_builder.cc b/table/block_builder.cc
index db660cd..c744cfa 100644
--- a/table/block_builder.cc
+++ b/table/block_builder.cc
@@ -30,8 +30,8 @@
 
 #include <algorithm>
 #include <assert.h>
+#include "leveldb/options.h"
 #include "leveldb/comparator.h"
-#include "leveldb/table_builder.h"
 #include "util/coding.h"
 
 namespace leveldb {

From e5f0a51fa44115fb083c1e71d5ddcd07a7aba719 Mon Sep 17 00:00:00 2001
From: ivanabc <sjingivan@gmail.com>
Date: Mon, 20 Jun 2016 10:41:25 +0800
Subject: [PATCH 002/181] reduce the lock's scope in DeleteObsoleteFiles

---
 db/db_impl.cc | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 49b9595..c6da3a6 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -216,18 +216,26 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
 }
 
 void DBImpl::DeleteObsoleteFiles() {
+  mutex_.AssertHeld();
+
   if (!bg_error_.ok()) {
     // After a background error, we don't know whether a new version may
     // or may not have been committed, so we cannot safely garbage collect.
     return;
   }
 
+  uint64_t log_number = versions_->LogNumber();
+  uint64_t prev_log_number = versions_->PrevLogNumber();
+  uint64_t manifest_file_number = versions_->ManifestFileNumber();
+
   // Make a set of all of the live files
   std::set<uint64_t> live = pending_outputs_;
   versions_->AddLiveFiles(&live);
 
   std::vector<std::string> filenames;
   env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
+
+  mutex_.Unlock();
   uint64_t number;
   FileType type;
   for (size_t i = 0; i < filenames.size(); i++) {
@@ -235,13 +243,13 @@ void DBImpl::DeleteObsoleteFiles() {
       bool keep = true;
       switch (type) {
         case kLogFile:
-          keep = ((number >= versions_->LogNumber()) ||
-                  (number == versions_->PrevLogNumber()));
+          keep = ((number >= log_number) ||
+                  (number == prev_log_number));
           break;
         case kDescriptorFile:
           // Keep my manifest file, and any newer incarnations'
           // (in case there is a race that allows other incarnations)
-          keep = (number >= versions_->ManifestFileNumber());
+          keep = (number >= manifest_file_number);
           break;
         case kTableFile:
           keep = (live.find(number) != live.end());
@@ -269,6 +277,7 @@ void DBImpl::DeleteObsoleteFiles() {
       }
     }
   }
+  mutex_.Lock();
 }
 
 Status DBImpl::Recover(VersionEdit* edit) {

From 2883fcd849ca7b479d8a2f4fc929f0b6c7b9e372 Mon Sep 17 00:00:00 2001
From: ivanabc <sjingivan@gmail.com>
Date: Tue, 21 Jun 2016 16:57:57 +0800
Subject: [PATCH 003/181] set const property

---
 db/db_impl.cc | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index c6da3a6..cc1cea4 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -224,9 +224,9 @@ void DBImpl::DeleteObsoleteFiles() {
     return;
   }
 
-  uint64_t log_number = versions_->LogNumber();
-  uint64_t prev_log_number = versions_->PrevLogNumber();
-  uint64_t manifest_file_number = versions_->ManifestFileNumber();
+  const uint64_t log_number = versions_->LogNumber();
+  const uint64_t prev_log_number = versions_->PrevLogNumber();
+  const uint64_t manifest_file_number = versions_->ManifestFileNumber();
 
   // Make a set of all of the live files
   std::set<uint64_t> live = pending_outputs_;
@@ -235,7 +235,9 @@ void DBImpl::DeleteObsoleteFiles() {
   std::vector<std::string> filenames;
   env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
 
+  // Unlock while deleting obsolete files
   mutex_.Unlock();
+
   uint64_t number;
   FileType type;
   for (size_t i = 0; i < filenames.size(); i++) {

From 7d060117fa0d5cab7cb15b0cf127533bea9ffbc7 Mon Sep 17 00:00:00 2001
From: proller <proller@github.com>
Date: Mon, 3 Oct 2016 19:40:07 +0300
Subject: [PATCH 004/181] broken db: fix assertion in
 leveldb::InternalKey::Encode, mark base as corrupt

---
 db/dbformat.h      | 6 +++++-
 db/version_edit.cc | 3 +--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/db/dbformat.h b/db/dbformat.h
index ea897b1..415dc96 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -150,7 +150,11 @@ class InternalKey {
     AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
   }
 
-  void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); }
+  bool DecodeFrom(const Slice& s) {
+    rep_.assign(s.data(), s.size());
+    return !rep_.empty();
+  }
+
   Slice Encode() const {
     assert(!rep_.empty());
     return rep_;
diff --git a/db/version_edit.cc b/db/version_edit.cc
index f10a2d5..1eea2d1 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -88,8 +88,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
 static bool GetInternalKey(Slice* input, InternalKey* dst) {
   Slice str;
   if (GetLengthPrefixedSlice(input, &str)) {
-    dst->DecodeFrom(str);
-    return true;
+    return dst->DecodeFrom(str);
   } else {
     return false;
   }

From 5b817400a0a5afe3badbb8859706a571882ababc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9E=9C=E5=86=BB?= <18814092650@163.com>
Date: Fri, 10 Mar 2017 14:23:19 +0800
Subject: [PATCH 005/181] fix comment

---
 util/env_posix.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 84aabb2..1c2ba9a 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -306,7 +306,7 @@ class PosixFileLock : public FileLock {
 };
 
 // Set of locked files.  We keep a separate set instead of just
-// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide
+// relying on fcntl(F_SETLK) since fcntl(F_SETLK) does not provide
 // any protection against multiple uses from the same process.
 class PosixLockTable {
  private:

From 471f0b84ec3420c7565511eb6e2fee8e0a0550e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E7=82=80?= <zhoudayang2@163.com>
Date: Mon, 22 May 2017 14:01:38 +0800
Subject: [PATCH 006/181] fix comment

---
 include/leveldb/env.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 99b6c21..9f4bdfd 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -145,7 +145,7 @@ class Env {
   virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
 
   // *path is set to a temporary directory that can be used for testing. It may
-  // or many not have just been created. The directory may or may not differ
+  // or may not have just been created. The directory may or may not differ
   // between runs of the same process, but subsequent calls will return the
   // same directory.
   virtual Status GetTestDirectory(std::string* path) = 0;

From 3ee04c5ceae6535e922521ed940411a0318fdd3b Mon Sep 17 00:00:00 2001
From: lingbin <lingbinlb@gmail.com>
Date: Mon, 4 Sep 2017 16:10:38 +0800
Subject: [PATCH 007/181] fix style and remove unused code

---
 util/coding.cc | 10 ----------
 util/coding.h  |  4 ++--
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/util/coding.cc b/util/coding.cc
index 21e3186..c37b24a 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -169,16 +169,6 @@ bool GetVarint64(Slice* input, uint64_t* value) {
   }
 }
 
-const char* GetLengthPrefixedSlice(const char* p, const char* limit,
-                                   Slice* result) {
-  uint32_t len;
-  p = GetVarint32Ptr(p, limit, &len);
-  if (p == NULL) return NULL;
-  if (p + len > limit) return NULL;
-  *result = Slice(p, len);
-  return p + len;
-}
-
 bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
   uint32_t len;
   if (GetVarint32(input, &len) &&
diff --git a/util/coding.h b/util/coding.h
index 3993c4a..8582d90 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -35,8 +35,8 @@ extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
 // in *v and return a pointer just past the parsed value, or return
 // NULL on error.  These routines only look at bytes in the range
 // [p..limit-1]
-extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
-extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);
+extern const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v);
+extern const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
 
 // Returns the length of the varint32 or varint64 encoding of "v"
 extern int VarintLength(uint64_t v);

From 14cce848e7b8a040a8f457d5a796722a55e19597 Mon Sep 17 00:00:00 2001
From: MarcoFalke <falke.marco@gmail.com>
Date: Mon, 16 Apr 2018 12:27:06 -0700
Subject: [PATCH 008/181] Fix sign mismatch warnings in GCC.

This was contributed in https://github.com/google/leveldb/pull/492

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=193080913
---
 db/memtable.cc    | 2 +-
 db/version_set.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/db/memtable.cc b/db/memtable.cc
index bfec0a7..287afdb 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -101,7 +101,7 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   p += 8;
   p = EncodeVarint32(p, val_size);
   memcpy(p, value.data(), val_size);
-  assert((p + val_size) - buf == encoded_len);
+  assert(p + val_size == buf + encoded_len);
   table_.Insert(buf);
 }
 
diff --git a/db/version_set.cc b/db/version_set.cc
index 02cc66f..c27ccad 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -20,7 +20,7 @@
 
 namespace leveldb {
 
-static int TargetFileSize(const Options* options) {
+static size_t TargetFileSize(const Options* options) {
   return options->max_file_size;
 }
 

From d177a0263cce4344d05188521ad53459c369b940 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 17 Apr 2018 13:23:10 -0700
Subject: [PATCH 009/181] Replace port_posix with port_stdcxx.

The porting layer implements threading primitives: atomic pointers,
condition variables, mutexes, thread-safe initialization. These are all
specified in C++11, so the reference open source port implementation can
become platform-independent.

The porting layer will remain in place to allow the use of other
implementations with more features, such as the built-in deadlock
detection in abseil's Mutex.
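
For illustration, a minimal sketch of the wait/signal pattern these wrappers
support (the function names and the ready flag are invented for the example;
the Mutex/CondVar interface is the one defined in port_stdcxx.h below):

    #include "port/port.h"  // leveldb::port::Mutex, leveldb::port::CondVar

    leveldb::port::Mutex mu;
    leveldb::port::CondVar cv(&mu);
    bool ready = false;

    void MarkReady() {
      mu.Lock();
      ready = true;
      cv.Signal();  // wakes one thread blocked in Wait()
      mu.Unlock();
    }

    void WaitUntilReady() {
      mu.Lock();
      while (!ready) {
        cv.Wait();  // releases mu while waiting, re-acquires before returning
      }
      mu.Unlock();
    }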

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=193245934
---
 CMakeLists.txt                       | 64 ++++++------------------
 port/README                          |  2 +-
 port/port.h                          |  2 +-
 port/port_posix.cc                   | 53 --------------------
 port/{port_posix.h => port_stdcxx.h} | 74 ++++++++++++++++------------
 util/env_posix.cc                    |  6 +++
 6 files changed, 66 insertions(+), 135 deletions(-)
 delete mode 100644 port/port_posix.cc
 rename port/{port_posix.h => port_stdcxx.h} (62%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5588120..4bf3df3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -79,33 +79,10 @@ if(BUILD_SHARED_LIBS)
   add_compile_options(-fvisibility=hidden)
 endif(BUILD_SHARED_LIBS)
 
-# POSIX code is specified separately so we can leave it out in the future.
-add_library(leveldb_port_posix OBJECT "")
-target_sources(leveldb_port_posix
-  PRIVATE
-    "${PROJECT_SOURCE_DIR}/port/port_posix.cc"
-
-  PUBLIC
-    # The headers below are dependencies for leveldb, but aren't needed by users
-    # that link to the installed version of leveldb and rely on its public API.
-    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h>
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/port/atomic_pointer.h>
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/port/port_posix.h>
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/port/port.h>
-)
-if (NOT HAVE_CXX17_HAS_INCLUDE)
-  target_compile_definitions(leveldb_port_posix
-    PRIVATE
-      LEVELDB_HAS_PORT_CONFIG_H=1
-  )
-endif(NOT HAVE_CXX17_HAS_INCLUDE)
-if(BUILD_SHARED_LIBS)
-  set_property(TARGET leveldb_port_posix PROPERTY POSITION_INDEPENDENT_CODE ON)
-endif(BUILD_SHARED_LIBS)
-
 add_library(leveldb "")
 target_sources(leveldb
   PRIVATE
+    "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
     "${PROJECT_SOURCE_DIR}/db/builder.cc"
     "${PROJECT_SOURCE_DIR}/db/builder.h"
     "${PROJECT_SOURCE_DIR}/db/c.cc"
@@ -136,6 +113,8 @@ target_sources(leveldb
     "${PROJECT_SOURCE_DIR}/db/version_set.h"
     "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
     "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
+    "${PROJECT_SOURCE_DIR}/port/atomic_pointer.h"
+    "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
     "${PROJECT_SOURCE_DIR}/port/port.h"
     "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
     "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
@@ -163,7 +142,6 @@ target_sources(leveldb
     "${PROJECT_SOURCE_DIR}/util/comparator.cc"
     "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
     "${PROJECT_SOURCE_DIR}/util/crc32c.h"
-    "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
     "${PROJECT_SOURCE_DIR}/util/env.cc"
     "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
     "${PROJECT_SOURCE_DIR}/util/hash.cc"
@@ -172,10 +150,8 @@ target_sources(leveldb
     "${PROJECT_SOURCE_DIR}/util/logging.h"
     "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
     "${PROJECT_SOURCE_DIR}/util/options.cc"
-    "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
     "${PROJECT_SOURCE_DIR}/util/random.h"
     "${PROJECT_SOURCE_DIR}/util/status.cc"
-    $<TARGET_OBJECTS:leveldb_port_posix>
 
   # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
   $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
@@ -195,12 +171,21 @@ target_sources(leveldb
     "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
     "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
 )
+
+# POSIX code is specified separately so we can leave it out in the future.
+target_sources(leveldb
+  PRIVATE
+    "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
+    "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
+)
+
 # MemEnv is not part of the interface and could be pulled to a separate library.
 target_sources(leveldb
   PRIVATE
     "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
     "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
 )
+
 target_include_directories(leveldb
   PUBLIC
     $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
@@ -234,11 +219,6 @@ if(HAVE_CLANG_THREAD_SAFETY)
       -Werror -Wthread-safety)
 endif(HAVE_CLANG_THREAD_SAFETY)
 
-# TODO(costan): This is only needed for port_posix.
-set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(Threads REQUIRED)
-target_link_libraries(leveldb Threads::Threads)
-
 if(HAVE_CRC32C)
   target_link_libraries(leveldb crc32c)
 endif(HAVE_CRC32C)
@@ -249,6 +229,10 @@ if(HAVE_TCMALLOC)
   target_link_libraries(leveldb tcmalloc)
 endif(HAVE_TCMALLOC)
 
+# Needed by port_stdcxx.h
+find_package(Threads REQUIRED)
+target_link_libraries(leveldb Threads::Threads)
+
 add_executable(leveldbutil
   "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
 )
@@ -271,14 +255,6 @@ if(LEVELDB_BUILD_TESTS)
 
         "${test_file}"
     )
-    if(BUILD_SHARED_LIBS)
-      # Port functions aren't exposed in the shared library build.
-      target_sources("${test_target_name}"
-        PRIVATE
-          $<TARGET_OBJECTS:leveldb_port_posix>
-      )
-    endif(BUILD_SHARED_LIBS)
-
     target_link_libraries("${test_target_name}" leveldb)
     target_compile_definitions("${test_target_name}"
       PRIVATE
@@ -351,14 +327,6 @@ if(LEVELDB_BUILD_BENCHMARKS)
 
         "${bench_file}"
     )
-    if(BUILD_SHARED_LIBS)
-      # Port functions aren't exposed in the shared library build.
-      target_sources("${bench_target_name}"
-        PRIVATE
-          $<TARGET_OBJECTS:leveldb_port_posix>
-      )
-    endif(BUILD_SHARED_LIBS)
-
     target_link_libraries("${bench_target_name}" leveldb)
     target_compile_definitions("${bench_target_name}"
       PRIVATE
diff --git a/port/README b/port/README
index 422563e..8b17153 100644
--- a/port/README
+++ b/port/README
@@ -5,6 +5,6 @@ Code in the rest of the package includes "port.h" from this directory.
 "port.h" in turn includes a platform specific "port_<platform>.h" file
 that provides the platform specific implementation.
 
-See port_posix.h for an example of what must be provided in a platform
+See port_stdcxx.h for an example of what must be provided in a platform
 specific header file.
 
diff --git a/port/port.h b/port/port.h
index e667db4..0975fed 100644
--- a/port/port.h
+++ b/port/port.h
@@ -11,7 +11,7 @@
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
 #if defined(LEVELDB_PLATFORM_POSIX)
-#  include "port/port_posix.h"
+#  include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
 #  include "port/port_chromium.h"
 #endif
diff --git a/port/port_posix.cc b/port/port_posix.cc
deleted file mode 100644
index 04095bb..0000000
--- a/port/port_posix.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <string.h>
-
-namespace leveldb {
-namespace port {
-
-static void PthreadCall(const char* label, int result) {
-  if (result != 0) {
-    fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
-    abort();
-  }
-}
-
-Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr)); }
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); }
-
-void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); }
-
-CondVar::CondVar(Mutex* mu)
-    : mu_(mu) {
-    PthreadCall("init cv", pthread_cond_init(&cv_, nullptr));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
-  PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-}
-
-void CondVar::Signal() {
-  PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
-  PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
-  PthreadCall("once", pthread_once(once, initializer));
-}
-
-}  // namespace port
-}  // namespace leveldb
diff --git a/port/port_posix.h b/port/port_stdcxx.h
similarity index 62%
rename from port/port_posix.h
rename to port/port_stdcxx.h
index 54b07b6..4e58cba 100644
--- a/port/port_posix.h
+++ b/port/port_stdcxx.h
@@ -1,11 +1,9 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
 
-#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
+#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
 
 // port/port_config.h availability is automatically detected via __has_include
 // in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the
@@ -24,22 +22,22 @@
 
 #endif  // defined(LEVELDB_HAS_PORT_CONFIG_H)
 
-#include <pthread.h>
 #if HAVE_CRC32C
 #include <crc32c/crc32c.h>
 #endif  // HAVE_CRC32C
 #if HAVE_SNAPPY
 #include <snappy.h>
 #endif  // HAVE_SNAPPY
+
+#include <stddef.h>
 #include <stdint.h>
+#include <cassert>
+#include <condition_variable>  // NOLINT
+#include <mutex>               // NOLINT
 #include <string>
 #include "port/atomic_pointer.h"
 #include "port/thread_annotations.h"
 
-#if !HAVE_FDATASYNC
-#define fdatasync fsync
-#endif  // !HAVE_FDATASYNC
-
 namespace leveldb {
 namespace port {
 
@@ -47,39 +45,52 @@ static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
 
 class CondVar;
 
+// Thinly wraps std::mutex.
 class LOCKABLE Mutex {
  public:
-  Mutex();
-  ~Mutex();
+  Mutex() = default;
+  ~Mutex() = default;
 
-  void Lock() EXCLUSIVE_LOCK_FUNCTION();
-  void Unlock() UNLOCK_FUNCTION();
+  Mutex(const Mutex&) = delete;
+  Mutex& operator=(const Mutex&) = delete;
+
+  void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
+  void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
   void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
 
  private:
   friend class CondVar;
-  pthread_mutex_t mu_;
-
-  // No copying
-  Mutex(const Mutex&);
-  void operator=(const Mutex&);
+  std::mutex mu_;
 };
 
+// Thinly wraps std::condition_variable.
 class CondVar {
  public:
-  explicit CondVar(Mutex* mu);
-  ~CondVar();
-  void Wait();
-  void Signal();
-  void SignalAll();
+  explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); }
+  ~CondVar() = default;
+
+  CondVar(const CondVar&) = delete;
+  CondVar& operator=(const CondVar&) = delete;
+
+  void Wait() {
+    std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
+    cv_.wait(lock);
+    lock.release();
+  }
+  void Signal() { cv_.notify_one(); }
+  void SignalAll() { cv_.notify_all(); }
  private:
-  pthread_cond_t cv_;
-  Mutex* mu_;
+  std::condition_variable cv_;
+  Mutex* const mu_;
 };
 
-typedef pthread_once_t OnceType;
-#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
-void InitOnce(OnceType* once, void (*initializer)());
+using OnceType = std::once_flag;
+#define LEVELDB_ONCE_INIT {}
+
+// Thinly wraps std::call_once.
+inline void InitOnce(OnceType* once, void (*initializer)()) {
+  std::call_once(*once, *initializer);
+}
 
 inline bool Snappy_Compress(const char* input, size_t length,
                             ::std::string* output) {
@@ -103,8 +114,7 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
 #endif  // HAVE_SNAPPY
 }
 
-inline bool Snappy_Uncompress(const char* input, size_t length,
-                              char* output) {
+inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
 #if HAVE_SNAPPY
   return snappy::RawUncompress(input, length, output);
 #else
@@ -127,4 +137,4 @@ inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
 }  // namespace port
 }  // namespace leveldb
 
-#endif  // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
+#endif  // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
diff --git a/util/env_posix.cc b/util/env_posix.cc
index e758d5f..51844ad 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -28,6 +28,12 @@
 #include "util/posix_logger.h"
 #include "util/env_posix_test_helper.h"
 
+// HAVE_FDATASYNC is defined in the auto-generated port_config.h, which is
+// included by port_stdcxx.h.
+#if !HAVE_FDATASYNC
+#define fdatasync fsync
+#endif  // !HAVE_FDATASYNC
+
 namespace leveldb {
 
 namespace {

From 4de9594f6fbfd69043239a5705b5f32065f02d34 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 23 Apr 2018 16:15:21 -0700
Subject: [PATCH 010/181] Add move constructor to Status.

This will result in smaller code generation when Status instances are
passed around.
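
As a rough illustration (the function names below are invented for the
example), the benefit is that a Status returned or re-assigned by value no
longer has to duplicate its heap-allocated state via CopyState():

    #include "leveldb/status.h"

    leveldb::Status OpenSomething() {
      return leveldb::Status::IOError("simulated failure");  // non-OK state lives on the heap
    }

    void Caller() {
      leveldb::Status s = OpenSomething();  // may be moved rather than deep-copied
      s = OpenSomething();                  // move-assignment just swaps state_
    }
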

Benchmarks don't indicate a significant change either way.
CPU:        48 * Intel(R) Xeon(R) CPU E5-2690 v3 @ 2.60GHz
CPUCache:   30720 KB
Keys:       16 bytes each
Values:     100 bytes each (50 bytes after compression)
Entries:    1000000
RawSize:    110.6 MB (estimated)
FileSize:   62.9 MB (estimated)

Baseline:
fillseq      :       3.589 micros/op;   30.8 MB/s
fillsync     :    4165.299 micros/op;    0.0 MB/s (1000 ops)
fillrandom   :       5.864 micros/op;   18.9 MB/s
overwrite    :       7.830 micros/op;   14.1 MB/s
readrandom   :       5.534 micros/op; (1000000 of 1000000 found)
readrandom   :       4.292 micros/op; (1000000 of 1000000 found)
readseq      :       0.312 micros/op;  354.1 MB/s
readreverse  :       0.501 micros/op;  220.8 MB/s
compact      :  886211.000 micros/op;
readrandom   :       3.518 micros/op; (1000000 of 1000000 found)
readseq      :       0.251 micros/op;  441.2 MB/s
readreverse  :       0.456 micros/op;  242.4 MB/s
fill100K     :    1329.723 micros/op;   71.7 MB/s (1000 ops)
crc32c       :       1.976 micros/op; 1976.7 MB/s (4K per op)
snappycomp   :       4.705 micros/op;  830.2 MB/s (output: 55.1%)
snappyuncomp :       0.958 micros/op; 4079.1 MB/s
acquireload  :       0.727 micros/op; (each op is 1000 loads)

New:
fillseq      :       3.129 micros/op;   35.4 MB/s
fillsync     :    2748.099 micros/op;    0.0 MB/s (1000 ops)
fillrandom   :       5.394 micros/op;   20.5 MB/s
overwrite    :       7.253 micros/op;   15.3 MB/s
readrandom   :       5.655 micros/op; (1000000 of 1000000 found)
readrandom   :       4.425 micros/op; (1000000 of 1000000 found)
readseq      :       0.298 micros/op;  371.3 MB/s
readreverse  :       0.508 micros/op;  217.9 MB/s
compact      :  885842.000 micros/op;
readrandom   :       3.545 micros/op; (1000000 of 1000000 found)
readseq      :       0.252 micros/op;  438.2 MB/s
readreverse  :       0.425 micros/op;  260.2 MB/s
fill100K     :    1418.347 micros/op;   67.2 MB/s (1000 ops)
crc32c       :       1.987 micros/op; 1966.0 MB/s (4K per op)
snappycomp   :       4.767 micros/op;  819.4 MB/s (output: 55.1%)
snappyuncomp :       0.916 micros/op; 4264.9 MB/s
acquireload  :       0.665 micros/op; (each op is 1000 loads)

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=194002392
---
 CMakeLists.txt           |  1 +
 include/leveldb/status.h | 30 +++++++++++++++++-----------
 util/status_test.cc      | 42 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 11 deletions(-)
 create mode 100644 util/status_test.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4bf3df3..d49c31e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -277,6 +277,7 @@ if(LEVELDB_BUILD_TESTS)
   leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
 
   leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
+  leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
 
   if(NOT BUILD_SHARED_LIBS)
     leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
diff --git a/include/leveldb/status.h b/include/leveldb/status.h
index 39d692d..ee9fac2 100644
--- a/include/leveldb/status.h
+++ b/include/leveldb/status.h
@@ -13,6 +13,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
 #define STORAGE_LEVELDB_INCLUDE_STATUS_H_
 
+#include <algorithm>
 #include <string>
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
@@ -22,12 +23,14 @@ namespace leveldb {
 class LEVELDB_EXPORT Status {
  public:
   // Create a success status.
-  Status() : state_(nullptr) { }
+  Status() noexcept : state_(nullptr) { }
   ~Status() { delete[] state_; }
 
-  // Copy the specified status.
-  Status(const Status& s);
-  void operator=(const Status& s);
+  Status(const Status& rhs);
+  Status& operator=(const Status& rhs);
+
+  Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
+  Status& operator=(Status&& rhs) noexcept;
 
   // Return a success status.
   static Status OK() { return Status(); }
@@ -96,16 +99,21 @@ class LEVELDB_EXPORT Status {
   static const char* CopyState(const char* s);
 };
 
-inline Status::Status(const Status& s) {
-  state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
+inline Status::Status(const Status& rhs) {
+  state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
 }
-inline void Status::operator=(const Status& s) {
-  // The following condition catches both aliasing (when this == &s),
-  // and the common case where both s and *this are ok.
-  if (state_ != s.state_) {
+inline Status& Status::operator=(const Status& rhs) {
+  // The following condition catches both aliasing (when this == &rhs),
+  // and the common case where both rhs and *this are ok.
+  if (state_ != rhs.state_) {
     delete[] state_;
-    state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
+    state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
   }
+  return *this;
+}
+inline Status& Status::operator=(Status&& rhs) noexcept {
+  std::swap(state_, rhs.state_);
+  return *this;
 }
 
 }  // namespace leveldb
diff --git a/util/status_test.cc b/util/status_test.cc
new file mode 100644
index 0000000..7ed3b9e
--- /dev/null
+++ b/util/status_test.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <utility>
+
+#include "leveldb/slice.h"
+#include "leveldb/status.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+TEST(Status, MoveConstructor) {
+  {
+    Status ok = Status::OK();
+    Status ok2 = std::move(ok);
+
+    ASSERT_TRUE(ok2.ok());
+  }
+
+  {
+    Status status = Status::NotFound("custom NotFound status message");
+    Status status2 = std::move(status);
+
+    ASSERT_TRUE(status2.IsNotFound());
+    ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString());
+  }
+
+  {
+    Status self_moved = Status::IOError("custom IOError status message");
+
+    // Needed to bypass compiler warning about explicit move-assignment.
+    Status& self_moved_reference = self_moved;
+    self_moved_reference = std::move(self_moved);
+  }
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}

From bc23e00f955eadb9e26f8ce07c1c664e7b985ff0 Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Fri, 27 Apr 2018 09:00:28 -0700
Subject: [PATCH 011/181] Update default log file size in doc.

The default size was changed in #f779e7a5 but the documentation was
never updated.

This fixes #566 reported on GitHub.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=194547959
---
 doc/impl.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/impl.md b/doc/impl.md
index 4b13f2a..6e6b2ab 100644
--- a/doc/impl.md
+++ b/doc/impl.md
@@ -64,7 +64,7 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).
 
 ## Level 0
 
-When the log file grows above a certain size (1MB by default):
+When the log file grows above a certain size (4MB by default):
 Create a brand new memtable and log file and direct future updates here
 In the background:
 Write the contents of the previous memtable to an sstable

From e7840de9f3db1a5eddedfecbbbc1ff72a4c2631a Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Fri, 27 Apr 2018 09:14:32 -0700
Subject: [PATCH 012/181] Fix documentation for log file growth.

This fixes #546 reported on GitHub.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=194549692
---
 doc/impl.md | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/doc/impl.md b/doc/impl.md
index 6e6b2ab..cacabb9 100644
--- a/doc/impl.md
+++ b/doc/impl.md
@@ -65,12 +65,14 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).
 ## Level 0
 
 When the log file grows above a certain size (4MB by default):
-Create a brand new memtable and log file and direct future updates here
+Create a brand new memtable and log file and direct future updates here.
+
 In the background:
-Write the contents of the previous memtable to an sstable
-Discard the memtable
-Delete the old log file and the old memtable
-Add the new sstable to the young (level-0) level.
+
+1. Write the contents of the previous memtable to an sstable.
+2. Discard the memtable.
+3. Delete the old log file and the old memtable.
+4. Add the new sstable to the young (level-0) level.
 
 ## Compactions
 

From 18683981505dc374ce29211c80a9552f8f2f4571 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 30 Apr 2018 15:11:03 -0700
Subject: [PATCH 013/181] Clean up SnapshotImpl.

* Omit SnapshotImpl::list_ when assert() isn't on
* Make SnapshotImpl::number_ const and set it in the constructor
* Make SnapshotImpl::number_ private and access it via a getter
* Rename SnapshotImpl::number_ to SnapshotImpl::sequence_number_
* Rename SnapshotList::list_ to SnapshotList::head_
* Wrap casting from Snapshot* to SnapshotImpl* in ToSnapshotImpl()
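
For reference, a sketch of the public-facing snapshot flow these internals
back (db is assumed to be an already-open leveldb::DB*):

    const leveldb::Snapshot* snapshot = db->GetSnapshot();

    leveldb::ReadOptions read_options;
    read_options.snapshot = snapshot;  // reads observe the state as of GetSnapshot()

    std::string value;
    leveldb::Status s = db->Get(read_options, "some-key", &value);

    db->ReleaseSnapshot(snapshot);  // snapshots must be released explicitly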

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=194852828
---
 db/db_impl.cc | 11 ++++----
 db/db_test.cc | 49 ++++++++++++++++++++++++++++++++++++
 db/snapshot.h | 70 +++++++++++++++++++++++++++++++++------------------
 3 files changed, 101 insertions(+), 29 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 02a6872..fefb883 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -906,7 +906,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   if (snapshots_.empty()) {
     compact->smallest_snapshot = versions_->LastSequence();
   } else {
-    compact->smallest_snapshot = snapshots_.oldest()->number_;
+    compact->smallest_snapshot = snapshots_.oldest()->sequence_number();
   }
 
   // Release mutex while we're actually doing the compaction work
@@ -1121,7 +1121,8 @@ Status DBImpl::Get(const ReadOptions& options,
   MutexLock l(&mutex_);
   SequenceNumber snapshot;
   if (options.snapshot != nullptr) {
-    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
+    snapshot =
+        static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number();
   } else {
     snapshot = versions_->LastSequence();
   }
@@ -1168,7 +1169,7 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
   return NewDBIterator(
       this, user_comparator(), iter,
       (options.snapshot != nullptr
-       ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
+       ? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
        : latest_snapshot),
       seed);
 }
@@ -1185,9 +1186,9 @@ const Snapshot* DBImpl::GetSnapshot() {
   return snapshots_.New(versions_->LastSequence());
 }
 
-void DBImpl::ReleaseSnapshot(const Snapshot* s) {
+void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
   MutexLock l(&mutex_);
-  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
+  snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot));
 }
 
 // Convenience methods
diff --git a/db/db_test.cc b/db/db_test.cc
index 47e3287..878b7d4 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -631,6 +631,55 @@ TEST(DBTest, GetSnapshot) {
   } while (ChangeOptions());
 }
 
+TEST(DBTest, GetIdenticalSnapshots) {
+  do {
+    // Try with both a short key and a long key
+    for (int i = 0; i < 2; i++) {
+      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
+      ASSERT_OK(Put(key, "v1"));
+      const Snapshot* s1 = db_->GetSnapshot();
+      const Snapshot* s2 = db_->GetSnapshot();
+      const Snapshot* s3 = db_->GetSnapshot();
+      ASSERT_OK(Put(key, "v2"));
+      ASSERT_EQ("v2", Get(key));
+      ASSERT_EQ("v1", Get(key, s1));
+      ASSERT_EQ("v1", Get(key, s2));
+      ASSERT_EQ("v1", Get(key, s3));
+      db_->ReleaseSnapshot(s1);
+      dbfull()->TEST_CompactMemTable();
+      ASSERT_EQ("v2", Get(key));
+      ASSERT_EQ("v1", Get(key, s2));
+      db_->ReleaseSnapshot(s2);
+      ASSERT_EQ("v1", Get(key, s3));
+      db_->ReleaseSnapshot(s3);
+    }
+  } while (ChangeOptions());
+}
+
+TEST(DBTest, IterateOverEmptySnapshot) {
+  do {
+    const Snapshot* snapshot = db_->GetSnapshot();
+    ReadOptions read_options;
+    read_options.snapshot = snapshot;
+    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_OK(Put("foo", "v2"));
+
+    Iterator* iterator1 = db_->NewIterator(read_options);
+    iterator1->SeekToFirst();
+    ASSERT_TRUE(!iterator1->Valid());
+    delete iterator1;
+
+    dbfull()->TEST_CompactMemTable();
+
+    Iterator* iterator2 = db_->NewIterator(read_options);
+    iterator2->SeekToFirst();
+    ASSERT_TRUE(!iterator2->Valid());
+    delete iterator2;
+
+    db_->ReleaseSnapshot(snapshot);
+  } while (ChangeOptions());
+}
+
 TEST(DBTest, GetLevel0Ordering) {
   do {
     // Check that we process level-0 files in correct order.  The code
diff --git a/db/snapshot.h b/db/snapshot.h
index 6ed413c..c43d9f9 100644
--- a/db/snapshot.h
+++ b/db/snapshot.h
@@ -16,50 +16,72 @@ class SnapshotList;
 // Each SnapshotImpl corresponds to a particular sequence number.
 class SnapshotImpl : public Snapshot {
  public:
-  SequenceNumber number_;  // const after creation
+  SnapshotImpl(SequenceNumber sequence_number)
+      : sequence_number_(sequence_number) {}
+
+  SequenceNumber sequence_number() const { return sequence_number_; }
 
  private:
   friend class SnapshotList;
 
-  // SnapshotImpl is kept in a doubly-linked circular list
+  // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
+  // implementation operates on the next/previous fields directly.
   SnapshotImpl* prev_;
   SnapshotImpl* next_;
 
-  SnapshotList* list_;                 // just for sanity checks
+  const SequenceNumber sequence_number_;
+
+#if !defined(NDEBUG)
+  SnapshotList* list_ = nullptr;
+#endif  // !defined(NDEBUG)
 };
 
 class SnapshotList {
  public:
-  SnapshotList() {
-    list_.prev_ = &list_;
-    list_.next_ = &list_;
+  SnapshotList() : head_(0) {
+    head_.prev_ = &head_;
+    head_.next_ = &head_;
   }
 
-  bool empty() const { return list_.next_ == &list_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
+  bool empty() const { return head_.next_ == &head_; }
+  SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
+  SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
 
-  const SnapshotImpl* New(SequenceNumber seq) {
-    SnapshotImpl* s = new SnapshotImpl;
-    s->number_ = seq;
-    s->list_ = this;
-    s->next_ = &list_;
-    s->prev_ = list_.prev_;
-    s->prev_->next_ = s;
-    s->next_->prev_ = s;
-    return s;
+  // Creates a SnapshotImpl and appends it to the end of the list.
+  SnapshotImpl* New(SequenceNumber sequence_number) {
+    assert(empty() || newest()->sequence_number_ <= sequence_number);
+
+    SnapshotImpl* snapshot = new SnapshotImpl(sequence_number);
+
+#if !defined(NDEBUG)
+    snapshot->list_ = this;
+#endif  // !defined(NDEBUG)
+    snapshot->next_ = &head_;
+    snapshot->prev_ = head_.prev_;
+    snapshot->prev_->next_ = snapshot;
+    snapshot->next_->prev_ = snapshot;
+    return snapshot;
   }
 
-  void Delete(const SnapshotImpl* s) {
-    assert(s->list_ == this);
-    s->prev_->next_ = s->next_;
-    s->next_->prev_ = s->prev_;
-    delete s;
+  // Removes a SnapshotImpl from this list.
+  //
+  // The snapshot must have been created by calling New() on this list.
+  //
+  // The snapshot pointer should not be const, because its memory is
+  // deallocated. However, that would force us to change DB::ReleaseSnapshot(),
+  // which is in the API, and currently takes a const Snapshot.
+  void Delete(const SnapshotImpl* snapshot) {
+#if !defined(NDEBUG)
+    assert(snapshot->list_ == this);
+#endif  // !defined(NDEBUG)
+    snapshot->prev_->next_ = snapshot->next_;
+    snapshot->next_->prev_ = snapshot->prev_;
+    delete snapshot;
   }
 
  private:
   // Dummy head of doubly-linked list of snapshots
-  SnapshotImpl list_;
+  SnapshotImpl head_;
 };
 
 }  // namespace leveldb

From 6a6bdafcf10f5d4bef1ca52697c38d10c28b1a8b Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Mon, 21 May 2018 13:53:36 -0700
Subject: [PATCH 014/181] Corrected typo in docs: "cache" to "block_cache".

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=197452015
---
 doc/index.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/index.md b/doc/index.md
index be85696..ea4609d 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -338,19 +338,19 @@ options.compression = leveldb::kNoCompression;
 ### Cache
 
 The contents of the database are stored in a set of files in the filesystem and
-each file stores a sequence of compressed blocks. If options.cache is non-NULL,
-it is used to cache frequently used uncompressed block contents.
+each file stores a sequence of compressed blocks. If options.block_cache is
+non-NULL, it is used to cache frequently used uncompressed block contents.
 
 ```c++
 #include "leveldb/cache.h"
 
 leveldb::Options options;
-options.cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
 leveldb::DB* db;
 leveldb::DB::Open(options, name, &db);
 ... use the db ...
 delete db
-delete options.cache;
+delete options.block_cache;
 ```
 
 Note that the cache holds uncompressed data, and therefore it should be sized

From 6caf73ad9dae0ee91873bcb39554537b85163770 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 4 Jun 2018 12:29:13 -0700
Subject: [PATCH 015/181] Clean up Iterator.

This CL renames the private struct Iterator::Cleanup ->
Iterator::CleanupNode, to better reflect that it's a linked-list node,
and extracts code duplicated across its users into the IsEmpty() and
Run() methods.
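
For context, a hedged sketch of how the cleanup list is used through the
public API (the buffer here is a stand-in resource; real callers attach
things like cached blocks):

    // Matches Iterator::CleanupFunction, i.e. void (*)(void*, void*).
    static void FreeBuffer(void* arg1, void* /*arg2*/) {
      delete[] static_cast<char*>(arg1);
    }

    void AttachBuffer(leveldb::Iterator* iter, char* buffer) {
      // FreeBuffer(buffer, nullptr) runs when the iterator is destroyed.
      iter->RegisterCleanup(FreeBuffer, buffer, nullptr);
    }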

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=199175058
---
 include/leveldb/iterator.h | 16 +++++++---
 table/iterator.cc          | 61 +++++++++++++++++++++-----------------
 2 files changed, 45 insertions(+), 32 deletions(-)

diff --git a/include/leveldb/iterator.h b/include/leveldb/iterator.h
index 436508a..6c1d91b 100644
--- a/include/leveldb/iterator.h
+++ b/include/leveldb/iterator.h
@@ -77,17 +77,25 @@ class LEVELDB_EXPORT Iterator {
   //
   // Note that unlike all of the preceding methods, this method is
   // not abstract and therefore clients should not override it.
-  typedef void (*CleanupFunction)(void* arg1, void* arg2);
+  using CleanupFunction = void (*)(void* arg1, void* arg2);
   void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
 
  private:
-  struct Cleanup {
+  // Cleanup functions are stored in a single-linked list.
+  // The list's head node is inlined in the iterator.
+  struct CleanupNode {
+    // The head node is used if the function pointer is not null.
     CleanupFunction function;
     void* arg1;
     void* arg2;
-    Cleanup* next;
+    CleanupNode* next;
+
+    // True if the node is not used. Only head nodes might be unused.
+    bool IsEmpty() const { return function == nullptr; }
+    // Invokes the cleanup function.
+    void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
   };
-  Cleanup cleanup_;
+  CleanupNode cleanup_head_;
 };
 
 // Return an empty iterator (yields nothing).
diff --git a/table/iterator.cc b/table/iterator.cc
index aff0e59..41ec1aa 100644
--- a/table/iterator.cc
+++ b/table/iterator.cc
@@ -7,54 +7,59 @@
 namespace leveldb {
 
 Iterator::Iterator() {
-  cleanup_.function = nullptr;
-  cleanup_.next = nullptr;
+  cleanup_head_.function = nullptr;
+  cleanup_head_.next = nullptr;
 }
 
 Iterator::~Iterator() {
-  if (cleanup_.function != nullptr) {
-    (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
-    for (Cleanup* c = cleanup_.next; c != nullptr; ) {
-      (*c->function)(c->arg1, c->arg2);
-      Cleanup* next = c->next;
-      delete c;
-      c = next;
+  if (!cleanup_head_.IsEmpty()) {
+    cleanup_head_.Run();
+    for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+      node->Run();
+      CleanupNode* next_node = node->next;
+      delete node;
+      node = next_node;
     }
   }
 }
 
 void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
   assert(func != nullptr);
-  Cleanup* c;
-  if (cleanup_.function == nullptr) {
-    c = &cleanup_;
+  CleanupNode* node;
+  if (cleanup_head_.IsEmpty()) {
+    node = &cleanup_head_;
   } else {
-    c = new Cleanup;
-    c->next = cleanup_.next;
-    cleanup_.next = c;
+    node = new CleanupNode();
+    node->next = cleanup_head_.next;
+    cleanup_head_.next = node;
   }
-  c->function = func;
-  c->arg1 = arg1;
-  c->arg2 = arg2;
+  node->function = func;
+  node->arg1 = arg1;
+  node->arg2 = arg2;
 }
 
 namespace {
+
 class EmptyIterator : public Iterator {
  public:
   EmptyIterator(const Status& s) : status_(s) { }
-  virtual bool Valid() const { return false; }
-  virtual void Seek(const Slice& target) { }
-  virtual void SeekToFirst() { }
-  virtual void SeekToLast() { }
-  virtual void Next() { assert(false); }
-  virtual void Prev() { assert(false); }
-  Slice key() const { assert(false); return Slice(); }
-  Slice value() const { assert(false); return Slice(); }
-  virtual Status status() const { return status_; }
+  ~EmptyIterator() override = default;
+
+  bool Valid() const override { return false; }
+  void Seek(const Slice& target) override { }
+  void SeekToFirst() override { }
+  void SeekToLast() override { }
+  void Next() override { assert(false); }
+  void Prev() override { assert(false); }
+  Slice key() const override { assert(false); return Slice(); }
+  Slice value() const override { assert(false); return Slice(); }
+  Status status() const override { return status_; }
+
  private:
   Status status_;
 };
-}  // namespace
+
+}  // anonymous namespace
 
 Iterator* NewEmptyIterator() {
   return new EmptyIterator(Status::OK());

From f314b63e5e045a64fdaf84bba54f8ed6f47304d3 Mon Sep 17 00:00:00 2001
From: andy <zhiangli029@gmail.com>
Date: Thu, 28 Jun 2018 22:03:37 +0800
Subject: [PATCH 016/181] add missing sequence and type (tag) to the comment
 describing the entry format

---
 db/memtable.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/db/memtable.cc b/db/memtable.cc
index 287afdb..8cefcae 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -85,6 +85,7 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   // Format of an entry is concatenation of:
   //  key_size     : varint32 of internal_key.size()
   //  key bytes    : char[internal_key.size()]
+  //    tag        : uint64((sequence << 8) | type)
   //  value_size   : varint32 of value.size()
   //  value bytes  : char[value.size()]
   size_t key_size = key.size();

From f7b0e1d901da26ac5ce6ad7f0a9806ce1440197e Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 14 Aug 2018 15:23:53 -0700
Subject: [PATCH 017/181] Expose WriteBatch::Append().

WriteBatchInternal has a method for efficiently concatenating two
WriteBatches. This commit exposes the method to the public API.
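
A short usage sketch of the now-public method (db is assumed to be an
already-open leveldb::DB*):

    #include "leveldb/db.h"
    #include "leveldb/write_batch.h"

    void AtomicUpdate(leveldb::DB* db) {
      leveldb::WriteBatch batch;
      batch.Put("key1", "value1");

      leveldb::WriteBatch extra;
      extra.Put("key2", "value2");
      extra.Delete("key1");

      batch.Append(extra);  // copies extra's operations onto the end of batch
      leveldb::Status s = db->Write(leveldb::WriteOptions(), &batch);
    }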

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=208724311
---
 db/write_batch.cc             | 4 ++++
 db/write_batch_test.cc        | 8 ++++----
 include/leveldb/write_batch.h | 7 +++++++
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/db/write_batch.cc b/db/write_batch.cc
index 7f8f3e8..40eed2e 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -112,6 +112,10 @@ void WriteBatch::Delete(const Slice& key) {
   PutLengthPrefixedSlice(&rep_, key);
 }
 
+void WriteBatch::Append(const WriteBatch &source) {
+  WriteBatchInternal::Append(this, &source);
+}
+
 namespace {
 class MemTableInserter : public WriteBatch::Handler {
  public:
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 8d38023..49c178d 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -91,21 +91,21 @@ TEST(WriteBatchTest, Append) {
   WriteBatch b1, b2;
   WriteBatchInternal::SetSequence(&b1, 200);
   WriteBatchInternal::SetSequence(&b2, 300);
-  WriteBatchInternal::Append(&b1, &b2);
+  b1.Append(b2);
   ASSERT_EQ("",
             PrintContents(&b1));
   b2.Put("a", "va");
-  WriteBatchInternal::Append(&b1, &b2);
+  b1.Append(b2);
   ASSERT_EQ("Put(a, va)@200",
             PrintContents(&b1));
   b2.Clear();
   b2.Put("b", "vb");
-  WriteBatchInternal::Append(&b1, &b2);
+  b1.Append(b2);
   ASSERT_EQ("Put(a, va)@200"
             "Put(b, vb)@201",
             PrintContents(&b1));
   b2.Delete("foo");
-  WriteBatchInternal::Append(&b1, &b2);
+  b1.Append(b2);
   ASSERT_EQ("Put(a, va)@200"
             "Put(b, vb)@202"
             "Put(b, vb)@201"
diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index b6d72cb..9386ace 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -54,6 +54,13 @@ class LEVELDB_EXPORT WriteBatch {
   // releases. It is intended for LevelDB usage metrics.
   size_t ApproximateSize();
 
+  // Copies the operations in "source" to this batch.
+  //
+  // This runs in O(source size) time. However, the constant factor is better
+  // than calling Iterate() over the source batch with a Handler that replicates
+  // the operations into this batch.
+  void Append(const WriteBatch& source);
+
   // Support for iterating over the contents of a batch.
   class Handler {
    public:

From 16a2b8bb3af5b1f54676256e55a5d3f0ec02da42 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Sun, 19 Aug 2018 15:17:37 -0700
Subject: [PATCH 018/181] Expose WriteBatch::Append in the C API.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=209345072
---
 db/c.cc             | 7 ++++++-
 db/c_test.c         | 8 +++++++-
 include/leveldb/c.h | 4 +++-
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/db/c.cc b/db/c.cc
index 77b33d5..7756ea3 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -359,7 +359,7 @@ void leveldb_writebatch_delete(
 }
 
 void leveldb_writebatch_iterate(
-    leveldb_writebatch_t* b,
+    const leveldb_writebatch_t* b,
     void* state,
     void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
     void (*deleted)(void*, const char* k, size_t klen)) {
@@ -382,6 +382,11 @@ void leveldb_writebatch_iterate(
   b->rep.Iterate(&handler);
 }
 
+void leveldb_writebatch_append(leveldb_writebatch_t *destination,
+                               const leveldb_writebatch_t *source) {
+  destination->rep.Append(source->rep);
+}
+
 leveldb_options_t* leveldb_options_create() {
   return new leveldb_options_t;
 }
diff --git a/db/c_test.c b/db/c_test.c
index 7284de5..ae14b99 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -228,12 +228,18 @@ int main(int argc, char** argv) {
     leveldb_writebatch_clear(wb);
     leveldb_writebatch_put(wb, "bar", 3, "b", 1);
     leveldb_writebatch_put(wb, "box", 3, "c", 1);
-    leveldb_writebatch_delete(wb, "bar", 3);
+
+    leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+    leveldb_writebatch_delete(wb2, "bar", 3);
+    leveldb_writebatch_append(wb, wb2);
+    leveldb_writebatch_destroy(wb2);
+
     leveldb_write(db, woptions, wb, &err);
     CheckNoError(err);
     CheckGet(db, roptions, "foo", "hello");
     CheckGet(db, roptions, "bar", NULL);
     CheckGet(db, roptions, "box", "c");
+
     int pos = 0;
     leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
     CheckCondition(pos == 3);
diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index 1124153..d8aab5b 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -155,9 +155,11 @@ LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
 LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*,
                                               const char* key, size_t klen);
 LEVELDB_EXPORT void leveldb_writebatch_iterate(
-    leveldb_writebatch_t*, void* state,
+    const leveldb_writebatch_t*, void* state,
     void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
     void (*deleted)(void*, const char* k, size_t klen));
+LEVELDB_EXPORT void leveldb_writebatch_append(
+    leveldb_writebatch_t* destination, const leveldb_writebatch_t* source);
 
 /* Options */
 

From 0ef2310f67f0c0b4ba3e6ad86d8138440af30d67 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 4 Sep 2018 09:15:30 -0700
Subject: [PATCH 019/181] Remove GCC on OSX from the Travis CI matrix.

Equivalent of
https://github.com/google/snappy/commit/db082d2cd6512981c28849d00dd47a4216768e10

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211467181
---
 .travis.yml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 5999274..fd7b52d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,6 +17,13 @@ env:
   - BUILD_TYPE=Debug
   - BUILD_TYPE=RelWithDebInfo
 
+matrix:
+  exclude:
+    # GCC fails on recent Travis OSX images.
+    # https://github.com/travis-ci/travis-ci/issues/9640
+    - compiler: gcc
+      os: osx
+
 addons:
   apt:
     # List of whitelisted in travis packages for ubuntu-trusty can be found here:

From 9b44da73d9b1d839c437e3fdaaa14ea08260dce4 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 4 Sep 2018 09:21:24 -0700
Subject: [PATCH 020/181] Clarify comments for leveldb::Env file reading
 methods.

"Create a brand new [adjective] file" seems like the description for a
method that will create a new file, but is used for methods that open
existing files for read access.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211468002
---
 include/leveldb/env.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 87dc06e..59e2a6f 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -45,7 +45,7 @@ class LEVELDB_EXPORT Env {
   // The result of Default() belongs to leveldb and must never be deleted.
   static Env* Default();
 
-  // Create a brand new sequentially-readable file with the specified name.
+  // Create an object that sequentially reads the file with the specified name.
   // On success, stores a pointer to the new file in *result and returns OK.
   // On failure stores nullptr in *result and returns non-OK.  If the file does
   // not exist, returns a non-OK status.  Implementations should return a
@@ -55,7 +55,7 @@ class LEVELDB_EXPORT Env {
   virtual Status NewSequentialFile(const std::string& fname,
                                    SequentialFile** result) = 0;
 
-  // Create a brand new random access read-only file with the
+  // Create an object supporting random-access reads from the file with the
   // specified name.  On success, stores a pointer to the new file in
   // *result and returns OK.  On failure stores nullptr in *result and
   // returns non-OK.  If the file does not exist, returns a non-OK

From 03064cbbb2c00c3e6e41a78e8111d14a020f7d6f Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 4 Sep 2018 09:31:27 -0700
Subject: [PATCH 021/181] Simplify Limiter in env_posix.cc.

Now that we require C++11, we can use std::atomic<int>, which has
primitives for most of the logic we need. As a bonus, the happy path for
Limiter::Acquire() and Limiter::Release() only performs one atomic
operation.
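
For illustration, the intended usage pattern is unchanged (the file-handling
details are elided; only the Acquire()/Release() pairing matters):

    Limiter mmap_limiter(1000);  // allow at most 1000 mmap'ed regions

    void ReadPreferringMmap() {
      if (mmap_limiter.Acquire()) {  // happy path: one relaxed fetch_sub
        // ... mmap the file and serve reads from the mapping ...
        mmap_limiter.Release();      // one relaxed fetch_add
      } else {
        // ... fall back to pread()-based reads ...
      }
    }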

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211469518
---
 util/env_posix.cc | 58 ++++++++++++++++++++---------------------------
 1 file changed, 25 insertions(+), 33 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 51844ad..18e7664 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -16,9 +16,12 @@
 #include <sys/types.h>
 #include <time.h>
 #include <unistd.h>
+
+#include <atomic>
 #include <deque>
 #include <limits>
 #include <set>
+
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
 #include "port/port.h"
@@ -53,52 +56,41 @@ static Status PosixError(const std::string& context, int err_number) {
 
 // Helper class to limit resource usage to avoid exhaustion.
 // Currently used to limit read-only file descriptors and mmap file usage
-// so that we do not end up running out of file descriptors, virtual memory,
-// or running into kernel performance problems for very large databases.
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
 class Limiter {
  public:
-  // Limit maximum number of resources to |n|.
-  Limiter(intptr_t n) {
-    SetAllowed(n);
-  }
+  // Limit maximum number of resources to |max_acquires|.
+  Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+  Limiter(const Limiter&) = delete;
+  Limiter operator=(const Limiter&) = delete;
 
   // If another resource is available, acquire it and return true.
   // Else return false.
-  bool Acquire() LOCKS_EXCLUDED(mu_) {
-    if (GetAllowed() <= 0) {
-      return false;
-    }
-    MutexLock l(&mu_);
-    intptr_t x = GetAllowed();
-    if (x <= 0) {
-      return false;
-    } else {
-      SetAllowed(x - 1);
+  bool Acquire() {
+    int old_acquires_allowed =
+        acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+    if (old_acquires_allowed > 0)
       return true;
-    }
+
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+    return false;
   }
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() LOCKS_EXCLUDED(mu_) {
-    MutexLock l(&mu_);
-    SetAllowed(GetAllowed() + 1);
+  void Release() {
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
   }
 
  private:
-  port::Mutex mu_;
-  port::AtomicPointer allowed_;
-
-  intptr_t GetAllowed() const {
-    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
-  }
-
-  void SetAllowed(intptr_t v) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
-    allowed_.Release_Store(reinterpret_cast<void*>(v));
-  }
-
-  Limiter(const Limiter&);
-  void operator=(const Limiter&);
+  // The number of available resources.
+  //
+  // This is a counter and is not tied to the invariants of any other class, so
+  // it can be operated on safely using std::memory_order_relaxed.
+  std::atomic<int> acquires_allowed_;
 };
 
 class PosixSequentialFile: public SequentialFile {
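
The optimistic decrement-then-roll-back pattern described in this commit message
can be shown in isolation. The sketch below is illustrative only; the class and
member names are made up and are not the ones in env_posix.cc.

#include <atomic>
#include <cassert>

// Counting limiter: Acquire() optimistically decrements and rolls back on
// failure, so the common (successful) path is a single atomic operation.
class SimpleLimiter {
 public:
  explicit SimpleLimiter(int max_acquires) : available_(max_acquires) {}

  bool Acquire() {
    int old_available = available_.fetch_sub(1, std::memory_order_relaxed);
    if (old_available > 0) return true;
    // The counter went negative: undo the decrement and report failure.
    available_.fetch_add(1, std::memory_order_relaxed);
    return false;
  }

  void Release() { available_.fetch_add(1, std::memory_order_relaxed); }

 private:
  std::atomic<int> available_;
};

int main() {
  SimpleLimiter limiter(1);
  assert(limiter.Acquire());   // First acquisition succeeds.
  assert(!limiter.Acquire());  // Second one is rejected and rolled back.
  limiter.Release();
  assert(limiter.Acquire());   // Available again after Release().
}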

From 89af27bde59fbbb3025653812b45fec10a655cb7 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 4 Sep 2018 09:44:56 -0700
Subject: [PATCH 022/181] Remove ssize_t from code that is not POSIX-specific.

ssize_t is not standard C++. It is a POSIX extension. Therefore, it does
not belong in generic code.

This change tweaks the logic in DBIter to remove the need for signed
integers, so ssize_t can be replaced with size_t. The impacted method
and private member are renamed to better express their purpose.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211471606
---
 db/db_iter.cc              | 19 +++++++++++--------
 db/fault_injection_test.cc |  8 ++++----
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/db/db_iter.cc b/db/db_iter.cc
index 3b2035e..4d0f42e 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -57,7 +57,7 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         rnd_(seed),
-        bytes_counter_(RandomPeriod()) {
+        bytes_until_read_sampling_(RandomCompactionPeriod()) {
   }
   virtual ~DBIter() {
     delete iter_;
@@ -103,8 +103,8 @@ class DBIter: public Iterator {
     }
   }
 
-  // Pick next gap with average value of config::kReadBytesPeriod.
-  ssize_t RandomPeriod() {
+  // Picks the number of bytes that can be read until a compaction is scheduled.
+  size_t RandomCompactionPeriod() {
     return rnd_.Uniform(2*config::kReadBytesPeriod);
   }
 
@@ -120,7 +120,7 @@ class DBIter: public Iterator {
   bool valid_;
 
   Random rnd_;
-  ssize_t bytes_counter_;
+  size_t bytes_until_read_sampling_;
 
   // No copying allowed
   DBIter(const DBIter&);
@@ -129,12 +129,15 @@ class DBIter: public Iterator {
 
 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
   Slice k = iter_->key();
-  ssize_t n = k.size() + iter_->value().size();
-  bytes_counter_ -= n;
-  while (bytes_counter_ < 0) {
-    bytes_counter_ += RandomPeriod();
+
+  size_t bytes_read = k.size() + iter_->value().size();
+  while (bytes_until_read_sampling_ < bytes_read) {
+    bytes_until_read_sampling_ += RandomCompactionPeriod();
     db_->RecordReadSample(k);
   }
+  assert(bytes_until_read_sampling_ >= bytes_read);
+  bytes_until_read_sampling_ -= bytes_read;
+
   if (!ParseInternalKey(k, ikey)) {
     status_ = Status::Corruption("corrupted internal key in DBIter");
     return false;
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 7894999..b3429ac 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -85,9 +85,9 @@ Status Truncate(const std::string& filename, uint64_t length) {
 
 struct FileState {
   std::string filename_;
-  ssize_t pos_;
-  ssize_t pos_at_last_sync_;
-  ssize_t pos_at_last_flush_;
+  int64_t pos_;
+  int64_t pos_at_last_sync_;
+  int64_t pos_at_last_flush_;
 
   FileState(const std::string& filename)
       : filename_(filename),
@@ -360,7 +360,7 @@ void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
 }
 
 Status FileState::DropUnsyncedData() const {
-  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+  int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
   return Truncate(filename_, sync_pos);
 }
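
The reason ssize_t can be dropped in DBIter is that the countdown never has to
go negative if it is topped up before subtracting. Below is a small standalone
sketch of that unsigned-countdown pattern, with invented names and a fixed
period in place of DBIter's random one.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Returns how many "samples" fire while consuming bytes_read bytes, keeping the
// countdown in an unsigned type by replenishing it before subtracting.
std::size_t ConsumeBytes(std::size_t* bytes_until_sample, std::size_t bytes_read,
                         std::size_t period) {
  std::size_t samples = 0;
  while (*bytes_until_sample < bytes_read) {
    *bytes_until_sample += period;  // Replenish instead of going negative.
    ++samples;
  }
  *bytes_until_sample -= bytes_read;
  return samples;
}

int main() {
  std::size_t countdown = 10;
  assert(ConsumeBytes(&countdown, 4, 16) == 0);   // 10 -> 6, no sample.
  assert(ConsumeBytes(&countdown, 20, 16) == 1);  // 6 -> 22 -> 2, one sample.
  std::printf("remaining: %zu\n", countdown);     // Prints "remaining: 2".
}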
 

From 7b945f200339aa47c24788d3ee9910c09c513843 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 4 Sep 2018 09:50:50 -0700
Subject: [PATCH 023/181] Clean up posix_logger.h.

General cleanup principles:
* Use override when applicable.
* Use const on class members where possible.
* Renames where clarity can be improved.
* Qualify standard library names with std:: when possible, to
  distinguish from POSIX names.
* Qualify POSIX names with the global namespace (::) when possible, to
  distinguish from standard library names.

This also revamps the logic for assembling a message in the
in-memory buffer before it is passed to fwrite(). While correct in
practice, the current implementation advances a char pointer past the
size of its buffer, which is technically undefined behavior.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211472570
---
 util/posix_logger.h | 154 ++++++++++++++++++++++++++------------------
 1 file changed, 92 insertions(+), 62 deletions(-)

diff --git a/util/posix_logger.h b/util/posix_logger.h
index 1909e61..a01a4fe 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -8,89 +8,119 @@
 #ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
 #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
 
-#include <algorithm>
-#include <stdio.h>
 #include <sys/time.h>
-#include <time.h>
+
+#include <cassert>
+#include <cinttypes>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+
 #include "leveldb/env.h"
 
 namespace leveldb {
 
-class PosixLogger : public Logger {
- private:
-  FILE* file_;
-  uint64_t (*gettid_)();  // Return the thread id for the current thread
+class PosixLogger final : public Logger {
  public:
-  PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { }
-  virtual ~PosixLogger() {
-    fclose(file_);
+  PosixLogger(FILE* fp, uint64_t (*gettid)()) : fp_(fp), gettid_(gettid) {
+    assert(fp != nullptr);
   }
-  virtual void Logv(const char* format, va_list ap) {
+
+  ~PosixLogger() override {
+    std::fclose(fp_);
+  }
+
+  void Logv(const char* format, va_list arguments) override {
+    // Record the time as close to the Logv() call as possible.
+    struct ::timeval now_timeval;
+    ::gettimeofday(&now_timeval, nullptr);
+    const std::time_t now_seconds = now_timeval.tv_sec;
+    struct std::tm now_components;
+    ::localtime_r(&now_seconds, &now_components);
+
     const uint64_t thread_id = (*gettid_)();
 
-    // We try twice: the first time with a fixed-size stack allocated buffer,
-    // and the second time with a much larger dynamically allocated buffer.
-    char buffer[500];
-    for (int iter = 0; iter < 2; iter++) {
-      char* base;
-      int bufsize;
-      if (iter == 0) {
-        bufsize = sizeof(buffer);
-        base = buffer;
-      } else {
-        bufsize = 30000;
-        base = new char[bufsize];
-      }
-      char* p = base;
-      char* limit = base + bufsize;
+    // We first attempt to print into a stack-allocated buffer. If this attempt
+    // fails, we make a second attempt with a dynamically allocated buffer.
+    constexpr const int kStackBufferSize = 512;
+    char stack_buffer[kStackBufferSize];
+    static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+                  "sizeof(char) is expected to be 1 in C++");
 
-      struct timeval now_tv;
-      gettimeofday(&now_tv, nullptr);
-      const time_t seconds = now_tv.tv_sec;
-      struct tm t;
-      localtime_r(&seconds, &t);
-      p += snprintf(p, limit - p,
-                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
-                    t.tm_year + 1900,
-                    t.tm_mon + 1,
-                    t.tm_mday,
-                    t.tm_hour,
-                    t.tm_min,
-                    t.tm_sec,
-                    static_cast<int>(now_tv.tv_usec),
-                    static_cast<long long unsigned int>(thread_id));
+    int dynamic_buffer_size = 0;  // Computed in the first iteration.
+    for (int iteration = 0; iteration < 2; ++iteration) {
+      const int buffer_size =
+          (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+      char* const buffer =
+          (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
 
-      // Print the message
-      if (p < limit) {
-        va_list backup_ap;
-        va_copy(backup_ap, ap);
-        p += vsnprintf(p, limit - p, format, backup_ap);
-        va_end(backup_ap);
-      }
+      // Print the header into the buffer.
+      int buffer_offset = snprintf(
+          buffer, buffer_size,
+          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %" PRIx64 " ",
+          now_components.tm_year + 1900,
+          now_components.tm_mon + 1,
+          now_components.tm_mday,
+          now_components.tm_hour,
+          now_components.tm_min,
+          now_components.tm_sec,
+          static_cast<int>(now_timeval.tv_usec),
+          thread_id);
 
-      // Truncate to available space if necessary
-      if (p >= limit) {
-        if (iter == 0) {
-          continue;       // Try again with larger buffer
-        } else {
-          p = limit - 1;
+      // The header can be at most 48 characters (10 date + 15 time + 3 spacing
+      // + 20 thread ID), which should fit comfortably into the static buffer.
+      assert(buffer_offset <= 48);
+      static_assert(48 < kStackBufferSize,
+                    "stack-allocated buffer may not fit the message header");
+      assert(buffer_offset < buffer_size);
+
+      // Print the message into the buffer.
+      std::va_list arguments_copy;
+      va_copy(arguments_copy, arguments);
+      buffer_offset += std::vsnprintf(buffer + buffer_offset,
+                                      buffer_size - buffer_offset, format,
+                                      arguments_copy);
+      va_end(arguments_copy);
+
+      // The code below may append a newline at the end of the buffer, which
+      // requires an extra character.
+      if (buffer_offset >= buffer_size - 1) {
+        // The message did not fit into the buffer.
+        if (iteration == 0) {
+          // Re-run the loop and use a dynamically-allocated buffer. The buffer
+          // will be large enough for the log message, an extra newline and a
+          // null terminator.
+          dynamic_buffer_size = buffer_offset + 2;
+          continue;
         }
+
+        // The dynamically-allocated buffer was incorrectly sized. This should
+        // not happen, assuming a correct implementation of (v)snprintf. Fail
+        // in tests, recover by truncating the log message in production.
+        assert(false);
+        buffer_offset = buffer_size - 1;
       }
 
-      // Add newline if necessary
-      if (p == base || p[-1] != '\n') {
-        *p++ = '\n';
+      // Add a newline if necessary.
+      if (buffer[buffer_offset - 1] != '\n') {
+        buffer[buffer_offset] = '\n';
+        ++buffer_offset;
       }
 
-      assert(p <= limit);
-      fwrite(base, 1, p - base, file_);
-      fflush(file_);
-      if (base != buffer) {
-        delete[] base;
+      assert(buffer_offset <= buffer_size);
+      std::fwrite(buffer, 1, buffer_offset, fp_);
+      std::fflush(fp_);
+
+      if (iteration != 0) {
+        delete[] buffer;
       }
       break;
     }
   }
+
+ private:
+  std::FILE* const fp_;
+  uint64_t (* const gettid_)();  // Return the thread id for the current thread.
 };
 
 }  // namespace leveldb
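
The message assembly rewritten above follows a common two-pass vsnprintf()
pattern: try a stack buffer first, then retry with a heap buffer sized from the
first pass's return value. Here is a hedged, self-contained sketch of that
pattern; it is not the PosixLogger code itself and the names are invented.

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <string>

// Formats printf-style arguments, first into a small stack buffer and, if that
// is not large enough, into a heap buffer sized from vsnprintf()'s return value.
std::string FormatTwoPass(const char* format, ...) {
  char stack_buffer[64];

  std::va_list args;
  va_start(args, format);
  std::va_list args_copy;
  va_copy(args_copy, args);  // The arguments are consumed by the first pass.
  int needed = std::vsnprintf(stack_buffer, sizeof(stack_buffer), format, args);
  va_end(args);

  std::string result;
  if (needed >= 0 && static_cast<std::size_t>(needed) < sizeof(stack_buffer)) {
    result.assign(stack_buffer, needed);  // First pass fit; done.
  } else if (needed >= 0) {
    // Second pass: allocate exactly enough room for the text and terminator.
    std::string heap_buffer(static_cast<std::size_t>(needed) + 1, '\0');
    std::vsnprintf(&heap_buffer[0], heap_buffer.size(), format, args_copy);
    heap_buffer.resize(needed);  // Drop the terminator slot.
    result = heap_buffer;
  }
  va_end(args_copy);
  return result;
}

int main() {
  std::printf("%s\n", FormatTwoPass("short: %d", 42).c_str());
  std::printf("%zu chars\n", FormatTwoPass("%0100d", 7).size());  // Forces pass 2.
}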

From bb88f25115d20a6d73dfb6b16cc298db2f66948b Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 5 Sep 2018 15:23:28 -0700
Subject: [PATCH 024/181] Clean up PosixWritableFile in env_posix.cc.

This is separated from the general cleanup because of the logic changes
in SyncDirIfManifest().

General cleanup principles:
* Use override when applicable.
* Remove static when redundant (methods and globals in anonymous
  namespaces).
* Use const on class members where possible.
* Standardize on "status" for Status local variables.
* Renames where clarity can be improved.
* Qualify standard library names with std:: when possible, to
  distinguish from POSIX names.
* Qualify POSIX names with the global namespace (::) when possible, to
  distinguish from standard library names.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=211709673
---
 util/env_posix.cc | 199 +++++++++++++++++++++++++++-------------------
 1 file changed, 117 insertions(+), 82 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 18e7664..b201c5b 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -18,6 +18,7 @@
 #include <unistd.h>
 
 #include <atomic>
+#include <cstring>
 #include <deque>
 #include <limits>
 #include <set>
@@ -44,7 +45,7 @@ namespace {
 static int open_read_only_file_limit = -1;
 static int mmap_limit = -1;
 
-static const size_t kBufSize = 65536;
+constexpr const size_t kWritableFileBufferSize = 65536;
 
 static Status PosixError(const std::string& context, int err_number) {
   if (err_number == ENOENT) {
@@ -213,131 +214,165 @@ class PosixMmapReadableFile: public RandomAccessFile {
   }
 };
 
-class PosixWritableFile : public WritableFile {
- private:
-  // buf_[0, pos_-1] contains data to be written to fd_.
-  std::string filename_;
-  int fd_;
-  char buf_[kBufSize];
-  size_t pos_;
-
+class PosixWritableFile final : public WritableFile {
  public:
-  PosixWritableFile(const std::string& fname, int fd)
-      : filename_(fname), fd_(fd), pos_(0) { }
+  PosixWritableFile(std::string filename, int fd)
+      : pos_(0), fd_(fd), is_manifest_(IsManifest(filename)),
+        filename_(std::move(filename)), dirname_(Dirname(filename_)) {}
 
-  ~PosixWritableFile() {
+  ~PosixWritableFile() override {
     if (fd_ >= 0) {
       // Ignoring any potential errors
       Close();
     }
   }
 
-  virtual Status Append(const Slice& data) {
-    size_t n = data.size();
-    const char* p = data.data();
+  Status Append(const Slice& data) override {
+    size_t write_size = data.size();
+    const char* write_data = data.data();
 
     // Fit as much as possible into buffer.
-    size_t copy = std::min(n, kBufSize - pos_);
-    memcpy(buf_ + pos_, p, copy);
-    p += copy;
-    n -= copy;
-    pos_ += copy;
-    if (n == 0) {
+    size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+    std::memcpy(buf_ + pos_, write_data, copy_size);
+    write_data += copy_size;
+    write_size -= copy_size;
+    pos_ += copy_size;
+    if (write_size == 0) {
       return Status::OK();
     }
 
     // Can't fit in buffer, so need to do at least one write.
-    Status s = FlushBuffered();
-    if (!s.ok()) {
-      return s;
+    Status status = FlushBuffer();
+    if (!status.ok()) {
+      return status;
     }
 
     // Small writes go to buffer, large writes are written directly.
-    if (n < kBufSize) {
-      memcpy(buf_, p, n);
-      pos_ = n;
+    if (write_size < kWritableFileBufferSize) {
+      std::memcpy(buf_, write_data, write_size);
+      pos_ = write_size;
       return Status::OK();
     }
-    return WriteRaw(p, n);
+    return WriteUnbuffered(write_data, write_size);
   }
 
-  virtual Status Close() {
-    Status result = FlushBuffered();
-    const int r = close(fd_);
-    if (r < 0 && result.ok()) {
-      result = PosixError(filename_, errno);
+  Status Close() override {
+    Status status = FlushBuffer();
+    const int close_result = ::close(fd_);
+    if (close_result < 0 && status.ok()) {
+      status = PosixError(filename_, errno);
     }
     fd_ = -1;
-    return result;
+    return status;
   }
 
-  virtual Status Flush() {
-    return FlushBuffered();
+  Status Flush() override {
+    return FlushBuffer();
   }
 
-  Status SyncDirIfManifest() {
-    const char* f = filename_.c_str();
-    const char* sep = strrchr(f, '/');
-    Slice basename;
-    std::string dir;
-    if (sep == nullptr) {
-      dir = ".";
-      basename = f;
-    } else {
-      dir = std::string(f, sep - f);
-      basename = sep + 1;
-    }
-    Status s;
-    if (basename.starts_with("MANIFEST")) {
-      int fd = open(dir.c_str(), O_RDONLY);
-      if (fd < 0) {
-        s = PosixError(dir, errno);
-      } else {
-        if (fsync(fd) < 0) {
-          s = PosixError(dir, errno);
-        }
-        close(fd);
-      }
-    }
-    return s;
-  }
-
-  virtual Status Sync() {
+  Status Sync() override {
     // Ensure new files referred to by the manifest are in the filesystem.
-    Status s = SyncDirIfManifest();
-    if (!s.ok()) {
-      return s;
+    //
+    // This needs to happen before the manifest file is flushed to disk, to
+    // avoid crashing in a state where the manifest refers to files that are not
+    // yet on disk.
+    Status status = SyncDirIfManifest();
+    if (!status.ok()) {
+      return status;
     }
-    s = FlushBuffered();
-    if (s.ok()) {
-      if (fdatasync(fd_) != 0) {
-        s = PosixError(filename_, errno);
-      }
+
+    status = FlushBuffer();
+    if (status.ok() && ::fdatasync(fd_) != 0) {
+      status = PosixError(filename_, errno);
     }
-    return s;
+    return status;
   }
 
  private:
-  Status FlushBuffered() {
-    Status s = WriteRaw(buf_, pos_);
+  Status FlushBuffer() {
+    Status status = WriteUnbuffered(buf_, pos_);
     pos_ = 0;
-    return s;
+    return status;
   }
 
-  Status WriteRaw(const char* p, size_t n) {
-    while (n > 0) {
-      ssize_t r = write(fd_, p, n);
-      if (r < 0) {
+  Status WriteUnbuffered(const char* data, size_t size) {
+    while (size > 0) {
+      ssize_t write_result = ::write(fd_, data, size);
+      if (write_result < 0) {
         if (errno == EINTR) {
           continue;  // Retry
         }
         return PosixError(filename_, errno);
       }
-      p += r;
-      n -= r;
+      data += write_result;
+      size -= write_result;
     }
     return Status::OK();
   }
+
+  Status SyncDirIfManifest() {
+    Status status;
+    if (!is_manifest_) {
+      return status;
+    }
+
+    int fd = ::open(dirname_.c_str(), O_RDONLY);
+    if (fd < 0) {
+      status = PosixError(dirname_, errno);
+    } else {
+      if (::fsync(fd) < 0) {
+        status = PosixError(dirname_, errno);
+      }
+      ::close(fd);
+    }
+    return status;
+  }
+
+  // Returns the directory name in a path pointing to a file.
+  //
+  // Returns "." if the path does not contain any directory separator.
+  static std::string Dirname(const std::string& filename) {
+    std::string::size_type separator_pos = filename.rfind('/');
+    if (separator_pos == std::string::npos) {
+      return std::string(".");
+    }
+    // The filename component should not contain a path separator. If it does,
+    // the splitting was done incorrectly.
+    assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+    return filename.substr(0, separator_pos);
+  }
+
+  // Extracts the file name from a path pointing to a file.
+  //
+  // The returned Slice points to |filename|'s data buffer, so it is only valid
+  // while |filename| is alive and unchanged.
+  static Slice Basename(const std::string& filename) {
+    std::string::size_type separator_pos = filename.rfind('/');
+    if (separator_pos == std::string::npos) {
+      return Slice(filename);
+    }
+    // The filename component should not contain a path separator. If it does,
+    // the splitting was done incorrectly.
+    assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+    return Slice(filename.data() + separator_pos + 1,
+                 filename.length() - separator_pos - 1);
+  }
+
+  // True if the given file is a manifest file.
+  static bool IsManifest(const std::string& filename) {
+    return Basename(filename).starts_with("MANIFEST");
+  }
+
+  // buf_[0, pos_ - 1] contains data to be written to fd_.
+  char buf_[kWritableFileBufferSize];
+  size_t pos_;
+  int fd_;
+
+  const bool is_manifest_;  // True if the file's name starts with MANIFEST.
+  const std::string filename_;
+  const std::string dirname_;  // The directory of filename_.
 };
 
 static int LockOrUnlock(int fd, bool lock) {
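
The Dirname()/Basename() helpers added above boil down to a single rfind('/') on
the path plus a prefix test for "MANIFEST". The following sketch shows the same
split using std::string only; the function name is made up for illustration.

#include <cassert>
#include <string>
#include <utility>

// Splits "dir/file" into {"dir", "file"}; a path with no '/' maps to {".", path}.
std::pair<std::string, std::string> SplitPath(const std::string& path) {
  std::string::size_type sep = path.rfind('/');
  if (sep == std::string::npos) {
    return {".", path};
  }
  return {path.substr(0, sep), path.substr(sep + 1)};
}

int main() {
  assert(SplitPath("/tmp/db/MANIFEST-000001").first == "/tmp/db");
  assert(SplitPath("/tmp/db/MANIFEST-000001").second == "MANIFEST-000001");
  assert(SplitPath("CURRENT").first == ".");

  // The manifest check in the patch reduces to a prefix test on the basename.
  const std::string basename = SplitPath("/tmp/db/MANIFEST-000001").second;
  assert(basename.compare(0, 8, "MANIFEST") == 0);
}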

From 05709fb43eea34936c9f535edcb74d5e91a0b495 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 10 Sep 2018 15:38:12 -0700
Subject: [PATCH 025/181] Remove InitOnce from the port API.

This is not an API-breaking change, because it reduces the API that the
leveldb embedder must implement. The project will build just fine
against ports that still implement InitOnce.

C++11 guarantees thread-safe initialization of static variables inside
functions. This is a more restricted form of std::call_once or
pthread_once_t (e.g., single call site), so the compiler might be able
to generate better code [1]. Equally important, having less code in
port_example.h makes it easier to port to other platforms.

Due to the change above, this CL introduces a new approach for storing
the singleton BytewiseComparatorImpl instance returned by
BytewiseComparator(). The new approach avoids a dynamic memory
allocation, which eliminates the false positive from LeakSanitizer
reported in https://github.com/google/leveldb/issues/200

[1] https://stackoverflow.com/a/27206650/

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=212348004
---
 CMakeLists.txt             |  2 ++
 port/port_example.h        | 10 --------
 port/port_stdcxx.h         |  8 -------
 util/comparator.cc         | 17 +++++--------
 util/no_destructor.h       | 47 ++++++++++++++++++++++++++++++++++++
 util/no_destructor_test.cc | 49 ++++++++++++++++++++++++++++++++++++++
 6 files changed, 104 insertions(+), 29 deletions(-)
 create mode 100644 util/no_destructor.h
 create mode 100644 util/no_destructor_test.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d49c31e..36d6cbd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -149,6 +149,7 @@ target_sources(leveldb
     "${PROJECT_SOURCE_DIR}/util/logging.cc"
     "${PROJECT_SOURCE_DIR}/util/logging.h"
     "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
+    "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
     "${PROJECT_SOURCE_DIR}/util/options.cc"
     "${PROJECT_SOURCE_DIR}/util/random.h"
     "${PROJECT_SOURCE_DIR}/util/status.cc"
@@ -278,6 +279,7 @@ if(LEVELDB_BUILD_TESTS)
 
   leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
   leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
+  leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")
 
   if(NOT BUILD_SHARED_LIBS)
     leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
diff --git a/port/port_example.h b/port/port_example.h
index 88fc9cb..9c648c3 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -62,16 +62,6 @@ class CondVar {
   void SignallAll();
 };
 
-// Thread-safe initialization.
-// Used as follows:
-//      static port::OnceType init_control = LEVELDB_ONCE_INIT;
-//      static void Initializer() { ... do something ...; }
-//      ...
-//      port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-void InitOnce(port::OnceType*, void (*initializer)());
-
 // A type that holds a pointer that can be read or written atomically
 // (i.e., without word-tearing.)
 class AtomicPointer {
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index 4e58cba..4713e26 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -84,14 +84,6 @@ class CondVar {
   Mutex* const mu_;
 };
 
-using OnceType = std::once_flag;
-#define LEVELDB_ONCE_INIT {}
-
-// Thinly wraps std::call_once.
-inline void InitOnce(OnceType* once, void (*initializer)()) {
-  std::call_once(*once, *initializer);
-}
-
 inline bool Snappy_Compress(const char* input, size_t length,
                             ::std::string* output) {
 #if HAVE_SNAPPY
diff --git a/util/comparator.cc b/util/comparator.cc
index 4b7b572..e1e2963 100644
--- a/util/comparator.cc
+++ b/util/comparator.cc
@@ -3,11 +3,13 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <algorithm>
-#include <stdint.h>
+#include <cstdint>
+#include <string>
+
 #include "leveldb/comparator.h"
 #include "leveldb/slice.h"
-#include "port/port.h"
 #include "util/logging.h"
+#include "util/no_destructor.h"
 
 namespace leveldb {
 
@@ -66,16 +68,9 @@ class BytewiseComparatorImpl : public Comparator {
 };
 }  // namespace
 
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static const Comparator* bytewise;
-
-static void InitModule() {
-  bytewise = new BytewiseComparatorImpl;
-}
-
 const Comparator* BytewiseComparator() {
-  port::InitOnce(&once, InitModule);
-  return bytewise;
+  static NoDestructor<BytewiseComparatorImpl> singleton;
+  return singleton.get();
 }
 
 }  // namespace leveldb
diff --git a/util/no_destructor.h b/util/no_destructor.h
new file mode 100644
index 0000000..4827e45
--- /dev/null
+++ b/util/no_destructor.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+
+#include <type_traits>
+#include <utility>
+
+namespace leveldb {
+
+// Wraps an instance whose destructor is never called.
+//
+// This is intended for use with function-level static variables.
+template<typename InstanceType>
+class NoDestructor {
+ public:
+  template <typename... ConstructorArgTypes>
+  explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
+    static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
+                  "instance_storage_ is not large enough to hold the instance");
+    static_assert(
+        alignof(decltype(instance_storage_)) >= alignof(InstanceType),
+        "instance_storage_ does not meet the instance's alignment requirement");
+    new (&instance_storage_) InstanceType(
+        std::forward<ConstructorArgTypes>(constructor_args)...);
+  }
+
+  ~NoDestructor() = default;
+
+  NoDestructor(const NoDestructor&) = delete;
+  NoDestructor& operator=(const NoDestructor&) = delete;
+
+  InstanceType* get() {
+    return reinterpret_cast<InstanceType*>(&instance_storage_);
+  }
+
+ private:
+  typename
+      std::aligned_storage<sizeof(InstanceType), alignof(InstanceType)>::type
+      instance_storage_;
+};
+
+}  // namespace leveldb
+
+#endif  // STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
new file mode 100644
index 0000000..7ce2631
--- /dev/null
+++ b/util/no_destructor_test.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+#include "util/no_destructor.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+struct DoNotDestruct {
+ public:
+  DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
+  ~DoNotDestruct() { std::abort(); }
+
+  // Used to check constructor argument forwarding.
+  uint32_t a;
+  uint64_t b;
+};
+
+constexpr const uint32_t kGoldenA = 0xdeadbeef;
+constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
+
+}  // namespace
+
+class NoDestructorTest { };
+
+TEST(NoDestructorTest, StackInstance) {
+  NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+  ASSERT_EQ(kGoldenA, instance.get()->a);
+  ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+TEST(NoDestructorTest, StaticInstance) {
+  static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+  ASSERT_EQ(kGoldenA, instance.get()->a);
+  ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}
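
Putting the new NoDestructor together with a function-local static gives the
leak-free singleton pattern this message describes. Below is a usage sketch with
an invented Registry type; only util/no_destructor.h from this patch is assumed.

#include <cstdio>
#include <cstdlib>
#include <string>

#include "util/no_destructor.h"

namespace {

// Hypothetical process-wide object whose destructor must never run at exit.
class Registry {
 public:
  Registry() : name_("default") {}
  ~Registry() { std::abort(); }  // Would fire if anything destroyed the singleton.

  const std::string& name() const { return name_; }

 private:
  std::string name_;
};

}  // namespace

const Registry* GlobalRegistry() {
  // C++11 initializes this function-local static in a thread-safe way. The
  // Registry lives inside NoDestructor's aligned storage, so nothing is
  // heap-allocated and no destructor is registered for program exit.
  static leveldb::NoDestructor<Registry> singleton;
  return singleton.get();
}

int main() {
  std::printf("%s\n", GlobalRegistry()->name().c_str());
  return 0;
}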

From 73d5834eceee8efa9a8ccfec77dc096a9e8ba18a Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 11 Sep 2018 10:39:08 -0700
Subject: [PATCH 026/181] Rework threading in env_posix.cc.

This commit replaces the use of pthreads in the POSIX port with std::thread
and port::Mutex + port::CondVar. This is intended to simplify porting
the env to a different platform.

The indirect use of pthreads in PosixLogger is replaced with
std::thread::id(), based on an approach prototyped by @cmumfordx@.

The pthreads dependency in CMakeFiles is not removed, because some C++
standard library implementations must be linked against pthreads for
std::thread use. Figuring out this dependency is left for future work.

Switching away from pthreads also fixes
https://github.com/google/leveldb/issues/381

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=212478311
---
 util/env_posix.cc   | 142 +++++++++++++++++++-------------------------
 util/posix_logger.h |  31 ++++++----
 2 files changed, 81 insertions(+), 92 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index b201c5b..d6b0d61 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -6,7 +6,6 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <pthread.h>
-#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/mman.h>
@@ -19,9 +18,10 @@
 
 #include <atomic>
 #include <cstring>
-#include <deque>
 #include <limits>
+#include <queue>
 #include <set>
+#include <thread>
 
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
@@ -600,20 +600,13 @@ class PosixEnv : public Env {
     return Status::OK();
   }
 
-  static uint64_t gettid() {
-    pthread_t tid = pthread_self();
-    uint64_t thread_id = 0;
-    memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
-    return thread_id;
-  }
-
   virtual Status NewLogger(const std::string& fname, Logger** result) {
     FILE* f = fopen(fname.c_str(), "w");
     if (f == nullptr) {
       *result = nullptr;
       return PosixError(fname, errno);
     } else {
-      *result = new PosixLogger(f, &PosixEnv::gettid);
+      *result = new PosixLogger(f);
       return Status::OK();
     }
   }
@@ -629,29 +622,33 @@ class PosixEnv : public Env {
   }
 
  private:
-  void PthreadCall(const char* label, int result) {
-    if (result != 0) {
-      fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
-      abort();
-    }
+  void BackgroundThreadMain();
+
+  static void BackgroundThreadEntryPoint(PosixEnv* env) {
+    env->BackgroundThreadMain();
   }
 
-  // BGThread() is the body of the background thread
-  void BGThread();
-  static void* BGThreadWrapper(void* arg) {
-    reinterpret_cast<PosixEnv*>(arg)->BGThread();
-    return nullptr;
-  }
+  // Stores the work item data in a Schedule() call.
+  //
+  // Instances are constructed on the thread calling Schedule() and used on the
+  // background thread.
+  //
+  // This structure is thread-safe because it is immutable.
+  struct BackgroundWorkItem {
+    explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+        : function(function), arg(arg) {}
 
-  pthread_mutex_t mu_;
-  pthread_cond_t bgsignal_;
-  pthread_t bgthread_;
-  bool started_bgthread_;
+    void (* const function)(void*);
+    void* const arg;
+  };
 
-  // Entry per Schedule() call
-  struct BGItem { void* arg; void (*function)(void*); };
-  typedef std::deque<BGItem> BGQueue;
-  BGQueue queue_;
+
+  port::Mutex background_work_mutex_;
+  port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+  bool started_background_thread_ GUARDED_BY(background_work_mutex_);
+
+  std::queue<BackgroundWorkItem> background_work_queue_
+      GUARDED_BY(background_work_mutex_);
 
   PosixLockTable locks_;
   Limiter mmap_limit_;
@@ -687,79 +684,60 @@ static intptr_t MaxOpenFiles() {
 }
 
 PosixEnv::PosixEnv()
-    : started_bgthread_(false),
+    : background_work_cv_(&background_work_mutex_),
+      started_background_thread_(false),
       mmap_limit_(MaxMmaps()),
       fd_limit_(MaxOpenFiles()) {
-  PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
-  PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, nullptr));
 }
 
-void PosixEnv::Schedule(void (*function)(void*), void* arg) {
-  PthreadCall("lock", pthread_mutex_lock(&mu_));
+void PosixEnv::Schedule(
+    void (*background_work_function)(void* background_work_arg),
+    void* background_work_arg) {
+  MutexLock lock(&background_work_mutex_);
 
-  // Start background thread if necessary
-  if (!started_bgthread_) {
-    started_bgthread_ = true;
-    PthreadCall(
-        "create thread",
-        pthread_create(&bgthread_, nullptr,  &PosixEnv::BGThreadWrapper, this));
+  // Start the background thread, if we haven't done so already.
+  if (!started_background_thread_) {
+    started_background_thread_ = true;
+    std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
+    background_thread.detach();
   }
 
-  // If the queue is currently empty, the background thread may currently be
-  // waiting.
-  if (queue_.empty()) {
-    PthreadCall("signal", pthread_cond_signal(&bgsignal_));
+  // If the queue is empty, the background thread may be waiting for work.
+  if (background_work_queue_.empty()) {
+    background_work_cv_.Signal();
   }
 
-  // Add to priority queue
-  queue_.push_back(BGItem());
-  queue_.back().function = function;
-  queue_.back().arg = arg;
-
-  PthreadCall("unlock", pthread_mutex_unlock(&mu_));
+  background_work_queue_.emplace(background_work_function, background_work_arg);
 }
 
-void PosixEnv::BGThread() {
+void PosixEnv::BackgroundThreadMain() {
   while (true) {
-    // Wait until there is an item that is ready to run
-    PthreadCall("lock", pthread_mutex_lock(&mu_));
-    while (queue_.empty()) {
-      PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
+    background_work_mutex_.Lock();
+
+    // Wait until there is work to be done.
+    while (background_work_queue_.empty()) {
+      background_work_cv_.Wait();
     }
 
-    void (*function)(void*) = queue_.front().function;
-    void* arg = queue_.front().arg;
-    queue_.pop_front();
+    assert(!background_work_queue_.empty());
+    auto background_work_function =
+        background_work_queue_.front().function;
+    void* background_work_arg = background_work_queue_.front().arg;
+    background_work_queue_.pop();
 
-    PthreadCall("unlock", pthread_mutex_unlock(&mu_));
-    (*function)(arg);
+    background_work_mutex_.Unlock();
+    background_work_function(background_work_arg);
   }
 }
 
-namespace {
-struct StartThreadState {
-  void (*user_function)(void*);
-  void* arg;
-};
-}
-static void* StartThreadWrapper(void* arg) {
-  StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
-  state->user_function(state->arg);
-  delete state;
-  return nullptr;
-}
-
-void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
-  pthread_t t;
-  StartThreadState* state = new StartThreadState;
-  state->user_function = function;
-  state->arg = arg;
-  PthreadCall("start thread",
-              pthread_create(&t, nullptr,  &StartThreadWrapper, state));
-}
-
 }  // namespace
 
+void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
+                           void* thread_main_arg) {
+  std::thread new_thread(thread_main, thread_main_arg);
+  new_thread.detach();
+}
+
 static pthread_once_t once = PTHREAD_ONCE_INIT;
 static Env* default_env;
 static void InitDefaultEnv() { default_env = new PosixEnv; }
diff --git a/util/posix_logger.h b/util/posix_logger.h
index a01a4fe..28b290e 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -11,10 +11,11 @@
 #include <sys/time.h>
 
 #include <cassert>
-#include <cinttypes>
 #include <cstdarg>
 #include <cstdio>
 #include <ctime>
+#include <sstream>
+#include <thread>
 
 #include "leveldb/env.h"
 
@@ -22,7 +23,10 @@ namespace leveldb {
 
 class PosixLogger final : public Logger {
  public:
-  PosixLogger(FILE* fp, uint64_t (*gettid)()) : fp_(fp), gettid_(gettid) {
+  // Creates a logger that writes to the given file.
+  //
+  // The PosixLogger instance takes ownership of the file handle.
+  explicit PosixLogger(std::FILE* fp) : fp_(fp) {
     assert(fp != nullptr);
   }
 
@@ -38,7 +42,14 @@ class PosixLogger final : public Logger {
     struct std::tm now_components;
     ::localtime_r(&now_seconds, &now_components);
 
-    const uint64_t thread_id = (*gettid_)();
+    // Record the thread ID.
+    constexpr const int kMaxThreadIdSize = 32;
+    std::ostringstream thread_stream;
+    thread_stream << std::this_thread::get_id();
+    std::string thread_id = thread_stream.str();
+    if (thread_id.size() > kMaxThreadIdSize) {
+      thread_id.resize(kMaxThreadIdSize);
+    }
 
     // We first attempt to print into a stack-allocated buffer. If this attempt
     // fails, we make a second attempt with a dynamically allocated buffer.
@@ -57,7 +68,7 @@ class PosixLogger final : public Logger {
       // Print the header into the buffer.
       int buffer_offset = snprintf(
           buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %" PRIx64 " ",
+          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s",
           now_components.tm_year + 1900,
           now_components.tm_mon + 1,
           now_components.tm_mday,
@@ -65,12 +76,13 @@ class PosixLogger final : public Logger {
           now_components.tm_min,
           now_components.tm_sec,
           static_cast<int>(now_timeval.tv_usec),
-          thread_id);
+          thread_id.c_str());
 
-      // The header can be at most 48 characters (10 date + 15 time + 3 spacing
-      // + 20 thread ID), which should fit comfortably into the static buffer.
-      assert(buffer_offset <= 48);
-      static_assert(48 < kStackBufferSize,
+      // The header can be at most 28 characters (10 date + 15 time +
+      // 3 spacing) plus the thread ID, which should fit comfortably into the
+      // static buffer.
+      assert(buffer_offset <= 28 + kMaxThreadIdSize);
+      static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
                     "stack-allocated buffer may not fit the message header");
       assert(buffer_offset < buffer_size);
 
@@ -120,7 +132,6 @@ class PosixLogger final : public Logger {
 
  private:
   std::FILE* const fp_;
-  uint64_t (* const gettid_)();  // Return the thread id for the current thread.
 };
 
 }  // namespace leveldb
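
The Schedule()/background-thread shape introduced here can be sketched with the
standard library alone. This is an illustrative reimplementation of the pattern,
not the leveldb code: it uses std::mutex and std::condition_variable rather than
port::Mutex and port::CondVar, and a plain int as the work item.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

namespace {

std::mutex g_work_mutex;
std::condition_variable g_work_cv;
std::queue<int> g_work_queue;  // Stand-in for BackgroundWorkItem.

void BackgroundThreadMain() {
  while (true) {
    std::unique_lock<std::mutex> lock(g_work_mutex);
    // Wait until there is work to be done.
    g_work_cv.wait(lock, [] { return !g_work_queue.empty(); });

    int work_item = g_work_queue.front();
    g_work_queue.pop();
    lock.unlock();  // Run the work item without holding the lock.

    std::printf("processed %d\n", work_item);
  }
}

void Schedule(int work_item) {
  std::lock_guard<std::mutex> lock(g_work_mutex);
  bool was_empty = g_work_queue.empty();
  g_work_queue.push(work_item);
  if (was_empty) {
    g_work_cv.notify_one();  // The background thread may be waiting for work.
  }
}

}  // namespace

int main() {
  // Detached worker, mirroring the lazily started thread in the patch. It is
  // torn down when the process exits.
  std::thread(BackgroundThreadMain).detach();
  for (int i = 0; i < 3; ++i) Schedule(i);
  std::this_thread::sleep_for(std::chrono::milliseconds(100));  // Let it drain.
}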

From 0145a94ab6bec48e596df499e8f6103e138a74ab Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 12 Sep 2018 10:23:16 -0700
Subject: [PATCH 027/181] Update .gitignore.

The version in the repository covers the Makefile build. The new version
is simpler and contains entries relevant to the CMake build.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=212661504
---
 .gitignore | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0630251..c4b2425 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,8 @@
-build_config.mk
-*.a
-*.o
-*.dylib*
-*.so
-*.so.*
-*_test
-db_bench
-leveldbutil
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/

From c43565dd398b2233db8eb49ba05234d62fb42e03 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 17 Sep 2018 23:05:57 -0700
Subject: [PATCH 028/181] C++11 cleanup for util/mutexlock.h.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=213398583
---
 util/mutexlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/util/mutexlock.h b/util/mutexlock.h
index 1ff5a9e..08d709a 100644
--- a/util/mutexlock.h
+++ b/util/mutexlock.h
@@ -28,11 +28,11 @@ class SCOPED_LOCKABLE MutexLock {
   }
   ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
 
+  MutexLock(const MutexLock&) = delete;
+  MutexLock& operator=(const MutexLock&) = delete;
+
  private:
   port::Mutex *const mu_;
-  // No copying allowed
-  MutexLock(const MutexLock&);
-  void operator=(const MutexLock&);
 };
 
 }  // namespace leveldb

From a7dc502e9f11c2e5c911ba45b999676c43eaa51f Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 24 Sep 2018 10:50:40 -0700
Subject: [PATCH 029/181] Rework once initialization in env_posix.cc.

C++11 guarantees thread-safe initialization of static variables inside
functions. This is a more restricted form of std::call_once or
pthread_once_t (e.g., single call site), so the compiler might be able
to generate better code [1]. Equally important, having less
platform-dependent code in env_posix.cc makes it easier to port to other
platforms.

Due to the change above, this CL introduces a new approach for storing
the singleton PosixEnv instance returned by Env::Default(). The new
approach avoids a dynamic memory allocation, which eliminates the false
positive from LeakSanitizer reported in
https://github.com/google/leveldb/issues/539 and
https://github.com/google/leveldb/issues/113

[1] https://stackoverflow.com/a/27206650/

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=214293129
---
 util/env_posix.cc | 84 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 70 insertions(+), 14 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index d6b0d61..68a8808 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -17,18 +17,21 @@
 #include <unistd.h>
 
 #include <atomic>
+#include <cstddef>
+#include <cstdint>
 #include <cstring>
 #include <limits>
 #include <queue>
 #include <set>
+#include <string>
 #include <thread>
+#include <type_traits>
 
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
+#include "leveldb/status.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
 #include "util/posix_logger.h"
 #include "util/env_posix_test_helper.h"
 
@@ -401,12 +404,15 @@ class PosixLockTable {
   std::set<std::string> locked_files_ GUARDED_BY(mu_);
  public:
   bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
-    MutexLock l(&mu_);
-    return locked_files_.insert(fname).second;
+    mu_.Lock();
+    bool succeeded = locked_files_.insert(fname).second;
+    mu_.Unlock();
+    return succeeded;
   }
   void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
-    MutexLock l(&mu_);
+    mu_.Lock();
     locked_files_.erase(fname);
+    mu_.Unlock();
   }
 };
 
@@ -693,7 +699,7 @@ PosixEnv::PosixEnv()
 void PosixEnv::Schedule(
     void (*background_work_function)(void* background_work_arg),
     void* background_work_arg) {
-  MutexLock lock(&background_work_mutex_);
+  background_work_mutex_.Lock();
 
   // Start the background thread, if we haven't done so already.
   if (!started_background_thread_) {
@@ -708,6 +714,7 @@ void PosixEnv::Schedule(
   }
 
   background_work_queue_.emplace(background_work_function, background_work_arg);
+  background_work_mutex_.Unlock();
 }
 
 void PosixEnv::BackgroundThreadMain() {
@@ -730,6 +737,59 @@ void PosixEnv::BackgroundThreadMain() {
   }
 }
 
+// Wraps an Env instance whose destructor is never called.
+//
+// Intended usage:
+//   using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+//   void ConfigurePosixEnv(int param) {
+//     PlatformSingletonEnv::AssertEnvNotInitialized();
+//     // set global configuration flags.
+//   }
+//   Env* Env::Default() {
+//     static PlatformSingletonEnv default_env;
+//     return default_env.env();
+//   }
+template<typename EnvType>
+class SingletonEnv {
+ public:
+  SingletonEnv() {
+#if !defined(NDEBUG)
+    env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif  // !defined(NDEBUG)
+    static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+                  "env_storage_ will not fit the Env");
+    static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+                  "env_storage_ does not meet the Env's alignment needs");
+    new (&env_storage_) EnvType();
+  }
+  ~SingletonEnv() = default;
+
+  SingletonEnv(const SingletonEnv&) = delete;
+  SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+  Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+  static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+    assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif  // !defined(NDEBUG)
+  }
+
+ private:
+  typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+      env_storage_;
+#if !defined(NDEBUG)
+  static std::atomic<bool> env_initialized_;
+#endif  // !defined(NDEBUG)
+};
+
+#if !defined(NDEBUG)
+template<typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif  // !defined(NDEBUG)
+
+using PosixDefaultEnv = SingletonEnv<PosixEnv>;
+
 }  // namespace
 
 void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
@@ -738,23 +798,19 @@ void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
   new_thread.detach();
 }
 
-static pthread_once_t once = PTHREAD_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new PosixEnv; }
-
 void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
-  assert(default_env == nullptr);
+  PosixDefaultEnv::AssertEnvNotInitialized();
   open_read_only_file_limit = limit;
 }
 
 void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
-  assert(default_env == nullptr);
+  PosixDefaultEnv::AssertEnvNotInitialized();
   mmap_limit = limit;
 }
 
 Env* Env::Default() {
-  pthread_once(&once, InitDefaultEnv);
-  return default_env;
+  static PosixDefaultEnv env_container;
+  return env_container.env();
 }
 
 }  // namespace leveldb
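
The SingletonEnv machinery above combines three ingredients: a C++11
function-local static, std::aligned_storage, and placement new. A stripped-down
sketch follows, with a made-up Config type standing in for PosixEnv and the
debug-only initialization flag omitted.

#include <cstdio>
#include <new>
#include <type_traits>

// Never-destroyed singleton: the wrapped object is placement-new'ed into
// aligned storage, so no destructor runs at program exit and no heap
// allocation is visible to LeakSanitizer.
template <typename T>
class Singleton {
 public:
  Singleton() {
    static_assert(sizeof(storage_) >= sizeof(T), "storage_ too small");
    static_assert(alignof(decltype(storage_)) >= alignof(T),
                  "storage_ does not meet T's alignment requirement");
    new (&storage_) T();
  }
  ~Singleton() = default;

  Singleton(const Singleton&) = delete;
  Singleton& operator=(const Singleton&) = delete;

  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
};

struct Config {  // Hypothetical stand-in for PosixEnv.
  int max_open_files = 1000;
};

Config* DefaultConfig() {
  // C++11 runs this constructor exactly once, in a thread-safe way, the first
  // time DefaultConfig() is called.
  static Singleton<Config> singleton;
  return singleton.get();
}

int main() {
  std::printf("max_open_files = %d\n", DefaultConfig()->max_open_files);
}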

From 1cb384088184be9840bd59b4040503a9fa9aee66 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 29 Oct 2018 16:17:46 -0700
Subject: [PATCH 030/181] Clean up env_posix.cc.

General cleanup principles:
* Use override when applicable.
* Remove static when redundant (methods and globals in anonymous
  namespaces).
* Use const on class members where possible.
* Standardize on "status" for Status local variables.
* Renames where clarity can be improved.
* Qualify standard library names with std:: when possible, to
  distinguish from POSIX names.
* Qualify POSIX names with the global namespace (::) when possible, to
  distinguish from standard library names.

This also refactors the background thread synchronization logic so that
it's statically analyzable.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=219212089
---
 util/env_posix.cc | 631 +++++++++++++++++++++++++---------------------
 1 file changed, 338 insertions(+), 293 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 68a8808..76bb648 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -3,22 +3,21 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <dirent.h>
-#include <errno.h>
 #include <fcntl.h>
 #include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/time.h>
 #include <sys/types.h>
-#include <time.h>
 #include <unistd.h>
 
 #include <atomic>
+#include <cerrno>
 #include <cstddef>
 #include <cstdint>
+#include <cstdio>
+#include <cstdlib>
 #include <cstring>
 #include <limits>
 #include <queue>
@@ -26,6 +25,7 @@
 #include <string>
 #include <thread>
 #include <type_traits>
+#include <utility>
 
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
@@ -45,16 +45,22 @@ namespace leveldb {
 
 namespace {
 
-static int open_read_only_file_limit = -1;
-static int mmap_limit = -1;
+// Set by EnvPosixTestHelper::SetReadOnlyFDLimit() and MaxOpenFiles().
+int g_open_read_only_file_limit = -1;
+
+// Up to 1000 mmap regions for 64-bit binaries; none for 32-bit.
+constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
+
+// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit.
+int g_mmap_limit = kDefaultMmapLimit;
 
 constexpr const size_t kWritableFileBufferSize = 65536;
 
-static Status PosixError(const std::string& context, int err_number) {
-  if (err_number == ENOENT) {
-    return Status::NotFound(context, strerror(err_number));
+Status PosixError(const std::string& context, int error_number) {
+  if (error_number == ENOENT) {
+    return Status::NotFound(context, std::strerror(error_number));
   } else {
-    return Status::IOError(context, strerror(err_number));
+    return Status::IOError(context, std::strerror(error_number));
   }
 }
 
@@ -97,124 +103,147 @@ class Limiter {
   std::atomic<int> acquires_allowed_;
 };
 
-class PosixSequentialFile: public SequentialFile {
- private:
-  std::string filename_;
-  int fd_;
-
+// Implements sequential read access in a file using read().
+//
+// Instances of this class are thread-friendly but not thread-safe, as required
+// by the SequentialFile API.
+class PosixSequentialFile final : public SequentialFile {
  public:
-  PosixSequentialFile(const std::string& fname, int fd)
-      : filename_(fname), fd_(fd) {}
-  virtual ~PosixSequentialFile() { close(fd_); }
+  PosixSequentialFile(std::string filename, int fd)
+      : fd_(fd), filename_(filename) {}
+  ~PosixSequentialFile() override { close(fd_); }
 
-  virtual Status Read(size_t n, Slice* result, char* scratch) {
-    Status s;
+  Status Read(size_t n, Slice* result, char* scratch) override {
+    Status status;
     while (true) {
-      ssize_t r = read(fd_, scratch, n);
-      if (r < 0) {
+      ::ssize_t read_size = ::read(fd_, scratch, n);
+      if (read_size < 0) {  // Read error.
         if (errno == EINTR) {
           continue;  // Retry
         }
-        s = PosixError(filename_, errno);
+        status = PosixError(filename_, errno);
         break;
       }
-      *result = Slice(scratch, r);
+      *result = Slice(scratch, read_size);
       break;
     }
-    return s;
+    return status;
   }
 
-  virtual Status Skip(uint64_t n) {
-    if (lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
+  Status Skip(uint64_t n) override {
+    if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
       return PosixError(filename_, errno);
     }
     return Status::OK();
   }
+
+ private:
+  const int fd_;
+  const std::string filename_;
 };
 
-// pread() based random-access
-class PosixRandomAccessFile: public RandomAccessFile {
- private:
-  std::string filename_;
-  bool temporary_fd_;  // If true, fd_ is -1 and we open on every read.
-  int fd_;
-  Limiter* limiter_;
-
+// Implements random read access in a file using pread().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixRandomAccessFile final : public RandomAccessFile {
  public:
-  PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter)
-      : filename_(fname), fd_(fd), limiter_(limiter) {
-    temporary_fd_ = !limiter->Acquire();
-    if (temporary_fd_) {
-      // Open file on every access.
-      close(fd_);
-      fd_ = -1;
+  // The new instance takes ownership of |fd|. |fd_limiter| must outlive this
+  // instance, and is used to decide whether |fd| can be kept open across reads.
+  PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
+      : has_permanent_fd_(fd_limiter->Acquire()),
+        fd_(has_permanent_fd_ ? fd : -1),
+        fd_limiter_(fd_limiter),
+        filename_(std::move(filename)) {
+    if (!has_permanent_fd_) {
+      assert(fd_ == -1);
+      ::close(fd);  // The file will be opened on every read.
     }
   }
 
-  virtual ~PosixRandomAccessFile() {
-    if (!temporary_fd_) {
-      close(fd_);
-      limiter_->Release();
+  ~PosixRandomAccessFile() override {
+    if (has_permanent_fd_) {
+      assert(fd_ != -1);
+      ::close(fd_);
+      fd_limiter_->Release();
     }
   }
 
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const {
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
     int fd = fd_;
-    if (temporary_fd_) {
-      fd = open(filename_.c_str(), O_RDONLY);
+    if (!has_permanent_fd_) {
+      fd = ::open(filename_.c_str(), O_RDONLY);
       if (fd < 0) {
         return PosixError(filename_, errno);
       }
     }
 
-    Status s;
-    ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
-    *result = Slice(scratch, (r < 0) ? 0 : r);
-    if (r < 0) {
-      // An error: return a non-ok status
-      s = PosixError(filename_, errno);
+    assert(fd != -1);
+
+    Status status;
+    ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
+    *result = Slice(scratch, (read_size < 0) ? 0 : read_size);
+    if (read_size < 0) {
+      // An error: return a non-ok status.
+      status = PosixError(filename_, errno);
     }
-    if (temporary_fd_) {
+    if (!has_permanent_fd_) {
       // Close the temporary file descriptor opened earlier.
-      close(fd);
+      assert(fd != fd_);
+      ::close(fd);
     }
-    return s;
+    return status;
   }
+
+ private:
+  const bool has_permanent_fd_;  // If false, the file is opened on every read.
+  const int fd_;  // -1 if has_permanent_fd_ is false.
+  Limiter* const fd_limiter_;
+  const std::string filename_;
 };
 
-// mmap() based random-access
-class PosixMmapReadableFile: public RandomAccessFile {
- private:
-  std::string filename_;
-  void* mmapped_region_;
-  size_t length_;
-  Limiter* limiter_;
-
+// Implements random read access in a file using mmap().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixMmapReadableFile final : public RandomAccessFile {
  public:
-  // base[0,length-1] contains the mmapped contents of the file.
-  PosixMmapReadableFile(const std::string& fname, void* base, size_t length,
-                        Limiter* limiter)
-      : filename_(fname), mmapped_region_(base), length_(length),
-        limiter_(limiter) {
+  // mmap_base[0, length-1] points to the memory-mapped contents of the file. It
+  // must be the result of a successful call to mmap(). This instance takes
+  // over ownership of the region.
+  //
+  // |mmap_limiter| must outlive this instance. The caller must have already
+  // acquired the right to use one mmap region, which will be released when this
+  // instance is destroyed.
+  PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+                        Limiter* mmap_limiter)
+      : mmap_base_(mmap_base), length_(length), mmap_limiter_(mmap_limiter),
+        filename_(std::move(filename)) {}
+
+  ~PosixMmapReadableFile() override {
+    ::munmap(static_cast<void*>(mmap_base_), length_);
+    mmap_limiter_->Release();
   }
 
-  virtual ~PosixMmapReadableFile() {
-    munmap(mmapped_region_, length_);
-    limiter_->Release();
-  }
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const {
-    Status s;
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
     if (offset + n > length_) {
       *result = Slice();
-      s = PosixError(filename_, EINVAL);
-    } else {
-      *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+      return PosixError(filename_, EINVAL);
     }
-    return s;
+
+    *result = Slice(mmap_base_ + offset, n);
+    return Status::OK();
   }
+
+ private:
+  char* const mmap_base_;
+  const size_t length_;
+  Limiter* const mmap_limiter_;
+  const std::string filename_;
 };
 
 class PosixWritableFile final : public WritableFile {
@@ -378,30 +407,39 @@ class PosixWritableFile final : public WritableFile {
   const std::string dirname_;  // The directory of filename_.
 };
 
-static int LockOrUnlock(int fd, bool lock) {
+int LockOrUnlock(int fd, bool lock) {
   errno = 0;
-  struct flock f;
-  memset(&f, 0, sizeof(f));
-  f.l_type = (lock ? F_WRLCK : F_UNLCK);
-  f.l_whence = SEEK_SET;
-  f.l_start = 0;
-  f.l_len = 0;        // Lock/unlock entire file
-  return fcntl(fd, F_SETLK, &f);
+  struct ::flock file_lock_info;
+  std::memset(&file_lock_info, 0, sizeof(file_lock_info));
+  file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK);
+  file_lock_info.l_whence = SEEK_SET;
+  file_lock_info.l_start = 0;
+  file_lock_info.l_len = 0;  // Lock/unlock entire file.
+  return ::fcntl(fd, F_SETLK, &file_lock_info);
 }
 
+// Instances are thread-safe because they are immutable.
 class PosixFileLock : public FileLock {
  public:
-  int fd_;
-  std::string name_;
+  PosixFileLock(int fd, std::string filename)
+      : fd_(fd), filename_(std::move(filename)) {}
+
+  int fd() const { return fd_; }
+  const std::string& filename() const { return filename_; }
+
+ private:
+  const int fd_;
+  const std::string filename_;
 };
 
-// Set of locked files.  We keep a separate set instead of just
-// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide
-// any protection against multiple uses from the same process.
+// Tracks the files locked by PosixEnv::LockFile().
+//
+// We maintain a separate set instead of relying on fcntl(F_SETLK) because
+// fcntl(F_SETLK) does not provide any protection against multiple uses from the
+// same process.
+//
+// Instances are thread-safe because all member data is guarded by a mutex.
 class PosixLockTable {
- private:
-  port::Mutex mu_;
-  std::set<std::string> locked_files_ GUARDED_BY(mu_);
  public:
   bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
     mu_.Lock();
@@ -414,217 +452,225 @@ class PosixLockTable {
     locked_files_.erase(fname);
     mu_.Unlock();
   }
+
+ private:
+  port::Mutex mu_;
+  std::set<std::string> locked_files_ GUARDED_BY(mu_);
 };
 
 class PosixEnv : public Env {
  public:
   PosixEnv();
-  virtual ~PosixEnv() {
-    char msg[] = "Destroying Env::Default()\n";
-    fwrite(msg, 1, sizeof(msg), stderr);
-    abort();
+  ~PosixEnv() override {
+    static char msg[] = "PosixEnv singleton destroyed. Unsupported behavior!\n";
+    std::fwrite(msg, 1, sizeof(msg), stderr);
+    std::abort();
   }
 
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result) {
-    int fd = open(fname.c_str(), O_RDONLY);
+  Status NewSequentialFile(const std::string& filename,
+                           SequentialFile** result) override {
+    int fd = ::open(filename.c_str(), O_RDONLY);
     if (fd < 0) {
       *result = nullptr;
-      return PosixError(fname, errno);
-    } else {
-      *result = new PosixSequentialFile(fname, fd);
-      return Status::OK();
+      return PosixError(filename, errno);
     }
-  }
 
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result) {
-    *result = nullptr;
-    Status s;
-    int fd = open(fname.c_str(), O_RDONLY);
-    if (fd < 0) {
-      s = PosixError(fname, errno);
-    } else if (mmap_limit_.Acquire()) {
-      uint64_t size;
-      s = GetFileSize(fname, &size);
-      if (s.ok()) {
-        void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
-        if (base != MAP_FAILED) {
-          *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
-        } else {
-          s = PosixError(fname, errno);
-        }
-      }
-      close(fd);
-      if (!s.ok()) {
-        mmap_limit_.Release();
-      }
-    } else {
-      *result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
-    }
-    return s;
-  }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
-    Status s;
-    int fd = open(fname.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
-    if (fd < 0) {
-      *result = nullptr;
-      s = PosixError(fname, errno);
-    } else {
-      *result = new PosixWritableFile(fname, fd);
-    }
-    return s;
-  }
-
-  virtual Status NewAppendableFile(const std::string& fname,
-                                   WritableFile** result) {
-    Status s;
-    int fd = open(fname.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
-    if (fd < 0) {
-      *result = nullptr;
-      s = PosixError(fname, errno);
-    } else {
-      *result = new PosixWritableFile(fname, fd);
-    }
-    return s;
-  }
-
-  virtual bool FileExists(const std::string& fname) {
-    return access(fname.c_str(), F_OK) == 0;
-  }
-
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) {
-    result->clear();
-    DIR* d = opendir(dir.c_str());
-    if (d == nullptr) {
-      return PosixError(dir, errno);
-    }
-    struct dirent* entry;
-    while ((entry = readdir(d)) != nullptr) {
-      result->push_back(entry->d_name);
-    }
-    closedir(d);
+    *result = new PosixSequentialFile(filename, fd);
     return Status::OK();
   }
 
-  virtual Status DeleteFile(const std::string& fname) {
-    Status result;
-    if (unlink(fname.c_str()) != 0) {
-      result = PosixError(fname, errno);
-    }
-    return result;
-  }
-
-  virtual Status CreateDir(const std::string& name) {
-    Status result;
-    if (mkdir(name.c_str(), 0755) != 0) {
-      result = PosixError(name, errno);
-    }
-    return result;
-  }
-
-  virtual Status DeleteDir(const std::string& name) {
-    Status result;
-    if (rmdir(name.c_str()) != 0) {
-      result = PosixError(name, errno);
-    }
-    return result;
-  }
-
-  virtual Status GetFileSize(const std::string& fname, uint64_t* size) {
-    Status s;
-    struct stat sbuf;
-    if (stat(fname.c_str(), &sbuf) != 0) {
-      *size = 0;
-      s = PosixError(fname, errno);
-    } else {
-      *size = sbuf.st_size;
-    }
-    return s;
-  }
-
-  virtual Status RenameFile(const std::string& src, const std::string& target) {
-    Status result;
-    if (rename(src.c_str(), target.c_str()) != 0) {
-      result = PosixError(src, errno);
-    }
-    return result;
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock) {
-    *lock = nullptr;
-    Status result;
-    int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
+  Status NewRandomAccessFile(const std::string& filename,
+                             RandomAccessFile** result) override {
+    *result = nullptr;
+    int fd = ::open(filename.c_str(), O_RDONLY);
     if (fd < 0) {
-      result = PosixError(fname, errno);
-    } else if (!locks_.Insert(fname)) {
-      close(fd);
-      result = Status::IOError("lock " + fname, "already held by process");
-    } else if (LockOrUnlock(fd, true) == -1) {
-      result = PosixError("lock " + fname, errno);
-      close(fd);
-      locks_.Remove(fname);
-    } else {
-      PosixFileLock* my_lock = new PosixFileLock;
-      my_lock->fd_ = fd;
-      my_lock->name_ = fname;
-      *lock = my_lock;
+      return PosixError(filename, errno);
     }
-    return result;
+
+    if (!mmap_limiter_.Acquire()) {
+      *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
+      return Status::OK();
+    }
+
+    uint64_t file_size;
+    Status status = GetFileSize(filename, &file_size);
+    if (status.ok()) {
+      void* mmap_base = ::mmap(/*addr=*/nullptr, file_size, PROT_READ,
+                               MAP_SHARED, fd, 0);
+      if (mmap_base != MAP_FAILED) {
+        *result = new PosixMmapReadableFile(
+            filename, reinterpret_cast<char*>(mmap_base), file_size,
+            &mmap_limiter_);
+      } else {
+        status = PosixError(filename, errno);
+      }
+    }
+    ::close(fd);
+    if (!status.ok()) {
+      mmap_limiter_.Release();
+    }
+    return status;
   }
 
-  virtual Status UnlockFile(FileLock* lock) {
-    PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
-    Status result;
-    if (LockOrUnlock(my_lock->fd_, false) == -1) {
-      result = PosixError("unlock", errno);
+  Status NewWritableFile(const std::string& filename,
+                         WritableFile** result) override {
+    int fd = ::open(filename.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
+    if (fd < 0) {
+      *result = nullptr;
+      return PosixError(filename, errno);
     }
-    locks_.Remove(my_lock->name_);
-    close(my_lock->fd_);
-    delete my_lock;
-    return result;
+
+    *result = new PosixWritableFile(filename, fd);
+    return Status::OK();
   }
 
-  virtual void Schedule(void (*function)(void*), void* arg);
+  Status NewAppendableFile(const std::string& filename,
+                           WritableFile** result) override {
+    int fd = ::open(filename.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
+    if (fd < 0) {
+      *result = nullptr;
+      return PosixError(filename, errno);
+    }
 
-  virtual void StartThread(void (*function)(void* arg), void* arg);
+    *result = new PosixWritableFile(filename, fd);
+    return Status::OK();
+  }
 
-  virtual Status GetTestDirectory(std::string* result) {
-    const char* env = getenv("TEST_TMPDIR");
+  bool FileExists(const std::string& filename) override {
+    return ::access(filename.c_str(), F_OK) == 0;
+  }
+
+  Status GetChildren(const std::string& directory_path,
+                     std::vector<std::string>* result) override {
+    result->clear();
+    ::DIR* dir = ::opendir(directory_path.c_str());
+    if (dir == nullptr) {
+      return PosixError(directory_path, errno);
+    }
+    struct ::dirent* entry;
+    while ((entry = ::readdir(dir)) != nullptr) {
+      result->emplace_back(entry->d_name);
+    }
+    ::closedir(dir);
+    return Status::OK();
+  }
+
+  Status DeleteFile(const std::string& filename) override {
+    if (::unlink(filename.c_str()) != 0) {
+      return PosixError(filename, errno);
+    }
+    return Status::OK();
+  }
+
+  Status CreateDir(const std::string& dirname) override {
+    if (::mkdir(dirname.c_str(), 0755) != 0) {
+      return PosixError(dirname, errno);
+    }
+    return Status::OK();
+  }
+
+  Status DeleteDir(const std::string& dirname) override {
+    if (::rmdir(dirname.c_str()) != 0) {
+      return PosixError(dirname, errno);
+    }
+    return Status::OK();
+  }
+
+  Status GetFileSize(const std::string& filename, uint64_t* size) override {
+    struct ::stat file_stat;
+    if (::stat(filename.c_str(), &file_stat) != 0) {
+      *size = 0;
+      return PosixError(filename, errno);
+    }
+    *size = file_stat.st_size;
+    return Status::OK();
+  }
+
+  Status RenameFile(const std::string& from, const std::string& to) override {
+    if (std::rename(from.c_str(), to.c_str()) != 0) {
+      return PosixError(from, errno);
+    }
+    return Status::OK();
+  }
+
+  Status LockFile(const std::string& filename, FileLock** lock) override {
+    *lock = nullptr;
+
+    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT, 0644);
+    if (fd < 0) {
+      return PosixError(filename, errno);
+    }
+
+    if (!locks_.Insert(filename)) {
+      ::close(fd);
+      return Status::IOError("lock " + filename, "already held by process");
+    }
+
+    if (LockOrUnlock(fd, true) == -1) {
+      int lock_errno = errno;
+      ::close(fd);
+      locks_.Remove(filename);
+      return PosixError("lock " + filename, lock_errno);
+    }
+
+    *lock = new PosixFileLock(fd, filename);
+    return Status::OK();
+  }
+
+  Status UnlockFile(FileLock* lock) override {
+    PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
+    if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
+      return PosixError("unlock " + posix_file_lock->filename(), errno);
+    }
+    locks_.Remove(posix_file_lock->filename());
+    ::close(posix_file_lock->fd());
+    delete posix_file_lock;
+    return Status::OK();
+  }
+
+  void Schedule(void (*background_work_function)(void* background_work_arg),
+                void* background_work_arg) override;
+
+  void StartThread(void (*thread_main)(void* thread_main_arg),
+                   void* thread_main_arg) override;
+
+  Status GetTestDirectory(std::string* result) override {
+    const char* env = std::getenv("TEST_TMPDIR");
     if (env && env[0] != '\0') {
       *result = env;
     } else {
       char buf[100];
-      snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid()));
+      std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
+                    static_cast<int>(::geteuid()));
       *result = buf;
     }
-    // Directory may already exist
+
+    // The CreateDir status is ignored because the directory may already exist.
     CreateDir(*result);
+
     return Status::OK();
   }
 
-  virtual Status NewLogger(const std::string& fname, Logger** result) {
-    FILE* f = fopen(fname.c_str(), "w");
-    if (f == nullptr) {
+  Status NewLogger(const std::string& filename, Logger** result) override {
+    std::FILE* fp = std::fopen(filename.c_str(), "w");
+    if (fp == nullptr) {
       *result = nullptr;
-      return PosixError(fname, errno);
+      return PosixError(filename, errno);
     } else {
-      *result = new PosixLogger(f);
+      *result = new PosixLogger(fp);
       return Status::OK();
     }
   }
 
-  virtual uint64_t NowMicros() {
-    struct timeval tv;
-    gettimeofday(&tv, nullptr);
-    return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+  uint64_t NowMicros() override {
+    static constexpr uint64_t kUsecondsPerSecond = 1000000;
+    struct ::timeval tv;
+    ::gettimeofday(&tv, nullptr);
+    return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
   }
 
-  virtual void SleepForMicroseconds(int micros) {
-    usleep(micros);
+  void SleepForMicroseconds(int micros) override {
+    ::usleep(micros);
   }
 
  private:
@@ -656,44 +702,41 @@ class PosixEnv : public Env {
   std::queue<BackgroundWorkItem> background_work_queue_
       GUARDED_BY(background_work_mutex_);
 
-  PosixLockTable locks_;
-  Limiter mmap_limit_;
-  Limiter fd_limit_;
+  PosixLockTable locks_;  // Thread-safe.
+  Limiter mmap_limiter_;  // Thread-safe.
+  Limiter fd_limiter_;  // Thread-safe.
 };
 
 // Return the maximum number of concurrent mmaps.
-static int MaxMmaps() {
-  if (mmap_limit >= 0) {
-    return mmap_limit;
-  }
-  // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
-  mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
-  return mmap_limit;
+int MaxMmaps() {
+  return g_mmap_limit;
 }
 
 // Return the maximum number of read-only files to keep open.
-static intptr_t MaxOpenFiles() {
-  if (open_read_only_file_limit >= 0) {
-    return open_read_only_file_limit;
+int MaxOpenFiles() {
+  if (g_open_read_only_file_limit >= 0) {
+    return g_open_read_only_file_limit;
   }
-  struct rlimit rlim;
-  if (getrlimit(RLIMIT_NOFILE, &rlim)) {
+  struct ::rlimit rlim;
+  if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
     // getrlimit failed, fallback to hard-coded default.
-    open_read_only_file_limit = 50;
+    g_open_read_only_file_limit = 50;
   } else if (rlim.rlim_cur == RLIM_INFINITY) {
-    open_read_only_file_limit = std::numeric_limits<int>::max();
+    g_open_read_only_file_limit = std::numeric_limits<int>::max();
   } else {
     // Allow use of 20% of available file descriptors for read-only files.
-    open_read_only_file_limit = rlim.rlim_cur / 5;
+    g_open_read_only_file_limit = rlim.rlim_cur / 5;
   }
-  return open_read_only_file_limit;
+  return g_open_read_only_file_limit;
 }
 
+}  // namespace
+
 PosixEnv::PosixEnv()
     : background_work_cv_(&background_work_mutex_),
       started_background_thread_(false),
-      mmap_limit_(MaxMmaps()),
-      fd_limit_(MaxOpenFiles()) {
+      mmap_limiter_(MaxMmaps()),
+      fd_limiter_(MaxOpenFiles()) {
 }
 
 void PosixEnv::Schedule(
@@ -737,6 +780,8 @@ void PosixEnv::BackgroundThreadMain() {
   }
 }
 
+namespace {
+
 // Wraps an Env instance whose destructor is never called.
 //
 // Intended usage:
@@ -800,12 +845,12 @@ void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
 
 void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
   PosixDefaultEnv::AssertEnvNotInitialized();
-  open_read_only_file_limit = limit;
+  g_open_read_only_file_limit = limit;
 }
 
 void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
   PosixDefaultEnv::AssertEnvNotInitialized();
-  mmap_limit = limit;
+  g_mmap_limit = limit;
 }
 
 Env* Env::Default() {

From 58d70545af9ec7f30821f973b604f8e2a2f9ebdb Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 2 Jan 2019 17:58:33 -0800
Subject: [PATCH 031/181] Update Travis CI configuration.

The Travis CI configuration updates reflect the following changes:
* Container-based builds (sudo: false) have been removed.
  https://changelog.travis-ci.com/the-container-based-build-environment-is-fully-deprecated-84517
* Ubuntu Xenial (16.04) is available as a base image.
  https://blog.travis-ci.com/2018-11-08-xenial-release
* Homebrew now has a dedicated DSL.
  https://docs.travis-ci.com/user/installing-dependencies/#installing-packages-on-os-x

To take full advantage of VM resources, CI builds now use Ninja
https://ninja-build.org/ instead of Make.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=227611641
---
 .travis.yml | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index fd7b52d..3ff5cfc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,8 +2,7 @@
 # http://about.travis-ci.org/docs/user/build-configuration/
 # This file can be validated on: http://lint.travis-ci.org/
 
-sudo: false
-dist: trusty
+dist: xenial
 language: cpp
 
 compiler:
@@ -26,38 +25,39 @@ matrix:
 
 addons:
   apt:
-    # List of whitelisted in travis packages for ubuntu-trusty can be found here:
-    #   https://github.com/travis-ci/apt-package-whitelist/blob/master/ubuntu-trusty
-    # List of whitelisted in travis apt-sources:
-    #   https://github.com/travis-ci/apt-source-whitelist/blob/master/ubuntu.json
     sources:
+    - llvm-toolchain-xenial-7
     - ubuntu-toolchain-r-test
-    - llvm-toolchain-trusty-5.0
     packages:
+    - clang-7
     - cmake
-    - gcc-7
-    - g++-7
-    - clang-5.0
+    - gcc-8
+    - g++-8
     - libgoogle-perftools-dev
     - libkyotocabinet-dev
     - libsnappy-dev
     - libsqlite3-dev
+    - ninja-build
+  homebrew:
+    packages:
+    - crc32c
+    - gperftools
+    - kyotocabinet
+    - gcc@7
+    - ninja
+    - snappy
+    - sqlite3
 
-install:
-# Travis doesn't have a DSL for installing homebrew packages yet. Status tracked
-# in https://github.com/travis-ci/travis-ci/issues/5377
+before_install:
 # The Travis VM image for Mac already has a link at /usr/local/include/c++,
-# causing Homebrew's gcc@7 installation to error out. This was reported to
+# causing Homebrew's gcc installation to error out. This was reported to
 # Homebrew maintainers at https://github.com/Homebrew/brew/issues/1742 and
 # removing the link emerged as a workaround.
-- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
-    brew update;
-    if [ -L /usr/local/include/c++ ]; then rm /usr/local/include/c++; fi;
-    brew install gcc@7;
-    brew install crc32c gperftools kyoto-cabinet snappy sqlite3;
-  fi
+- if [ "$TRAVIS_OS_NAME" == "osx" ]; then rm -f /usr/local/include/c++ ; fi
+
+install:
 # /usr/bin/gcc is stuck to old versions on both Linux and OSX.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-7" CC="gcc-7"; fi
+- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
 - echo ${CC}
 - echo ${CXX}
 - ${CXX} --version
@@ -65,7 +65,7 @@ install:
 
 before_script:
 - mkdir -p build && cd build
-- cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
 - cmake --build .
 - cd ..
 

From af7abf06ea061222c2c34d98e1995c5a901f374f Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Mon, 7 Jan 2019 11:29:24 -0800
Subject: [PATCH 032/181] Add back space to POSIX Logger.

The space in between the header and log message was mistakenly omitted
in a prior commit. Re-adding.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=228202737
---
 util/posix_logger.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/util/posix_logger.h b/util/posix_logger.h
index 28b290e..5685fa3 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -68,7 +68,7 @@ class PosixLogger final : public Logger {
       // Print the header into the buffer.
       int buffer_offset = snprintf(
           buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s",
+          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
           now_components.tm_year + 1900,
           now_components.tm_mon + 1,
           now_components.tm_mday,
@@ -79,7 +79,7 @@ class PosixLogger final : public Logger {
           thread_id.c_str());
 
       // The header can be at most 28 characters (10 date + 15 time +
-      // 3 spacing) plus the thread ID, which should fit comfortably into the
+      // 3 delimiters) plus the thread ID, which should fit comfortably into the
       // static buffer.
       assert(buffer_offset <= 28 + kMaxThreadIdSize);
       static_assert(28 + kMaxThreadIdSize < kStackBufferSize,

From b70493ca8586285b49e9888e2b528f71806bdc6e Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 8 Jan 2019 13:49:13 -0800
Subject: [PATCH 033/181] Fix fdatasync() feature detection in opensource
 build.

The CMake feature-detection code used check_symbol_exists(), which
invokes the C compiler. However, some glibc versions don't expose the
fdatasync() declaration when compiled with -std=c11, but do expose it
when compiled with -std=c++11. This most likely comes down to how
_POSIX_SOURCE is defined -- it needs to be >= 201112L for <unistd.h> to
expose fdatasync().

This CL switches to check_cxx_symbol_exists(), which uses the C++
compiler. Asides from fixing the problem above, this is the right thing
to do, because we use <unistd.h> in env_posix.cc, which is compiled with
the C++ compiler.

This CL also fixes a previously introduced inconsistency, where the
macro indicating the fdatasync() feature detection result was referred
to as HAVE_FDATASYNC and HAVE_FUNC_FDATASYNC. The former appears to be
used in other libraries, so this CL switches all our references to
HAVE_FDATASYNC.

Fixes https://github.com/google/leveldb/issues/629
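
For context, a minimal sketch (not part of this patch) of how the
HAVE_FDATASYNC macro produced by this check is typically consumed; the helper
name DurableSync() is hypothetical, and the macro normally comes from the
generated port/port_config.h.

```cpp
#include <unistd.h>

namespace {

// Hypothetical helper: prefer fdatasync() when the platform declares it,
// otherwise fall back to fsync(). HAVE_FDATASYNC is assumed to be supplied
// by the generated port/port_config.h.
bool DurableSync(int fd) {
#if HAVE_FDATASYNC
  // fdatasync() avoids flushing metadata that is not needed to read the data
  // back, so it can be cheaper than fsync().
  return ::fdatasync(fd) == 0;
#else
  return ::fsync(fd) == 0;
#endif  // HAVE_FDATASYNC
}

}  // namespace
```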

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=228392612
---
 CMakeLists.txt        | 9 +++++++--
 port/port_config.h.in | 6 +++---
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 36d6cbd..57a0c74 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -30,8 +30,13 @@ check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
 check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
 check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
 
-include(CheckSymbolExists)
-check_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+include(CheckCXXSymbolExists)
+# Using check_cxx_symbol_exists() instead of check_symbol_exists() because
+# we're including the header from C++, and feature detection should use the same
+# compiler language that the project will use later. Principles aside, some
+# versions of glibc do not expose fdatasync() in <unistd.h> in standard C mode
+# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
+check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
 
 include(CheckCXXSourceCompiles)
 
diff --git a/port/port_config.h.in b/port/port_config.h.in
index 1934055..645c78c 100644
--- a/port/port_config.h.in
+++ b/port/port_config.h.in
@@ -6,9 +6,9 @@
 #define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
 
 // Define to 1 if you have a definition for fdatasync() in <unistd.h>.
-#if !defined(HAVE_FUNC_FDATASYNC)
-#cmakedefine01 HAVE_FUNC_FDATASYNC
-#endif  // !defined(HAVE_FUNC_FDATASYNC)
+#if !defined(HAVE_FDATASYNC)
+#cmakedefine01 HAVE_FDATASYNC
+#endif  // !defined(HAVE_FDATASYNC)
 
 // Define to 1 if you have Google CRC32C.
 #if !defined(HAVE_CRC32C)

From 296de8d5b8e4e57bd1e46c981114dfbe58a8c4fa Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 9 Jan 2019 14:53:09 -0800
Subject: [PATCH 034/181] leveldb: Fix PosixWritableFile::Sync() on Apple
 systems.

Apple doesn't follow POSIX specifications for fsync(). Instead, fsync() guarantees to flush the buffer cache to the device, which means the data will survive kernel panics, but may not survive power outages. Applications that need stronger guarantees (like databases) need to use fcntl(F_FULLFSYNC).

This CL switches PosixWritableFile::Sync() to get the stronger guarantees on Apple systems. The improved implementation follows the same principles as SQLite [1] and node.js [2].

Research for the fcntl() to fsync() fallback strategy:

Apple's released source code at https://opensource.apple.com/ shows at least three different error codes being returned when a filesystem does not support F_FULLFSYNC.

fcntl() is implemented in xnu-4903.221.2 in bsd/kern/kern_descrip.c, where it delegates to fcntl_nocancel(). The documentation for fcntl_nocancel() mentions error codes for some operations, but does not include F_FULLFSYNC. The F_FULLFSYNC branch in fcntl_nocancel() calls VNOP_IOCTL(_, F_FULLFSYNC, NULL, 0, _), whose return value sets the error
code.

VNOP_IOCTL() is implemented in bsd/vfs/kpi_vfs.c and calls the ioctl function in the vnode's operation vector. The per-filesystem function names follow the pattern _vnop_ioctl() for all the instances in opensource code: {hfs,msdosfs,nfs,ntfs,smbfs,webdav,zfs}_vnop_ioctl().

hfs-407.30.1, msdosfs-229.200.3, and nfs in xnu-4903.221.2 handle F_FULLFSYNC. ntfs-94.200.1 and smb-759.40.1 do not handle F_FULLFSYNC, and the default branch returns ENOTSUP. webdav-380.200.1 also does not handle F_FULLFSYNC, but the default branch returns EINVAL. zfs-59 also does not handle F_FULLFSYNC, and its default branch returns ENOTTY.

From a different angle, Apple's ntfs-94.200.1 includes utility code that uses fcntl(F_FULLFSYNC) and falls back to fsync() just like we do, supporting the hypothesis that there is no good way to detect lack of F_FULLFSYNC support. Also, Apple's fcntl() man page [3] does not mention a way to detect lack of F_FULLFSYNC support.

[1] https://www.sqlite.org/src/doc/trunk/src/os_unix.c
[2] https://github.com/libuv/libuv/blob/master/src/unix/fs.c
[3] https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fcntl.2.html
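
The fallback strategy lands as the SyncFd() helper in util/env_posix.cc below;
a condensed sketch of the same logic (the helper name and the assumption that
HAVE_FULLFSYNC/HAVE_FDATASYNC come from the generated port_config.h are mine,
not part of the patch):

```cpp
#include <fcntl.h>
#include <unistd.h>

// Hypothetical helper illustrating the fallback order described above.
bool SyncToDurableMedia(int fd) {
#if HAVE_FULLFSYNC
  // On Apple systems only fcntl(F_FULLFSYNC) flushes all the way to durable
  // media; some filesystems reject it, so fall back to the plain sync calls.
  if (::fcntl(fd, F_FULLFSYNC) == 0) {
    return true;
  }
#endif  // HAVE_FULLFSYNC

#if HAVE_FDATASYNC
  return ::fdatasync(fd) == 0;
#else
  return ::fsync(fd) == 0;
#endif  // HAVE_FDATASYNC
}
```
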
Tested:
    https://travis-ci.org/pwnall/leveldb/builds/477318498
    TAP global presubmit

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=228593729
---
 CMakeLists.txt        |  1 +
 port/port_config.h.in |  5 +++++
 util/env_posix.cc     | 46 ++++++++++++++++++++++++++++++++-----------
 3 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 57a0c74..f6a7c0a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,6 +37,7 @@ include(CheckCXXSymbolExists)
 # versions of glibc do not expose fdatasync() in <unistd.h> in standard C mode
 # (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
 check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
 
 include(CheckCXXSourceCompiles)
 
diff --git a/port/port_config.h.in b/port/port_config.h.in
index 645c78c..d6a6d01 100644
--- a/port/port_config.h.in
+++ b/port/port_config.h.in
@@ -10,6 +10,11 @@
 #cmakedefine01 HAVE_FDATASYNC
 #endif  // !defined(HAVE_FDATASYNC)
 
+// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>.
+#if !defined(HAVE_FULLFSYNC)
+#cmakedefine01 HAVE_FULLFSYNC
+#endif  // !defined(HAVE_FULLFSYNC)
+
 // Define to 1 if you have Google CRC32C.
 #if !defined(HAVE_CRC32C)
 #cmakedefine01 HAVE_CRC32C
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 76bb648..362adb3 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -35,12 +35,6 @@
 #include "util/posix_logger.h"
 #include "util/env_posix_test_helper.h"
 
-// HAVE_FDATASYNC is defined in the auto-generated port_config.h, which is
-// included by port_stdcxx.h.
-#if !HAVE_FDATASYNC
-#define fdatasync fsync
-#endif  // !HAVE_FDATASYNC
-
 namespace leveldb {
 
 namespace {
@@ -314,10 +308,11 @@ class PosixWritableFile final : public WritableFile {
     }
 
     status = FlushBuffer();
-    if (status.ok() && ::fdatasync(fd_) != 0) {
-      status = PosixError(filename_, errno);
+    if (!status.ok()) {
+      return status;
     }
-    return status;
+
+    return SyncFd(fd_, filename_);
   }
 
  private:
@@ -352,14 +347,41 @@ class PosixWritableFile final : public WritableFile {
     if (fd < 0) {
       status = PosixError(dirname_, errno);
     } else {
-      if (::fsync(fd) < 0) {
-        status = PosixError(dirname_, errno);
-      }
+      status = SyncFd(fd, dirname_);
       ::close(fd);
     }
     return status;
   }
 
+  // Ensures that all the caches associated with the given file descriptor's
+  // data are flushed all the way to durable media, and can withstand power
+  // failures.
+  //
+  // The path argument is only used to populate the description string in the
+  // returned Status if an error occurs.
+  static Status SyncFd(int fd, const std::string& fd_path) {
+#if HAVE_FULLFSYNC
+    // On macOS and iOS, fsync() doesn't guarantee durability past power
+    // failures. fcntl(F_FULLFSYNC) is required for that purpose. Some
+    // filesystems don't support fcntl(F_FULLFSYNC), and require a fallback to
+    // fsync().
+    if (::fcntl(fd, F_FULLFSYNC) == 0) {
+      return Status::OK();
+    }
+#endif  // HAVE_FULLFSYNC
+
+#if HAVE_FDATASYNC
+    bool sync_success = ::fdatasync(fd) == 0;
+#else
+    bool sync_success = ::fsync(fd) == 0;
+#endif  // HAVE_FDATASYNC
+
+    if (sync_success) {
+      return Status::OK();
+    }
+    return PosixError(fd_path, errno);
+  }
+
   // Returns the directory name in a path pointing to a file.
   //
   // Returns "." if the path does not contain any directory separator.

From fe4494804f5e3a2e25485d32aeb0eb7d2f25732e Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 15 Jan 2019 10:29:55 -0800
Subject: [PATCH 035/181] leveldb: Make WriteBatch::ApproximateSize() const.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=229395810
---
 db/write_batch.cc             | 2 +-
 include/leveldb/write_batch.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/db/write_batch.cc b/db/write_batch.cc
index 40eed2e..23eb00f 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -39,7 +39,7 @@ void WriteBatch::Clear() {
   rep_.resize(kHeader);
 }
 
-size_t WriteBatch::ApproximateSize() {
+size_t WriteBatch::ApproximateSize() const {
   return rep_.size();
 }
 
diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index 9386ace..9b319f0 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -52,7 +52,7 @@ class LEVELDB_EXPORT WriteBatch {
   //
   // This number is tied to implementation details, and may change across
   // releases. It is intended for LevelDB usage metrics.
-  size_t ApproximateSize();
+  size_t ApproximateSize() const;
 
   // Copies the operations in "source" to this batch.
   //

From 77e9dfad9f685b398eb4f886ef5b0c8d724d7c30 Mon Sep 17 00:00:00 2001
From: caodhuan <cao.d.huan@gmail.com>
Date: Tue, 22 Jan 2019 10:10:40 +0800
Subject: [PATCH 036/181] add:compact_pointers_ should be clear when Clear()
 called

---
 db/version_edit.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/db/version_edit.cc b/db/version_edit.cc
index f10a2d5..f1fb534 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -34,6 +34,7 @@ void VersionEdit::Clear() {
   has_prev_log_number_ = false;
   has_next_file_number_ = false;
   has_last_sequence_ = false;
+  compact_pointers_.clear();
   deleted_files_.clear();
   new_files_.clear();
 }

From 75fceae7003e217e16b04433831da7528ae56881 Mon Sep 17 00:00:00 2001
From: Adam Azarchs <adam.azarchs@10xgenomics.com>
Date: Wed, 19 Sep 2018 16:29:00 -0700
Subject: [PATCH 037/181] Add O_CLOEXEC to open calls.

This prevents file descriptors from leaking to child processes.

When compiled for older (pre-2.6.23) kernels which lack support for
O_CLOEXEC, there is no change in behavior.  With newer kernels, child
processes will no longer inherit leveldb's file handles, which
reduces the chances of accidentally corrupting the database.

Fixes https://github.com/google/leveldb/issues/623
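
The pattern applied to every ::open() call site looks like the sketch below
(the patch names the constant O_FLAGS; kOpenBaseFlags and OpenForRead() here
are illustrative stand-ins):

```cpp
#include <fcntl.h>

// HAVE_O_CLOEXEC is assumed to come from the generated port_config.h.
#if defined(HAVE_O_CLOEXEC)
constexpr int kOpenBaseFlags = O_CLOEXEC;  // descriptors are closed on exec()
#else
constexpr int kOpenBaseFlags = 0;  // pre-2.6.23 kernels: behavior unchanged
#endif  // defined(HAVE_O_CLOEXEC)

// Hypothetical helper: any file opened this way is not inherited by child
// processes spawned through exec().
int OpenForRead(const char* path) {
  return ::open(path, O_RDONLY | kOpenBaseFlags);
}
```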
---
 CMakeLists.txt         |   1 +
 port/port_config.h.in  |   5 ++
 util/env_posix.cc      |  27 +++++++----
 util/env_posix_test.cc | 102 ++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 126 insertions(+), 9 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f6a7c0a..9b0042b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -38,6 +38,7 @@ include(CheckCXXSymbolExists)
 # (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
 check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
 check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
+check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)
 
 include(CheckCXXSourceCompiles)
 
diff --git a/port/port_config.h.in b/port/port_config.h.in
index d6a6d01..2127315 100644
--- a/port/port_config.h.in
+++ b/port/port_config.h.in
@@ -15,6 +15,11 @@
 #cmakedefine01 HAVE_FULLFSYNC
 #endif  // !defined(HAVE_FULLFSYNC)
 
+// Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>.
+#if !defined(HAVE_O_CLOEXEC)
+#cmakedefine01 HAVE_O_CLOEXEC
+#endif  // !defined(HAVE_O_CLOEXEC)
+
 // Define to 1 if you have Google CRC32C.
 #if !defined(HAVE_CRC32C)
 #cmakedefine01 HAVE_CRC32C
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 362adb3..7a0f04d 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -48,6 +48,13 @@ constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
 // Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit.
 int g_mmap_limit = kDefaultMmapLimit;
 
+// Common flags defined for all posix open operations
+#if defined(HAVE_O_CLOEXEC)
+constexpr const int O_FLAGS = O_CLOEXEC;
+#else
+constexpr const int O_FLAGS = 0;
+#endif  // defined(HAVE_O_CLOEXEC)
+
 constexpr const size_t kWritableFileBufferSize = 65536;
 
 Status PosixError(const std::string& context, int error_number) {
@@ -168,7 +175,7 @@ class PosixRandomAccessFile final : public RandomAccessFile {
               char* scratch) const override {
     int fd = fd_;
     if (!has_permanent_fd_) {
-      fd = ::open(filename_.c_str(), O_RDONLY);
+      fd = ::open(filename_.c_str(), O_RDONLY | O_FLAGS);
       if (fd < 0) {
         return PosixError(filename_, errno);
       }
@@ -343,7 +350,7 @@ class PosixWritableFile final : public WritableFile {
       return status;
     }
 
-    int fd = ::open(dirname_.c_str(), O_RDONLY);
+    int fd = ::open(dirname_.c_str(), O_RDONLY | O_FLAGS);
     if (fd < 0) {
       status = PosixError(dirname_, errno);
     } else {
@@ -491,7 +498,7 @@ class PosixEnv : public Env {
 
   Status NewSequentialFile(const std::string& filename,
                            SequentialFile** result) override {
-    int fd = ::open(filename.c_str(), O_RDONLY);
+    int fd = ::open(filename.c_str(), O_RDONLY | O_FLAGS);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -504,7 +511,7 @@ class PosixEnv : public Env {
   Status NewRandomAccessFile(const std::string& filename,
                              RandomAccessFile** result) override {
     *result = nullptr;
-    int fd = ::open(filename.c_str(), O_RDONLY);
+    int fd = ::open(filename.c_str(), O_RDONLY | O_FLAGS);
     if (fd < 0) {
       return PosixError(filename, errno);
     }
@@ -536,7 +543,9 @@ class PosixEnv : public Env {
 
   Status NewWritableFile(const std::string& filename,
                          WritableFile** result) override {
-    int fd = ::open(filename.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
+    int fd = ::open(filename.c_str(),
+                    O_TRUNC | O_WRONLY | O_CREAT | O_FLAGS,
+                    0644);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -548,7 +557,9 @@ class PosixEnv : public Env {
 
   Status NewAppendableFile(const std::string& filename,
                            WritableFile** result) override {
-    int fd = ::open(filename.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
+    int fd = ::open(filename.c_str(),
+                    O_APPEND | O_WRONLY | O_CREAT | O_FLAGS,
+                    0644);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -618,7 +629,7 @@ class PosixEnv : public Env {
   Status LockFile(const std::string& filename, FileLock** lock) override {
     *lock = nullptr;
 
-    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT, 0644);
+    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | O_FLAGS, 0644);
     if (fd < 0) {
       return PosixError(filename, errno);
     }
@@ -674,7 +685,7 @@ class PosixEnv : public Env {
   }
 
   Status NewLogger(const std::string& filename, Logger** result) override {
-    std::FILE* fp = std::fopen(filename.c_str(), "w");
+    std::FILE* fp = std::fopen(filename.c_str(), "we");
     if (fp == nullptr) {
       *result = nullptr;
       return PosixError(filename, errno);
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index e28df9a..5519d8d 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <sys/wait.h>
+#include <unistd.h>
+
 #include "leveldb/env.h"
 
 #include "port/port.h"
@@ -31,7 +34,7 @@ TEST(EnvPosixTest, TestOpenOnRead) {
   ASSERT_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file = test_dir + "/open_on_read.txt";
 
-  FILE* f = fopen(test_file.c_str(), "w");
+  FILE* f = fopen(test_file.c_str(), "we");
   ASSERT_TRUE(f != nullptr);
   const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
   fputs(kFileData, f);
@@ -56,9 +59,106 @@ TEST(EnvPosixTest, TestOpenOnRead) {
   ASSERT_OK(env_->DeleteFile(test_file));
 }
 
+#if defined(HAVE_O_CLOEXEC)
+
+TEST(EnvPosixTest, TestCloseOnExec) {
+  // Test that file handles are not inherited by child processes.
+
+  // Open file handles with each of the open methods.
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::vector<std::string> test_files = {
+      test_dir + "/close_on_exec_seq.txt",
+      test_dir + "/close_on_exec_rand.txt",
+      test_dir + "/close_on_exec_write.txt",
+      test_dir + "/close_on_exec_append.txt",
+      test_dir + "/close_on_exec_lock.txt",
+      test_dir + "/close_on_exec_log.txt",
+  };
+  for (const std::string& test_file : test_files) {
+    const char kFileData[] = "0123456789";
+    ASSERT_OK(WriteStringToFile(env_, kFileData, test_file));
+  }
+  leveldb::SequentialFile* seqFile = nullptr;
+  leveldb::RandomAccessFile* randFile = nullptr;
+  leveldb::WritableFile* writeFile = nullptr;
+  leveldb::WritableFile* appendFile = nullptr;
+  leveldb::FileLock* lockFile = nullptr;
+  leveldb::Logger* logFile = nullptr;
+  ASSERT_OK(env_->NewSequentialFile(test_files[0], &seqFile));
+  ASSERT_OK(env_->NewRandomAccessFile(test_files[1], &randFile));
+  ASSERT_OK(env_->NewWritableFile(test_files[2], &writeFile));
+  ASSERT_OK(env_->NewAppendableFile(test_files[3], &appendFile));
+  ASSERT_OK(env_->LockFile(test_files[4], &lockFile));
+  ASSERT_OK(env_->NewLogger(test_files[5], &logFile));
+
+  // Fork a child process and wait for it to complete.
+  int pid = fork();
+  if (pid == 0) {
+    const char* const child[] = {"/proc/self/exe", "-cloexec-child", nullptr};
+    execv(child[0], const_cast<char* const*>(child));
+    printf("Error spawning child process: %s\n", strerror(errno));
+    exit(6);
+  }
+  int status;
+  waitpid(pid, &status, 0);
+  ASSERT_EQ(0, WEXITSTATUS(status));
+
+  // cleanup
+  ASSERT_OK(env_->UnlockFile(lockFile));
+  delete seqFile;
+  delete randFile;
+  delete writeFile;
+  delete appendFile;
+  delete logFile;
+  for (const std::string& test_file : test_files) {
+    ASSERT_OK(env_->DeleteFile(test_file));
+  }
+}
+
+#endif  // defined(HAVE_O_CLOEXEC)
+
+int cloexecChild() {
+  // Checks for open file descriptors in the range 3..FD_SETSIZE.
+  for (int i = 3; i < FD_SETSIZE; i++) {
+    int dup_result = dup2(i, i);
+    if (dup_result != -1) {
+      printf("Unexpected open file %d\n", i);
+      char nbuf[28];
+      snprintf(nbuf, 28, "/proc/self/fd/%d", i);
+      char dbuf[1024];
+      int result = readlink(nbuf, dbuf, 1024);
+      if (0 < result && result < 1024) {
+        dbuf[result] = 0;
+        printf("File descriptor %d is %s\n", i, dbuf);
+        if (strstr(dbuf, "close_on_exec_") == nullptr) {
+          continue;
+        }
+      } else if (result >= 1024) {
+        printf("(file name length is too long)\n");
+      } else {
+        printf("Couldn't get file name: %s\n", strerror(errno));
+      }
+      return 3;
+    } else {
+      int e = errno;
+      if (e != EBADF) {
+        printf("Unexpected result reading file handle %d: %s\n", i,
+               strerror(errno));
+        return 4;
+      }
+    }
+  }
+  return 0;
+}
+
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
+  // Check if this is the child process for TestCloseOnExec
+  if (argc > 1 && strcmp(argv[1], "-cloexec-child") == 0) {
+    return leveldb::cloexecChild();
+  }
   // All tests currently run with the same read-only file limits.
   leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
                                        leveldb::kMMapLimit);

From c69d33b0ec3dad2a8063ad66da9d51a1d6309f4e Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Fri, 1 Mar 2019 13:12:01 -0800
Subject: [PATCH 038/181] Added native support for Windows.

This change adds a native Windows port (port_windows.h) and a
Windows Env (WindowsEnv).

Note1: "small" is defined when including <Windows.h> so some
parameters were renamed to avoid conflict.

Note2: leveldb::Env defines the method "DeleteFile", which is also the
name of a macro defined when including <Windows.h>. The fix in env.h
undefines that macro (if present) around the declaration, so the
exported method is always leveldb::Env::DeleteFile, and redefines it to
DeleteFileA or DeleteFileW (for MBCS or UNICODE builds respectively) at
the end of the header.

This resolves #519 on GitHub.
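
To illustrate the Note2 collision, a self-contained sketch (FakeEnv is a
hypothetical stand-in for leveldb::Env, not leveldb code):

```cpp
// <windows.h> defines DeleteFile as a macro that expands to DeleteFileA or
// DeleteFileW, so any identifier spelled DeleteFile that the preprocessor
// sees afterwards is rewritten as well.
#include <windows.h>

class FakeEnv {
 public:
  // Without an #undef of the macro (as done in leveldb/env.h), this
  // declaration is silently renamed to DeleteFileA() or DeleteFileW(),
  // depending on whether UNICODE is defined.
  bool DeleteFile(const char* filename);
};
```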

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=236364778
---
 .appveyor.yml                  |  38 ++
 CMakeLists.txt                 |  31 +-
 README.md                      |  33 +-
 db/corruption_test.cc          |  26 ++
 db/db_test.cc                  |  11 +-
 db/recovery_test.cc            |   3 +
 db/version_set.cc              |   6 +-
 include/leveldb/env.h          |  30 ++
 port/atomic_pointer.h          |   4 -
 port/port.h                    |   2 +-
 util/env_windows.cc            | 742 +++++++++++++++++++++++++++++++++
 util/env_windows_test.cc       |  63 +++
 util/env_windows_test_helper.h |  30 ++
 util/windows_logger.h          | 124 ++++++
 14 files changed, 1120 insertions(+), 23 deletions(-)
 create mode 100644 .appveyor.yml
 create mode 100644 util/env_windows.cc
 create mode 100644 util/env_windows_test.cc
 create mode 100644 util/env_windows_test_helper.h
 create mode 100644 util/windows_logger.h

diff --git a/.appveyor.yml b/.appveyor.yml
new file mode 100644
index 0000000..78aeaf1
--- /dev/null
+++ b/.appveyor.yml
@@ -0,0 +1,38 @@
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+  matrix:
+    # AppVeyor currently has no custom job name feature.
+    # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+    - JOB: Visual Studio 2017
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+      CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+  - x86
+  - x64
+
+configuration:
+  - RelWithDebInfo
+  - Debug
+
+build:
+  verbosity: minimal
+
+build_script:
+  - git submodule update --init --recursive
+  - mkdir build
+  - cd build
+  - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+  - cmake --version
+  - cmake .. -G "%CMAKE_GENERATOR%"
+      -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+  - cmake --build . --config "%CONFIGURATION%"
+  - cd ..
+
+test_script:
+  - cd build ; ctest --verbose ; cd ..
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f6a7c0a..1eaf48e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,6 +15,14 @@ set(CMAKE_CXX_STANDARD 11)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 set(CMAKE_CXX_EXTENSIONS OFF)
 
+if (WIN32)
+  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
+  # TODO(cmumford): Make UNICODE configurable for Windows.
+  add_definitions(-D_UNICODE -DUNICODE)
+else (WIN32)
+  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
+endif (WIN32)
+
 option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
 option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
 option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
@@ -179,12 +187,19 @@ target_sources(leveldb
     "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
 )
 
-# POSIX code is specified separately so we can leave it out in the future.
+if (WIN32)
+target_sources(leveldb
+  PRIVATE
+    "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
+    "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
+)
+else (WIN32)
 target_sources(leveldb
   PRIVATE
     "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
     "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
 )
+endif (WIN32)
 
 # MemEnv is not part of the interface and could be pulled to a separate library.
 target_sources(leveldb
@@ -203,7 +218,7 @@ target_compile_definitions(leveldb
     # Used by include/export.h when building shared libraries.
     LEVELDB_COMPILE_LIBRARY
     # Used by port/port.h.
-    LEVELDB_PLATFORM_POSIX=1
+    ${LEVELDB_PLATFORM_NAME}=1
 )
 if (NOT HAVE_CXX17_HAS_INCLUDE)
   target_compile_definitions(leveldb
@@ -265,7 +280,7 @@ if(LEVELDB_BUILD_TESTS)
     target_link_libraries("${test_target_name}" leveldb)
     target_compile_definitions("${test_target_name}"
       PRIVATE
-        LEVELDB_PLATFORM_POSIX=1
+        ${LEVELDB_PLATFORM_NAME}=1
     )
     if (NOT HAVE_CXX17_HAS_INCLUDE)
       target_compile_definitions("${test_target_name}"
@@ -314,8 +329,12 @@ if(LEVELDB_BUILD_TESTS)
     leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")
 
     # TODO(costan): This test also uses
-    #               "${PROJECT_SOURCE_DIR}/util/env_posix_test_helper.h"
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
+    #               "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
+    if (WIN32)
+      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
+    else (WIN32)
+      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
+    endif (WIN32)
   endif(NOT BUILD_SHARED_LIBS)
 endif(LEVELDB_BUILD_TESTS)
 
@@ -339,7 +358,7 @@ if(LEVELDB_BUILD_BENCHMARKS)
     target_link_libraries("${bench_target_name}" leveldb)
     target_compile_definitions("${bench_target_name}"
       PRIVATE
-        LEVELDB_PLATFORM_POSIX=1
+        ${LEVELDB_PLATFORM_NAME}=1
     )
     if (NOT HAVE_CXX17_HAS_INCLUDE)
       target_compile_definitions("${bench_target_name}"
diff --git a/README.md b/README.md
index 15fbdc2..493bdbd 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,8 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
 
 This project supports [CMake](https://cmake.org/) out of the box.
 
+### Building for POSIX
+
 Quick start:
 
 ```bash
@@ -37,6 +39,29 @@ mkdir -p build && cd build
 cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
 ```
 
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```bash
+mkdir -p build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default will build for x86. For 64-bit run:
+
+```bash
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```bash
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
 Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
 
 # Contributing to the leveldb Project
@@ -48,10 +73,10 @@ will be considered.
 
 Contribution requirements:
 
-1. **POSIX only**. We _generally_ will only accept changes that are both
-   compiled, and tested on a POSIX platform - usually Linux. Very small
-   changes will sometimes be accepted, but consider that more of an
-   exception than the rule.
+1. **Tested platforms only**. We _generally_ will only accept changes for
+   platforms that are compiled and tested. This means POSIX (for Linux and
+   macOS) or Windows. Very small changes will sometimes be accepted, but
+   consider that more of an exception than the rule.
 
 2. **Stable API**. We strive very hard to maintain a stable API. Changes that
    require changes for projects using leveldb _might_ be rejected without
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 0b93c24..98aaf8c 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -20,6 +20,10 @@
 #include "util/testharness.h"
 #include "util/testutil.h"
 
+#if defined(LEVELDB_PLATFORM_WINDOWS)
+#include "util/env_windows_test_helper.h"
+#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
+
 namespace leveldb {
 
 static const int kValueSize = 1000;
@@ -32,6 +36,17 @@ class CorruptionTest {
   Options options_;
   DB* db_;
 
+#if defined(LEVELDB_PLATFORM_WINDOWS)
+  static void SetFileLimits(int mmap_limit) {
+    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
+  }
+
+  // TODO(cmumford): Modify corruption_test to use MemEnv and remove.
+  static void RelaxFilePermissions() {
+    EnvWindowsTestHelper::RelaxFilePermissions();
+  }
+#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
+
   CorruptionTest() {
     tiny_cache_ = NewLRUCache(100);
     options_.env = &env_;
@@ -370,5 +385,16 @@ TEST(CorruptionTest, UnrelatedKeys) {
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
+#if defined(LEVELDB_PLATFORM_WINDOWS)
+  // When Windows maps the contents of a file into memory, even if read/write,
+  // subsequent attempts to open that file for write access will fail. Forcing
+  // all RandomAccessFile instances to use base file I/O (e.g. ReadFile)
+  // allows these tests to open files in order to corrupt their contents.
+  leveldb::CorruptionTest::SetFileLimits(0);
+
+  // Allow this test to write to (and corrupt) files which are normally
+  // open for exclusive read access.
+  leveldb::CorruptionTest::RelaxFilePermissions();
+#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
   return leveldb::test::RunAllTests();
 }
diff --git a/db/db_test.cc b/db/db_test.cc
index 878b7d4..894ed23 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -470,11 +470,12 @@ class DBTest {
   }
 
   // Do n memtable compactions, each of which produces an sstable
-  // covering the range [small,large].
-  void MakeTables(int n, const std::string& small, const std::string& large) {
+  // covering the range [small_key,large_key].
+  void MakeTables(int n, const std::string& small_key,
+                  const std::string& large_key) {
     for (int i = 0; i < n; i++) {
-      Put(small, "begin");
-      Put(large, "end");
+      Put(small_key, "begin");
+      Put(large_key, "end");
       dbfull()->TEST_CompactMemTable();
     }
   }
@@ -1655,7 +1656,7 @@ TEST(DBTest, DestroyEmptyDir) {
   ASSERT_TRUE(env.FileExists(dbname));
   std::vector<std::string> children;
   ASSERT_OK(env.GetChildren(dbname, &children));
-  // The POSIX env does not filter out '.' and '..' special files.
+  // The stock Env's do not filter out '.' and '..' special files.
   ASSERT_EQ(2, children.size());
   ASSERT_OK(DestroyDB(dbname, opts));
   ASSERT_TRUE(!env.FileExists(dbname));
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index c852803..87bd53c 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -97,6 +97,9 @@ class RecoveryTest {
   }
 
   size_t DeleteLogFiles() {
+    // Linux allows unlinking open files, but Windows does not.
+    // Closing the db allows for file deletion.
+    Close();
     std::vector<uint64_t> logs = GetFiles(kLogFile);
     for (size_t i = 0; i < logs.size(); i++) {
       ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
diff --git a/db/version_set.cc b/db/version_set.cc
index c27ccad..ae06089 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -143,8 +143,8 @@ bool SomeFileOverlapsRange(
   uint32_t index = 0;
   if (smallest_user_key != nullptr) {
     // Find the earliest possible internal key for smallest_user_key
-    InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
-    index = FindFile(icmp, files, small.Encode());
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
+    index = FindFile(icmp, files, small_key.Encode());
   }
 
   if (index >= files.size()) {
@@ -700,7 +700,7 @@ class VersionSet::Builder {
       // same as the compaction of 40KB of data.  We are a little
       // conservative and allow approximately one seek for every 16KB
       // of data before triggering a compaction.
-      f->allowed_seeks = (f->file_size / 16384);
+      f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
       if (f->allowed_seeks < 100) f->allowed_seeks = 100;
 
       levels_[level].deleted_files.erase(f->number);
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 59e2a6f..946ea98 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -20,6 +20,27 @@
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
+#if defined(_WIN32)
+// The leveldb::Env class below contains a DeleteFile method.
+// At the same time, <windows.h>, a fairly popular header
+// file for Windows applications, defines a DeleteFile macro.
+//
+// Without any intervention on our part, the result of this
+// unfortunate coincidence is that the name of the
+// leveldb::Env::DeleteFile method seen by the compiler depends on
+// whether <windows.h> was included before or after the LevelDB
+// headers.
+//
+// To avoid headaches, we undefine DeleteFile (if defined) and
+// redefine it at the bottom of this file. This way <windows.h>
+// can be included before this file (or not at all) and the
+// exported method will always be leveldb::Env::DeleteFile.
+#if defined(DeleteFile)
+#undef DeleteFile
+#define LEVELDB_DELETEFILE_UNDEFINED
+#endif  // defined(DeleteFile)
+#endif  // defined(_WIN32)
+
 namespace leveldb {
 
 class FileLock;
@@ -356,4 +377,13 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
 
 }  // namespace leveldb
 
+// Redefine DeleteFile if necessary.
+#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+#if defined(UNICODE)
+#define DeleteFile DeleteFileW
+#else
+#define DeleteFile DeleteFileA
+#endif  // defined(UNICODE)
+#endif  // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+
 #endif  // STORAGE_LEVELDB_INCLUDE_ENV_H_
diff --git a/port/atomic_pointer.h b/port/atomic_pointer.h
index bb4e183..d906f63 100644
--- a/port/atomic_pointer.h
+++ b/port/atomic_pointer.h
@@ -22,10 +22,6 @@
 
 #include <atomic>
 
-#ifdef OS_WIN
-#include <windows.h>
-#endif
-
 #if defined(_M_X64) || defined(__x86_64__)
 #define ARCH_CPU_X86_FAMILY 1
 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
diff --git a/port/port.h b/port/port.h
index 0975fed..b2210a7 100644
--- a/port/port.h
+++ b/port/port.h
@@ -10,7 +10,7 @@
 // Include the appropriate platform specific file below.  If you are
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
-#if defined(LEVELDB_PLATFORM_POSIX)
+#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
 #  include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
 #  include "port/port_chromium.h"
diff --git a/util/env_windows.cc b/util/env_windows.cc
new file mode 100644
index 0000000..03da266
--- /dev/null
+++ b/util/env_windows.cc
@@ -0,0 +1,742 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// Prevent Windows headers from defining min/max macros and instead
+// use STL.
+#define NOMINMAX
+#include <windows.h>
+
+#include <algorithm>
+#include <chrono>
+#include <condition_variable>
+#include <deque>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+#include "leveldb/slice.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/env_windows_test_helper.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/windows_logger.h"
+
+#if defined(DeleteFile)
+#undef DeleteFile
+#endif  // defined(DeleteFile)
+
+namespace leveldb {
+
+namespace {
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+// Up to 1000 mmaps for 64-bit binaries; none for 32-bit.
+constexpr int kDefaultMmapLimit = sizeof(void*) >= 8 ? 1000 : 0;
+
+// Modified by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
+
+// Relax some file access permissions for testing.
+bool g_relax_permissions = false;
+
+std::string GetWindowsErrorMessage(DWORD error_code) {
+  std::string message;
+  char* error_text = nullptr;
+  // Use MBCS version of FormatMessage to match return value.
+  size_t error_text_size = ::FormatMessageA(
+      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
+          FORMAT_MESSAGE_IGNORE_INSERTS,
+      nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+      reinterpret_cast<char*>(&error_text), 0, nullptr);
+  if (!error_text) {
+    return message;
+  }
+  message.assign(error_text, error_text_size);
+  ::LocalFree(error_text);
+  return message;
+}
+
+Status WindowsError(const std::string& context, DWORD error_code) {
+  if (error_code == ERROR_FILE_NOT_FOUND || error_code == ERROR_PATH_NOT_FOUND)
+    return Status::NotFound(context, GetWindowsErrorMessage(error_code));
+  return Status::IOError(context, GetWindowsErrorMessage(error_code));
+}
+
+class ScopedHandle {
+ public:
+  ScopedHandle(HANDLE handle) : handle_(handle) {}
+  ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
+  ~ScopedHandle() { Close(); }
+
+  ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
+    if (this != &rhs) { Close(); handle_ = rhs.Release(); }  // don't leak handle_
+    return *this;
+  }
+
+  bool Close() {
+    if (!is_valid()) {
+      return true;
+    }
+    HANDLE h = handle_;
+    handle_ = INVALID_HANDLE_VALUE;
+    return ::CloseHandle(h);
+  }
+
+  bool is_valid() const {
+    return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
+  }
+
+  HANDLE get() const { return handle_; }
+
+  HANDLE Release() {
+    HANDLE h = handle_;
+    handle_ = INVALID_HANDLE_VALUE;
+    return h;
+  }
+
+ private:
+  HANDLE handle_;
+};
+
+// Helper class to limit resource usage to avoid exhaustion.
+// Currently used to limit mmap file usage so that we do not end
+// up running out virtual memory, or running into kernel performance
+// problems for very large databases.
+class Limiter {
+ public:
+  // Limit maximum number of resources to |n|.
+  Limiter(intptr_t n) { SetAllowed(n); }
+
+  // If another resource is available, acquire it and return true.
+  // Else return false.
+  bool Acquire() LOCKS_EXCLUDED(mu_) {
+    if (GetAllowed() <= 0) {
+      return false;
+    }
+    MutexLock l(&mu_);
+    intptr_t x = GetAllowed();
+    if (x <= 0) {
+      return false;
+    } else {
+      SetAllowed(x - 1);
+      return true;
+    }
+  }
+
+  // Release a resource acquired by a previous call to Acquire() that returned
+  // true.
+  void Release() LOCKS_EXCLUDED(mu_) {
+    MutexLock l(&mu_);
+    SetAllowed(GetAllowed() + 1);
+  }
+
+ private:
+  port::Mutex mu_;
+  port::AtomicPointer allowed_;
+
+  intptr_t GetAllowed() const {
+    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
+  }
+
+  void SetAllowed(intptr_t v) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+    allowed_.Release_Store(reinterpret_cast<void*>(v));
+  }
+
+  Limiter(const Limiter&);
+  void operator=(const Limiter&);
+};
+
+class WindowsSequentialFile : public SequentialFile {
+ public:
+  WindowsSequentialFile(std::string fname, ScopedHandle file)
+      : filename_(fname), file_(std::move(file)) {}
+  ~WindowsSequentialFile() override {}
+
+  Status Read(size_t n, Slice* result, char* scratch) override {
+    Status s;
+    DWORD bytes_read;
+    // DWORD is 32-bit, but size_t could technically be larger. However leveldb
+    // files are limited to leveldb::Options::max_file_size which is clamped to
+    // 1<<30 or 1 GiB.
+    assert(n <= std::numeric_limits<DWORD>::max());
+    if (!::ReadFile(file_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+                    nullptr)) {
+      s = WindowsError(filename_, ::GetLastError());
+    } else {
+      *result = Slice(scratch, bytes_read);
+    }
+    return s;
+  }
+
+  Status Skip(uint64_t n) override {
+    LARGE_INTEGER distance;
+    distance.QuadPart = n;
+    if (!::SetFilePointerEx(file_.get(), distance, nullptr, FILE_CURRENT)) {
+      return WindowsError(filename_, ::GetLastError());
+    }
+    return Status::OK();
+  }
+
+ private:
+  std::string filename_;
+  ScopedHandle file_;
+};
+
+class WindowsRandomAccessFile : public RandomAccessFile {
+ public:
+  WindowsRandomAccessFile(std::string fname, ScopedHandle handle)
+      : filename_(fname), handle_(std::move(handle)) {}
+
+  ~WindowsRandomAccessFile() override = default;
+
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
+    DWORD bytes_read = 0;
+    OVERLAPPED overlapped = {0};
+
+    overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+    overlapped.Offset = static_cast<DWORD>(offset);
+    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+                    &overlapped)) {
+      DWORD error_code = ::GetLastError();
+      if (error_code != ERROR_HANDLE_EOF) {
+        *result = Slice(scratch, 0);
+        return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
+      }
+    }
+
+    *result = Slice(scratch, bytes_read);
+    return Status::OK();
+  }
+
+ private:
+  std::string filename_;
+  ScopedHandle handle_;
+};
+
+class WindowsMmapReadableFile : public RandomAccessFile {
+ public:
+  // base[0,length-1] contains the mmapped contents of the file.
+  WindowsMmapReadableFile(std::string fname, void* base, size_t length,
+                          Limiter* limiter)
+      : filename_(std::move(fname)),
+        mmapped_region_(base),
+        length_(length),
+        limiter_(limiter) {}
+
+  ~WindowsMmapReadableFile() override {
+    ::UnmapViewOfFile(mmapped_region_);
+    limiter_->Release();
+  }
+
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
+    Status s;
+    if (offset + n > length_) {
+      *result = Slice();
+      s = WindowsError(filename_, ERROR_INVALID_PARAMETER);
+    } else {
+      *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+    }
+    return s;
+  }
+
+ private:
+  std::string filename_;
+  void* mmapped_region_;
+  size_t length_;
+  Limiter* limiter_;
+};
+
+class WindowsWritableFile : public WritableFile {
+ public:
+  WindowsWritableFile(std::string fname, ScopedHandle handle)
+      : filename_(std::move(fname)), handle_(std::move(handle)), pos_(0) {}
+
+  ~WindowsWritableFile() override = default;
+
+  Status Append(const Slice& data) override {
+    size_t n = data.size();
+    const char* p = data.data();
+
+    // Fit as much as possible into buffer.
+    size_t copy = std::min(n, kWritableFileBufferSize - pos_);
+    memcpy(buf_ + pos_, p, copy);
+    p += copy;
+    n -= copy;
+    pos_ += copy;
+    if (n == 0) {
+      return Status::OK();
+    }
+
+    // Can't fit in buffer, so need to do at least one write.
+    Status s = FlushBuffered();
+    if (!s.ok()) {
+      return s;
+    }
+
+    // Small writes go to buffer, large writes are written directly.
+    if (n < kWritableFileBufferSize) {
+      memcpy(buf_, p, n);
+      pos_ = n;
+      return Status::OK();
+    }
+    return WriteRaw(p, n);
+  }
+
+  Status Close() override {
+    Status result = FlushBuffered();
+    if (!handle_.Close() && result.ok()) {
+      result = WindowsError(filename_, ::GetLastError());
+    }
+    return result;
+  }
+
+  Status Flush() override { return FlushBuffered(); }
+
+  Status Sync() override {
+    // On Windows there is no need to sync the parent directory. Its metadata
+    // is updated via the creation of the new file, without an explicit sync.
+    return FlushBuffered();
+  }
+
+ private:
+  Status FlushBuffered() {
+    Status s = WriteRaw(buf_, pos_);
+    pos_ = 0;
+    return s;
+  }
+
+  Status WriteRaw(const char* p, size_t n) {
+    DWORD bytes_written;
+    if (!::WriteFile(handle_.get(), p, static_cast<DWORD>(n), &bytes_written,
+                     nullptr)) {
+      return Status::IOError(filename_,
+                             GetWindowsErrorMessage(::GetLastError()));
+    }
+    return Status::OK();
+  }
+
+  // buf_[0, pos_-1] contains data to be written to handle_.
+  const std::string filename_;
+  ScopedHandle handle_;
+  char buf_[kWritableFileBufferSize];
+  size_t pos_;
+};
+
+// Lock or unlock the entire file as specified by |lock|. Returns true
+// when successful, false upon failure. Caller should call ::GetLastError()
+// to determine the cause of failure.
+bool LockOrUnlock(HANDLE handle, bool lock) {
+  if (lock) {
+    return ::LockFile(handle,
+                      /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+                      /*nNumberOfBytesToLockLow=*/MAXDWORD,
+                      /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+  } else {
+    return ::UnlockFile(handle,
+                        /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+                        /*nNumberOfBytesToLockLow=*/MAXDWORD,
+                        /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+  }
+}
+
+class WindowsFileLock : public FileLock {
+ public:
+  WindowsFileLock(ScopedHandle handle, std::string name)
+      : handle_(std::move(handle)), name_(std::move(name)) {}
+
+  ScopedHandle& handle() { return handle_; }
+  const std::string& name() const { return name_; }
+
+ private:
+  ScopedHandle handle_;
+  std::string name_;
+};
+
+class WindowsEnv : public Env {
+ public:
+  WindowsEnv();
+  ~WindowsEnv() override {
+    static char msg[] = "Destroying Env::Default()\n";
+    fwrite(msg, 1, sizeof(msg), stderr);
+    abort();
+  }
+
+  Status NewSequentialFile(const std::string& fname,
+                           SequentialFile** result) override {
+    *result = nullptr;
+    DWORD desired_access = GENERIC_READ;
+    DWORD share_mode = FILE_SHARE_READ;
+    if (g_relax_permissions) {
+      desired_access |= GENERIC_WRITE;
+      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+    }
+    ScopedHandle handle =
+        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (!handle.is_valid()) {
+      return WindowsError(fname, ::GetLastError());
+    }
+    *result = new WindowsSequentialFile(fname, std::move(handle));
+    return Status::OK();
+  }
+
+  Status NewRandomAccessFile(const std::string& fname,
+                             RandomAccessFile** result) override {
+    *result = nullptr;
+    DWORD desired_access = GENERIC_READ;
+    DWORD share_mode = FILE_SHARE_READ;
+    if (g_relax_permissions) {
+      // desired_access |= GENERIC_WRITE;
+      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+    }
+    DWORD file_flags = FILE_ATTRIBUTE_READONLY;
+
+    ScopedHandle handle =
+        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+                      OPEN_EXISTING, file_flags, nullptr);
+    if (!handle.is_valid()) {
+      return WindowsError(fname, ::GetLastError());
+    }
+    if (!mmap_limiter_.Acquire()) {
+      *result = new WindowsRandomAccessFile(fname, std::move(handle));
+      return Status::OK();
+    }
+
+    LARGE_INTEGER file_size;
+    if (!::GetFileSizeEx(handle.get(), &file_size)) {
+      return WindowsError(fname, ::GetLastError());
+    }
+
+    ScopedHandle mapping =
+        ::CreateFileMappingA(handle.get(),
+                             /*security attributes=*/nullptr, PAGE_READONLY,
+                             /*dwMaximumSizeHigh=*/0,
+                             /*dwMaximumSizeLow=*/0, nullptr);
+    if (mapping.is_valid()) {
+      void* base = MapViewOfFile(mapping.get(), FILE_MAP_READ, 0, 0, 0);
+      if (base) {
+        *result = new WindowsMmapReadableFile(
+            fname, base, static_cast<size_t>(file_size.QuadPart),
+            &mmap_limiter_);
+        return Status::OK();
+      }
+    }
+    Status s = WindowsError(fname, ::GetLastError());
+
+    if (!s.ok()) {
+      mmap_limiter_.Release();
+    }
+    return s;
+  }
+
+  Status NewWritableFile(const std::string& fname,
+                         WritableFile** result) override {
+    DWORD desired_access = GENERIC_WRITE;
+    DWORD share_mode = 0;
+    if (g_relax_permissions) {
+      desired_access |= GENERIC_READ;
+      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+    }
+
+    ScopedHandle handle =
+        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+                      CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (!handle.is_valid()) {
+      *result = nullptr;
+      return WindowsError(fname, ::GetLastError());
+    }
+
+    *result = new WindowsWritableFile(fname, std::move(handle));
+    return Status::OK();
+  }
+
+  Status NewAppendableFile(const std::string& fname,
+                           WritableFile** result) override {
+    ScopedHandle handle =
+        ::CreateFileA(fname.c_str(), FILE_APPEND_DATA, 0, nullptr, OPEN_ALWAYS,
+                      FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (!handle.is_valid()) {
+      *result = nullptr;
+      return WindowsError(fname, ::GetLastError());
+    }
+
+    *result = new WindowsWritableFile(fname, std::move(handle));
+    return Status::OK();
+  }
+
+  bool FileExists(const std::string& fname) override {
+    return GetFileAttributesA(fname.c_str()) != INVALID_FILE_ATTRIBUTES;
+  }
+
+  Status GetChildren(const std::string& dir,
+                     std::vector<std::string>* result) override {
+    const std::string find_pattern = dir + "\\*";
+    WIN32_FIND_DATAA find_data;
+    HANDLE dir_handle = ::FindFirstFileA(find_pattern.c_str(), &find_data);
+    if (dir_handle == INVALID_HANDLE_VALUE) {
+      DWORD last_error = ::GetLastError();
+      if (last_error == ERROR_FILE_NOT_FOUND) {
+        return Status::OK();
+      }
+      return WindowsError(dir, last_error);
+    }
+    do {
+      char base_name[_MAX_FNAME];
+      char ext[_MAX_EXT];
+
+      if (!_splitpath_s(find_data.cFileName, nullptr, 0, nullptr, 0, base_name,
+                        ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
+        result->emplace_back(std::string(base_name) + ext);
+      }
+    } while (::FindNextFileA(dir_handle, &find_data));
+    DWORD last_error = ::GetLastError();
+    ::FindClose(dir_handle);
+    if (last_error != ERROR_NO_MORE_FILES) {
+      return WindowsError(dir, last_error);
+    }
+    return Status::OK();
+  }
+
+  Status DeleteFile(const std::string& fname) override {
+    if (!::DeleteFileA(fname.c_str())) {
+      return WindowsError(fname, ::GetLastError());
+    }
+    return Status::OK();
+  }
+
+  Status CreateDir(const std::string& name) override {
+    if (!::CreateDirectoryA(name.c_str(), nullptr)) {
+      return WindowsError(name, ::GetLastError());
+    }
+    return Status::OK();
+  }
+
+  Status DeleteDir(const std::string& name) override {
+    if (!::RemoveDirectoryA(name.c_str())) {
+      return WindowsError(name, ::GetLastError());
+    }
+    return Status::OK();
+  }
+
+  Status GetFileSize(const std::string& fname, uint64_t* size) override {
+    WIN32_FILE_ATTRIBUTE_DATA attrs;
+    if (!::GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) {
+      return WindowsError(fname, ::GetLastError());
+    }
+    ULARGE_INTEGER file_size;
+    file_size.HighPart = attrs.nFileSizeHigh;
+    file_size.LowPart = attrs.nFileSizeLow;
+    *size = file_size.QuadPart;
+    return Status::OK();
+  }
+
+  Status RenameFile(const std::string& src,
+                    const std::string& target) override {
+    // Try a simple move first. It will only succeed when |target| does not
+    // already exist.
+    if (::MoveFileA(src.c_str(), target.c_str())) {
+      return Status::OK();
+    }
+    DWORD move_error = ::GetLastError();
+
+    // Try the full-blown replace if the move fails, as ReplaceFile will only
+    // succeed when |target| does exist. When writing to a network share, we
+    // may not be able to change the ACLs. Ignore ACL errors then
+    // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+    if (::ReplaceFileA(target.c_str(), src.c_str(), nullptr,
+                       REPLACEFILE_IGNORE_MERGE_ERRORS, nullptr, nullptr)) {
+      return Status::OK();
+    }
+    DWORD replace_error = ::GetLastError();
+    // In the case of ERROR_FILE_NOT_FOUND from ReplaceFile, it is likely
+    // that |target| does not exist. In this case, the more relevant error
+    // comes from the call to MoveFileA.
+    if (replace_error == ERROR_FILE_NOT_FOUND ||
+        replace_error == ERROR_PATH_NOT_FOUND) {
+      return WindowsError(src, move_error);
+    } else {
+      return WindowsError(src, replace_error);
+    }
+  }
+
+  Status LockFile(const std::string& fname, FileLock** lock) override {
+    *lock = nullptr;
+    Status result;
+    ScopedHandle handle = ::CreateFileA(
+        fname.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
+        /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+        nullptr);
+    if (!handle.is_valid()) {
+      result = WindowsError(fname, ::GetLastError());
+    } else if (!LockOrUnlock(handle.get(), true)) {
+      result = WindowsError("lock " + fname, ::GetLastError());
+    } else {
+      *lock = new WindowsFileLock(std::move(handle), std::move(fname));
+    }
+    return result;
+  }
+
+  Status UnlockFile(FileLock* lock) override {
+    std::unique_ptr<WindowsFileLock> my_lock(
+        reinterpret_cast<WindowsFileLock*>(lock));
+    Status result;
+    if (!LockOrUnlock(my_lock->handle().get(), false)) {
+      result = WindowsError("unlock", ::GetLastError());
+    }
+    return result;
+  }
+
+  void Schedule(void (*function)(void*), void* arg) override;
+
+  void StartThread(void (*function)(void* arg), void* arg) override {
+    std::thread t(function, arg);
+    t.detach();
+  }
+
+  Status GetTestDirectory(std::string* result) override {
+    const char* env = getenv("TEST_TMPDIR");
+    if (env && env[0] != '\0') {
+      *result = env;
+      return Status::OK();
+    }
+
+    char tmp_path[MAX_PATH];
+    if (!GetTempPathA(ARRAYSIZE(tmp_path), tmp_path)) {
+      return WindowsError("GetTempPath", ::GetLastError());
+    }
+    std::stringstream ss;
+    ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
+    *result = ss.str();
+
+    // Directory may already exist
+    CreateDir(*result);
+    return Status::OK();
+  }
+
+  Status NewLogger(const std::string& fname, Logger** result) override {
+    ScopedHandle handle =
+        ::CreateFileA(fname.c_str(), GENERIC_WRITE, FILE_SHARE_READ, nullptr,
+                      CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (!handle.is_valid()) {
+      return WindowsError("NewLogger", ::GetLastError());
+    }
+    *result = new WindowsLogger(handle.Release());
+    return Status::OK();
+  }
+
+  uint64_t NowMicros() override {
+    // GetSystemTimeAsFileTime typically has a resolution of 10-20 msec.
+    // TODO(cmumford): Switch to GetSystemTimePreciseAsFileTime which is
+    // available in Windows 8 and later.
+    FILETIME ft;
+    ::GetSystemTimeAsFileTime(&ft);
+    // Each tick represents a 100-nanosecond interval since January 1, 1601
+    // (UTC).
+    uint64_t num_ticks =
+        (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
+    return num_ticks / 10;
+  }
+
+  void SleepForMicroseconds(int micros) override {
+    std::this_thread::sleep_for(std::chrono::microseconds(micros));
+  }
+
+ private:
+  // BGThread() is the body of the background thread
+  void BGThread();
+
+  std::mutex mu_;
+  std::condition_variable bgsignal_;
+  bool started_bgthread_;
+
+  // Entry per Schedule() call
+  struct BGItem {
+    void* arg;
+    void (*function)(void*);
+  };
+  typedef std::deque<BGItem> BGQueue;
+  BGQueue queue_;
+
+  Limiter mmap_limiter_;
+};
+
+// Return the maximum number of concurrent mmaps.
+int MaxMmaps() {
+  if (g_mmap_limit >= 0) {
+    return g_mmap_limit;
+  }
+  // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
+  g_mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
+  return g_mmap_limit;
+}
+
+WindowsEnv::WindowsEnv()
+    : started_bgthread_(false), mmap_limiter_(MaxMmaps()) {}
+
+void WindowsEnv::Schedule(void (*function)(void*), void* arg) {
+  std::lock_guard<std::mutex> guard(mu_);
+
+  // Start background thread if necessary
+  if (!started_bgthread_) {
+    started_bgthread_ = true;
+    std::thread t(&WindowsEnv::BGThread, this);
+    t.detach();
+  }
+
+  // If the queue is currently empty, the background thread may be
+  // waiting, so wake it.
+  if (queue_.empty()) {
+    bgsignal_.notify_one();
+  }
+
+  // Add to the work queue
+  queue_.push_back(BGItem());
+  queue_.back().function = function;
+  queue_.back().arg = arg;
+}
+
+void WindowsEnv::BGThread() {
+  while (true) {
+    // Wait until there is an item that is ready to run
+    std::unique_lock<std::mutex> lk(mu_);
+    bgsignal_.wait(lk, [this] { return !queue_.empty(); });
+
+    void (*function)(void*) = queue_.front().function;
+    void* arg = queue_.front().arg;
+    queue_.pop_front();
+
+    lk.unlock();
+    (*function)(arg);
+  }
+}
+
+}  // namespace
+
+static std::once_flag once;
+static Env* default_env;
+static void InitDefaultEnv() { default_env = new WindowsEnv(); }
+
+void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
+  assert(default_env == nullptr);
+  g_mmap_limit = limit;
+}
+
+void EnvWindowsTestHelper::RelaxFilePermissions() {
+  assert(default_env == nullptr);
+  g_relax_permissions = true;
+}
+
+Env* Env::Default() {
+  std::call_once(once, InitDefaultEnv);
+  return default_env;
+}
+
+}  // namespace leveldb
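
An illustrative aside, not part of the patch: the mmap_limiter_ usage in NewRandomAccessFile above follows an acquire-or-fall-back discipline in which every failure path after a successful Acquire() must call Release(). A minimal self-contained sketch of that discipline, with invented names and a std::atomic counter standing in for the Limiter:

    #include <atomic>

    std::atomic<int> g_mmap_slots{1000};  // invented counter for the example

    bool AcquireSlot() {
      if (g_mmap_slots.fetch_sub(1) > 0) return true;
      g_mmap_slots.fetch_add(1);  // counter went non-positive: undo and fail
      return false;
    }

    void ReleaseSlot() { g_mmap_slots.fetch_add(1); }

    // A caller that fails after AcquireSlot() succeeded (as NewRandomAccessFile
    // does when CreateFileMappingA or MapViewOfFile fails) must call
    // ReleaseSlot() before falling back to plain reads, or the budget leaks.
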
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
new file mode 100644
index 0000000..4451b9e
--- /dev/null
+++ b/util/env_windows_test.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/env.h"
+
+#include "port/port.h"
+#include "util/env_windows_test_helper.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+static const int kMMapLimit = 4;
+
+class EnvWindowsTest {
+ public:
+  Env* env_;
+  EnvWindowsTest() : env_(Env::Default()) {}
+
+  static void SetFileLimits(int mmap_limit) {
+    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
+  }
+};
+
+TEST(EnvWindowsTest, TestOpenOnRead) {
+  // Write some test data to a single file that will be opened |n| times.
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string test_file = test_dir + "/open_on_read.txt";
+
+  FILE* f = fopen(test_file.c_str(), "w");
+  ASSERT_TRUE(f != nullptr);
+  const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
+  fputs(kFileData, f);
+  fclose(f);
+
+  // Open the test file enough times to exceed the mmap limit, forcing
+  // leveldb::WindowsEnv to switch from mapping the file into memory
+  // to basic file reading.
+  const int kNumFiles = kMMapLimit + 5;
+  leveldb::RandomAccessFile* files[kNumFiles] = {0};
+  for (int i = 0; i < kNumFiles; i++) {
+    ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+  }
+  char scratch;
+  Slice read_result;
+  for (int i = 0; i < kNumFiles; i++) {
+    ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+    ASSERT_EQ(kFileData[i], read_result[0]);
+  }
+  for (int i = 0; i < kNumFiles; i++) {
+    delete files[i];
+  }
+  ASSERT_OK(env_->DeleteFile(test_file));
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  // All tests currently run with the same read-only file limits.
+  leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
+  return leveldb::test::RunAllTests();
+}
diff --git a/util/env_windows_test_helper.h b/util/env_windows_test_helper.h
new file mode 100644
index 0000000..5ffbe44
--- /dev/null
+++ b/util/env_windows_test_helper.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+#define STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+
+namespace leveldb {
+
+class EnvWindowsTest;
+
+// A helper for the Windows Env to facilitate testing.
+class EnvWindowsTestHelper {
+ private:
+  friend class CorruptionTest;
+  friend class EnvWindowsTest;
+
+  // Set the maximum number of read-only files that will be mapped via mmap.
+  // Must be called before creating an Env.
+  static void SetReadOnlyMMapLimit(int limit);
+
+  // Relax file permissions for tests. This results in most files being opened
+  // with read-write permissions. This is helpful for corruption tests that
+  // need to corrupt the database files for open databases.
+  static void RelaxFilePermissions();
+};
+
+}  // namespace leveldb
+
+#endif  // STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
diff --git a/util/windows_logger.h b/util/windows_logger.h
new file mode 100644
index 0000000..b2a2cae
--- /dev/null
+++ b/util/windows_logger.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Logger implementation for the Windows platform.
+
+#ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+#define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+
+#include <stdio.h>
+
+#include <cassert>
+#include <cstdarg>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
+#include "leveldb/env.h"
+
+namespace leveldb {
+
+class WindowsLogger final : public Logger {
+ public:
+  WindowsLogger(HANDLE handle) : handle_(handle) {
+    assert(handle != INVALID_HANDLE_VALUE);
+  }
+
+  ~WindowsLogger() override { ::CloseHandle(handle_); }
+
+  void Logv(const char* format, va_list arguments) override {
+    // Record the time as close to the Logv() call as possible.
+    SYSTEMTIME now_components;
+    ::GetLocalTime(&now_components);
+
+    // Record the thread ID.
+    constexpr const int kMaxThreadIdSize = 32;
+    std::ostringstream thread_stream;
+    thread_stream << std::this_thread::get_id();
+    std::string thread_id = thread_stream.str();
+    if (thread_id.size() > kMaxThreadIdSize) {
+      thread_id.resize(kMaxThreadIdSize);
+    }
+
+    // We first attempt to print into a stack-allocated buffer. If this attempt
+    // fails, we make a second attempt with a dynamically allocated buffer.
+    constexpr const int kStackBufferSize = 512;
+    char stack_buffer[kStackBufferSize];
+    static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+                  "sizeof(char) is expected to be 1 in C++");
+
+    int dynamic_buffer_size = 0;  // Computed in the first iteration.
+    for (int iteration = 0; iteration < 2; ++iteration) {
+      const int buffer_size =
+          (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+      char* const buffer =
+          (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+      // Print the header into the buffer.
+      // TODO(costan): Sync this logger with another logger.
+      int buffer_offset = snprintf(
+          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.wYear, now_components.wMonth, now_components.wDay,
+          now_components.wHour, now_components.wMinute, now_components.wSecond,
+          static_cast<int>(now_components.wMilliseconds * 1000),
+          std::stoull(thread_id));
+
+      // The header can be at most 28 characters (10 date + 15 time +
+      // 3 spacing) plus the thread ID, which should fit comfortably into the
+      // static buffer.
+      assert(buffer_offset <= 28 + kMaxThreadIdSize);
+      static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+                    "stack-allocated buffer may not fit the message header");
+      assert(buffer_offset < buffer_size);
+
+      // Print the message into the buffer.
+      std::va_list arguments_copy;
+      va_copy(arguments_copy, arguments);
+      buffer_offset += std::vsnprintf(buffer + buffer_offset,
+                                      buffer_size - buffer_offset, format,
+                                      arguments_copy);
+      va_end(arguments_copy);
+
+      // The code below may append a newline at the end of the buffer, which
+      // requires an extra character.
+      if (buffer_offset >= buffer_size - 1) {
+        // The message did not fit into the buffer.
+        if (iteration == 0) {
+          // Re-run the loop and use a dynamically-allocated buffer. The buffer
+          // will be large enough for the log message, an extra newline and a
+          // null terminator.
+          dynamic_buffer_size = buffer_offset + 2;
+          continue;
+        }
+
+        // The dynamically-allocated buffer was incorrectly sized. This should
+        // not happen, assuming a correct implementation of (v)snprintf. Fail
+        // in tests, recover by truncating the log message in production.
+        assert(false);
+        buffer_offset = buffer_size - 1;
+      }
+
+      // Add a newline if necessary.
+      if (buffer[buffer_offset - 1] != '\n') {
+        buffer[buffer_offset] = '\n';
+        ++buffer_offset;
+      }
+
+      assert(buffer_offset <= buffer_size);
+      ::WriteFile(handle_, buffer, buffer_offset, nullptr, nullptr);
+
+      if (iteration != 0) {
+        delete[] buffer;
+      }
+      break;
+    }
+  }
+
+ private:
+  HANDLE handle_;
+};
+
+}  // namespace leveldb
+
+#endif  // STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
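
An illustrative aside, not part of the patch: WindowsLogger::Logv above formats in two passes, first into a stack buffer and then, only if that did not fit, into a heap buffer sized from the first attempt. A self-contained sketch of just that allocation strategy (the helper name is invented; the real method also prepends a date/thread-ID header and appends a newline):

    #include <cstdarg>
    #include <cstdio>
    #include <string>
    #include <vector>

    std::string FormatTwoPass(const char* format, ...) {
      char stack_buffer[512];
      std::va_list args;
      va_start(args, format);
      std::va_list args_copy;
      va_copy(args_copy, args);
      // vsnprintf returns the length the full output needs, even if truncated.
      int needed =
          std::vsnprintf(stack_buffer, sizeof(stack_buffer), format, args);
      va_end(args);
      if (needed < 0) {  // encoding error; nothing sensible to return
        va_end(args_copy);
        return std::string();
      }
      if (needed < static_cast<int>(sizeof(stack_buffer))) {
        va_end(args_copy);
        return std::string(stack_buffer, needed);  // first pass was enough
      }
      std::vector<char> heap_buffer(needed + 1);   // second pass, exact size
      std::vsnprintf(heap_buffer.data(), heap_buffer.size(), format, args_copy);
      va_end(args_copy);
      return std::string(heap_buffer.data(), needed);
    }
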

From 808e59ec6a160244960cda64b393968ffbdae72c Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 4 Mar 2019 18:27:03 -0800
Subject: [PATCH 039/181] Improve CI configuration.

This CL fixes the following issues:
* The Travis CI had the ctest invocation followed by a ";", so non-zero
  exit codes (indicating test failures) did not cause the build to fail.
* The AppVeyor CI had the ctest invocation followed by a ";", causing an
  error on Windows, where "&" plays the role of ";" [1].

The Windows CI (AppVeyor) will still be red after this CL, as some of
the tests are failing. However, this CL is a step forward, as it gets us
from failing to start tests to running tests and recording success/error
states.

[1] https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-xp/bb490954(v=technet.10)#using-multiple-commands-and-conditional-processing-symbols

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=236765633
---
 .appveyor.yml | 5 +----
 .travis.yml   | 2 +-
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/.appveyor.yml b/.appveyor.yml
index 78aeaf1..c24b17e 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -20,9 +20,6 @@ configuration:
   - RelWithDebInfo
   - Debug
 
-build:
-  verbosity: minimal
-
 build_script:
   - git submodule update --init --recursive
   - mkdir build
@@ -35,4 +32,4 @@ build_script:
   - cd ..
 
 test_script:
-  - cd build ; ctest --verbose ; cd ..
+  - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/.travis.yml b/.travis.yml
index 3ff5cfc..0e1ad6a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -70,7 +70,7 @@ before_script:
 - cd ..
 
 script:
-- cd build ; ctest --verbose ; cd ..
+- cd build && ctest --verbose && cd ..
 - "if [ -f build/db_bench ] ; then build/db_bench ; fi"
 - "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
 - "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"

From ed76289b259d42d0a57c147e791e2c235ed28805 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Thu, 7 Mar 2019 08:52:24 -0800
Subject: [PATCH 040/181] Align windows_logger with posix_logger.

Fixes GitHub issue #657.

This CL also makes the Windows CI green.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237255887
---
 CMakeLists.txt        | 20 ++++++++++----------
 util/env_windows.cc   | 14 +++++++-------
 util/windows_logger.h | 35 ++++++++++++++++++++++-------------
 3 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1eaf48e..1562e3e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -188,17 +188,17 @@ target_sources(leveldb
 )
 
 if (WIN32)
-target_sources(leveldb
-  PRIVATE
-    "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
-    "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
-)
+  target_sources(leveldb
+    PRIVATE
+      "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
+      "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
+  )
 else (WIN32)
-target_sources(leveldb
-  PRIVATE
-    "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
-    "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
-)
+  target_sources(leveldb
+    PRIVATE
+      "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
+      "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
+  )
 endif (WIN32)
 
 # MemEnv is not part of the interface and could be pulled to a separate library.
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 03da266..57932bb 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -621,15 +621,15 @@ class WindowsEnv : public Env {
     return Status::OK();
   }
 
-  Status NewLogger(const std::string& fname, Logger** result) override {
-    ScopedHandle handle =
-        ::CreateFileA(fname.c_str(), GENERIC_WRITE, FILE_SHARE_READ, nullptr,
-                      CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
-    if (!handle.is_valid()) {
+  Status NewLogger(const std::string& filename, Logger** result) override {
+    std::FILE* fp = std::fopen(filename.c_str(), "w");
+    if (fp == nullptr) {
+      *result = nullptr;
       return WindowsError("NewLogger", ::GetLastError());
+    } else {
+      *result = new WindowsLogger(fp);
+      return Status::OK();
     }
-    *result = new WindowsLogger(handle.Release());
-    return Status::OK();
   }
 
   uint64_t NowMicros() override {
diff --git a/util/windows_logger.h b/util/windows_logger.h
index b2a2cae..96799bc 100644
--- a/util/windows_logger.h
+++ b/util/windows_logger.h
@@ -7,10 +7,9 @@
 #ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
 #define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
 
-#include <stdio.h>
-
 #include <cassert>
 #include <cstdarg>
+#include <cstdio>
 #include <ctime>
 #include <sstream>
 #include <thread>
@@ -21,11 +20,16 @@ namespace leveldb {
 
 class WindowsLogger final : public Logger {
  public:
-  WindowsLogger(HANDLE handle) : handle_(handle) {
-    assert(handle != INVALID_HANDLE_VALUE);
+  // Creates a logger that writes to the given file.
+  //
+  // The WindowsLogger instance takes ownership of the file handle.
+  explicit WindowsLogger(std::FILE* fp) : fp_(fp) {
+    assert(fp != nullptr);
   }
 
-  ~WindowsLogger() override { ::CloseHandle(handle_); }
+  ~WindowsLogger() override {
+    std::fclose(fp_);
+  }
 
   void Logv(const char* format, va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
@@ -56,16 +60,20 @@ class WindowsLogger final : public Logger {
           (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
 
       // Print the header into the buffer.
-      // TODO(costan): Sync this logger with another logger.
       int buffer_offset = snprintf(
-          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
-          now_components.wYear, now_components.wMonth, now_components.wDay,
-          now_components.wHour, now_components.wMinute, now_components.wSecond,
+          buffer, buffer_size,
+          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.wYear,
+          now_components.wMonth,
+          now_components.wDay,
+          now_components.wHour,
+          now_components.wMinute,
+          now_components.wSecond,
           static_cast<int>(now_components.wMilliseconds * 1000),
-          std::stoull(thread_id));
+          thread_id.c_str());
 
       // The header can be at most 28 characters (10 date + 15 time +
-      // 3 spacing) plus the thread ID, which should fit comfortably into the
+      // 3 delimiters) plus the thread ID, which should fit comfortably into the
       // static buffer.
       assert(buffer_offset <= 28 + kMaxThreadIdSize);
       static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
@@ -106,7 +114,8 @@ class WindowsLogger final : public Logger {
       }
 
       assert(buffer_offset <= buffer_size);
-      ::WriteFile(handle_, buffer, buffer_offset, nullptr, nullptr);
+      std::fwrite(buffer, 1, buffer_offset, fp_);
+      std::fflush(fp_);
 
       if (iteration != 0) {
         delete[] buffer;
@@ -116,7 +125,7 @@ class WindowsLogger final : public Logger {
   }
 
  private:
-  HANDLE handle_;
+  std::FILE* const fp_;
 };
 
 }  // namespace leveldb

From 04470825ac96cab0d9d16e4ed410349d082fbf82 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Thu, 7 Mar 2019 12:08:35 -0800
Subject: [PATCH 041/181] Add AppVeyor (Windows CI) badge to README.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237295321
---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 493bdbd..4f8ce63 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
 
 [![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
+[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
 
 Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
 

From a20508dc6a18a34e05a6fc476a8d587fa9bb6608 Mon Sep 17 00:00:00 2001
From: Dimitris Apostolou <dimitris.apostolou@icloud.com>
Date: Mon, 11 Mar 2019 19:36:11 +0200
Subject: [PATCH 042/181] Fix typo (#565)

---
 db/version_set.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index ae06089..156a007 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -345,7 +345,7 @@ Status Version::Get(const ReadOptions& options,
 
   // We can search level-by-level since entries never hop across
   // levels.  Therefore we are guaranteed that if we find data
-  // in an smaller level, later levels are irrelevant.
+  // in a smaller level, later levels are irrelevant.
   std::vector<FileMetaData*> tmp;
   FileMetaData* tmp2;
   for (int level = 0; level < config::kNumLevels; level++) {

From cf1d1ab255de2a741695aec53d83e4f808f9e819 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 11 Mar 2019 10:41:03 -0700
Subject: [PATCH 043/181] leveldb: Remove unused file port/win/stdint.h.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237832823
---
 port/win/stdint.h | 24 ------------------------
 1 file changed, 24 deletions(-)
 delete mode 100644 port/win/stdint.h

diff --git a/port/win/stdint.h b/port/win/stdint.h
deleted file mode 100644
index 39edd0d..0000000
--- a/port/win/stdint.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char           int8_t;
-typedef signed short          int16_t;
-typedef signed int            int32_t;
-typedef signed long long      int64_t;
-typedef unsigned char         uint8_t;
-typedef unsigned short        uint16_t;
-typedef unsigned int          uint32_t;
-typedef unsigned long long    uint64_t;
-
-#endif  // STORAGE_LEVELDB_PORT_WIN_STDINT_H_

From dd906262fd364c08a652dfa914f9995f6b7608a9 Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Mon, 11 Mar 2019 12:32:50 -0700
Subject: [PATCH 044/181] Make InMemoryEnv more consistent with filesystem-based
 Envs.

Envs (like the POSIX Env) that use an actual filesystem behave
differently from InMemoryEnv with regard to writing data to a currently
open file.

InMemoryEnv::NewWritableFile previously deleted any existing file
before creating a new one, so a previously opened file was effectively
unlinked from the file map. This change truncates an open file instead,
so that reads through previously opened handles see the new data
(see the sketch below).

This should have no impact on leveldb as it never has the same
file open for both read and write access. This change is only
being made for tests (specifically a future change to corruption_test)
to allow them to be decoupled from the underlying platform and
allow them to use an Env.
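
To make the new semantics concrete, a client-level sketch (not part of this
change; Status return values are ignored for brevity, and it assumes the
existing NewMemEnv() helper from helpers/memenv/memenv.h):

    #include "helpers/memenv/memenv.h"
    #include "leveldb/env.h"
    #include "leveldb/slice.h"

    void OverwriteSketch() {
      leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());

      leveldb::WritableFile* w;
      env->NewWritableFile("/f", &w);      // creates the FileState for "/f"
      w->Append("old");
      delete w;

      leveldb::RandomAccessFile* r;
      env->NewRandomAccessFile("/f", &r);  // holds a ref to that FileState

      env->NewWritableFile("/f", &w);      // before: unlinked "/f" and created a
      w->Append("new");                    // fresh file; now: truncates in place
      delete w;

      char scratch[3];
      leveldb::Slice result;
      r->Read(0, 3, &result, scratch);     // now sees "new", matching a
      delete r;                            // filesystem-backed Env
      delete env;
    }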

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237858231
---
 helpers/memenv/memenv.cc      | 46 +++++++++++++++++++++++------------
 helpers/memenv/memenv_test.cc | 23 ++++++++++++++++++
 2 files changed, 53 insertions(+), 16 deletions(-)

diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index d44627b..b78a998 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -51,9 +51,22 @@ class FileState {
     }
   }
 
-  uint64_t Size() const { return size_; }
+  uint64_t Size() const {
+    MutexLock lock(&blocks_mutex_);
+    return size_;
+  }
+
+  void Truncate() {
+    MutexLock lock(&blocks_mutex_);
+    for (char*& block : blocks_) {
+      delete[] block;
+    }
+    blocks_.clear();
+    size_ = 0;
+  }
 
   Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+    MutexLock lock(&blocks_mutex_);
     if (offset > size_) {
       return Status::IOError("Offset greater than file size.");
     }
@@ -100,6 +113,7 @@ class FileState {
     const char* src = data.data();
     size_t src_len = data.size();
 
+    MutexLock lock(&blocks_mutex_);
     while (src_len > 0) {
       size_t avail;
       size_t offset = size_ % kBlockSize;
@@ -128,10 +142,7 @@ class FileState {
  private:
   // Private since only Unref() should be used to delete it.
   ~FileState() {
-    for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end();
-         ++i) {
-      delete [] *i;
-    }
+    Truncate();
   }
 
   // No copying allowed.
@@ -141,11 +152,9 @@ class FileState {
   port::Mutex refs_mutex_;
   int refs_ GUARDED_BY(refs_mutex_);
 
-  // The following fields are not protected by any mutex. They are only mutable
-  // while the file is being written, and concurrent access is not allowed
-  // to writable files.
-  std::vector<char*> blocks_;
-  uint64_t size_;
+  mutable port::Mutex blocks_mutex_;
+  std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
+  uint64_t size_ GUARDED_BY(blocks_mutex_);
 
   enum { kBlockSize = 8 * 1024 };
 };
@@ -269,13 +278,18 @@ class InMemoryEnv : public EnvWrapper {
   virtual Status NewWritableFile(const std::string& fname,
                                  WritableFile** result) {
     MutexLock lock(&mutex_);
-    if (file_map_.find(fname) != file_map_.end()) {
-      DeleteFileInternal(fname);
-    }
+    FileSystem::iterator it = file_map_.find(fname);
 
-    FileState* file = new FileState();
-    file->Ref();
-    file_map_[fname] = file;
+    FileState* file;
+    if (it == file_map_.end()) {
+      // File is not currently open.
+      file = new FileState();
+      file->Ref();
+      file_map_[fname] = file;
+    } else {
+      file = it->second;
+      file->Truncate();
+    }
 
     *result = new WritableFileImpl(file);
     return Status::OK();
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 5cff776..4664795 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -191,6 +191,29 @@ TEST(MemEnvTest, LargeWrite) {
   delete [] scratch;
 }
 
+TEST(MemEnvTest, OverwriteOpenFile) {
+  const char kWrite1Data[] = "Write #1 data";
+  const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
+  const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
+
+  ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
+
+  RandomAccessFile* rand_file;
+  ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
+
+  const char kWrite2Data[] = "Write #2 data";
+  ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
+
+  // Verify that overwriting an open file makes the new data visible through
+  // handles that were opened before the overwrite.
+  Slice result;
+  char scratch[kFileDataLen];
+  ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
+  ASSERT_EQ(0, result.compare(kWrite2Data));
+
+  delete rand_file;
+}
+
 TEST(MemEnvTest, DBTest) {
   Options options;
   options.create_if_missing = true;

From 7d8e41e49b8fddda66a2c5f0a6a47f1a916e8d26 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Mon, 11 Mar 2019 13:04:53 -0700
Subject: [PATCH 045/181] leveldb: Replace AtomicPointer with std::atomic.

This CL removes AtomicPointer from leveldb's port interface. Its usage is replaced with std::atomic<> from the C++11 standard library.

AtomicPointer was used to wrap flags, numbers, and pointers, so its instances are replaced with std::atomic<bool>, std::atomic<int>, std::atomic<size_t> and std::atomic<Node*>.

This CL does not revise the memory ordering. AtomicPointer's methods are replaced mechanically with their std::atomic equivalents, even when the underlying usage is incorrect. (Example: DBImpl::has_imm_ is written using release stores, even though it is always read using relaxed ordering.) Revising the memory ordering is left for future CLs.
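
As a sketch of the mechanical mapping (illustrative; the member name comes from DBImpl, the free functions are invented for the example):

    #include <atomic>

    // Before: a flag stored in a port::AtomicPointer, encoding false/true as
    // null/non-null:
    //   shutting_down_.Release_Store(this);                 // set
    //   if (shutting_down_.Acquire_Load() != nullptr) ...   // test
    //
    // After: the same acquire/release pairing on a std::atomic<bool>:
    std::atomic<bool> shutting_down_{false};

    void SignalShutdown() {
      shutting_down_.store(true, std::memory_order_release);
    }

    bool IsShuttingDown() {
      return shutting_down_.load(std::memory_order_acquire);
    }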

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237865146
---
 db/db_bench.cc        |  20 -----
 db/db_impl.cc         |  33 ++++----
 db/db_impl.h          |   7 +-
 db/db_test.cc         | 135 +++++++++++++++++----------------
 db/skiplist.h         |  77 ++++++++++---------
 db/skiplist_test.cc   |  21 +++---
 port/atomic_pointer.h | 171 ------------------------------------------
 port/port_example.h   |  29 -------
 port/port_stdcxx.h    |   1 -
 util/arena.cc         |   5 +-
 util/arena.h          |  17 +++--
 util/env_test.cc      |  57 +++++++-------
 util/env_windows.cc   |  56 ++++++--------
 13 files changed, 210 insertions(+), 419 deletions(-)
 delete mode 100644 port/atomic_pointer.h

diff --git a/db/db_bench.cc b/db/db_bench.cc
index 115cf45..f9403f4 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -34,7 +34,6 @@
 //      seekrandom    -- N random seeks
 //      open          -- cost of opening a DB
 //      crc32c        -- repeated crc32c of 4K of data
-//      acquireload   -- load N*1000 times
 //   Meta operations:
 //      compact     -- Compact the entire DB
 //      stats       -- Print DB stats
@@ -57,7 +56,6 @@ static const char* FLAGS_benchmarks =
     "crc32c,"
     "snappycomp,"
     "snappyuncomp,"
-    "acquireload,"
     ;
 
 // Number of key/values to place in database
@@ -510,8 +508,6 @@ class Benchmark {
         method = &Benchmark::Compact;
       } else if (name == Slice("crc32c")) {
         method = &Benchmark::Crc32c;
-      } else if (name == Slice("acquireload")) {
-        method = &Benchmark::AcquireLoad;
       } else if (name == Slice("snappycomp")) {
         method = &Benchmark::SnappyCompress;
       } else if (name == Slice("snappyuncomp")) {
@@ -639,22 +635,6 @@ class Benchmark {
     thread->stats.AddMessage(label);
   }
 
-  void AcquireLoad(ThreadState* thread) {
-    int dummy;
-    port::AtomicPointer ap(&dummy);
-    int count = 0;
-    void *ptr = nullptr;
-    thread->stats.AddMessage("(each op is 1000 loads)");
-    while (count < 100000) {
-      for (int i = 0; i < 1000; i++) {
-        ptr = ap.Acquire_Load();
-      }
-      count++;
-      thread->stats.FinishedSingleOp();
-    }
-    if (ptr == nullptr) exit(1); // Disable unused variable warning.
-  }
-
   void SnappyCompress(ThreadState* thread) {
     RandomGenerator gen;
     Slice input = gen.Generate(Options().block_size);
diff --git a/db/db_impl.cc b/db/db_impl.cc
index fefb883..3468862 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -8,6 +8,7 @@
 #include <stdio.h>
 
 #include <algorithm>
+#include <atomic>
 #include <set>
 #include <string>
 #include <vector>
@@ -132,10 +133,11 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
       dbname_(dbname),
       table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))),
       db_lock_(nullptr),
-      shutting_down_(nullptr),
+      shutting_down_(false),
       background_work_finished_signal_(&mutex_),
       mem_(nullptr),
       imm_(nullptr),
+      has_imm_(false),
       logfile_(nullptr),
       logfile_number_(0),
       log_(nullptr),
@@ -144,14 +146,12 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
       background_compaction_scheduled_(false),
       manual_compaction_(nullptr),
       versions_(new VersionSet(dbname_, &options_, table_cache_,
-                               &internal_comparator_)) {
-  has_imm_.Release_Store(nullptr);
-}
+                               &internal_comparator_)) {}
 
 DBImpl::~DBImpl() {
-  // Wait for background work to finish
+  // Wait for background work to finish.
   mutex_.Lock();
-  shutting_down_.Release_Store(this);  // Any non-null value is ok
+  shutting_down_.store(true, std::memory_order_release);
   while (background_compaction_scheduled_) {
     background_work_finished_signal_.Wait();
   }
@@ -547,7 +547,7 @@ void DBImpl::CompactMemTable() {
   Status s = WriteLevel0Table(imm_, &edit, base);
   base->Unref();
 
-  if (s.ok() && shutting_down_.Acquire_Load()) {
+  if (s.ok() && shutting_down_.load(std::memory_order_acquire)) {
     s = Status::IOError("Deleting DB during memtable compaction");
   }
 
@@ -562,7 +562,7 @@ void DBImpl::CompactMemTable() {
     // Commit to the new state
     imm_->Unref();
     imm_ = nullptr;
-    has_imm_.Release_Store(nullptr);
+    has_imm_.store(false, std::memory_order_release);
     DeleteObsoleteFiles();
   } else {
     RecordBackgroundError(s);
@@ -610,7 +610,8 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,
   }
 
   MutexLock l(&mutex_);
-  while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
+  while (!manual.done && !shutting_down_.load(std::memory_order_acquire) &&
+         bg_error_.ok()) {
     if (manual_compaction_ == nullptr) {  // Idle
       manual_compaction_ = &manual;
       MaybeScheduleCompaction();
@@ -652,7 +653,7 @@ void DBImpl::MaybeScheduleCompaction() {
   mutex_.AssertHeld();
   if (background_compaction_scheduled_) {
     // Already scheduled
-  } else if (shutting_down_.Acquire_Load()) {
+  } else if (shutting_down_.load(std::memory_order_acquire)) {
     // DB is being deleted; no more background compactions
   } else if (!bg_error_.ok()) {
     // Already got an error; no more changes
@@ -673,7 +674,7 @@ void DBImpl::BGWork(void* db) {
 void DBImpl::BackgroundCall() {
   MutexLock l(&mutex_);
   assert(background_compaction_scheduled_);
-  if (shutting_down_.Acquire_Load()) {
+  if (shutting_down_.load(std::memory_order_acquire)) {
     // No more background work when shutting down.
   } else if (!bg_error_.ok()) {
     // No more background work after a background error.
@@ -752,7 +753,7 @@ void DBImpl::BackgroundCompaction() {
 
   if (status.ok()) {
     // Done
-  } else if (shutting_down_.Acquire_Load()) {
+  } else if (shutting_down_.load(std::memory_order_acquire)) {
     // Ignore compaction errors found during shutting down
   } else {
     Log(options_.info_log,
@@ -919,9 +920,9 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   std::string current_user_key;
   bool has_current_user_key = false;
   SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
-  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
+  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
     // Prioritize immutable compaction work
-    if (has_imm_.NoBarrier_Load() != nullptr) {
+    if (has_imm_.load(std::memory_order_relaxed)) {
       const uint64_t imm_start = env_->NowMicros();
       mutex_.Lock();
       if (imm_ != nullptr) {
@@ -1014,7 +1015,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     input->Next();
   }
 
-  if (status.ok() && shutting_down_.Acquire_Load()) {
+  if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
     status = Status::IOError("Deleting DB during compaction");
   }
   if (status.ok() && compact->builder != nullptr) {
@@ -1378,7 +1379,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       logfile_number_ = new_log_number;
       log_ = new log::Writer(lfile);
       imm_ = mem_;
-      has_imm_.Release_Store(imm_);
+      has_imm_.store(true, std::memory_order_release);
       mem_ = new MemTable(internal_comparator_);
       mem_->Ref();
       force = false;   // Do not force another compaction if have room
diff --git a/db/db_impl.h b/db/db_impl.h
index 00e800a..ca00d42 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -5,8 +5,11 @@
 #ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
 #define STORAGE_LEVELDB_DB_DB_IMPL_H_
 
+#include <atomic>
 #include <deque>
 #include <set>
+#include <string>
+
 #include "db/dbformat.h"
 #include "db/log_writer.h"
 #include "db/snapshot.h"
@@ -136,11 +139,11 @@ class DBImpl : public DB {
 
   // State below is protected by mutex_
   port::Mutex mutex_;
-  port::AtomicPointer shutting_down_;
+  std::atomic<bool> shutting_down_;
   port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
   MemTable* mem_;
   MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
-  port::AtomicPointer has_imm_;       // So bg thread can detect non-null imm_
+  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
   WritableFile* logfile_;
   uint64_t logfile_number_ GUARDED_BY(mutex_);
   log::Writer* log_;
diff --git a/db/db_test.cc b/db/db_test.cc
index 894ed23..e889a74 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <atomic>
+#include <string>
+
 #include "leveldb/db.h"
 #include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
@@ -61,7 +64,7 @@ class AtomicCounter {
 void DelayMilliseconds(int millis) {
   Env::Default()->SleepForMicroseconds(millis * 1000);
 }
-}
+}  // namespace
 
 // Test Env to override default Env behavior for testing.
 class TestEnv : public EnvWrapper {
@@ -93,45 +96,45 @@ class TestEnv : public EnvWrapper {
   bool ignore_dot_files_;
 };
 
-// Special Env used to delay background operations
+// Special Env used to delay background operations.
 class SpecialEnv : public EnvWrapper {
  public:
   // sstable/log Sync() calls are blocked while this pointer is non-null.
-  port::AtomicPointer delay_data_sync_;
+  std::atomic<bool> delay_data_sync_;
 
   // sstable/log Sync() calls return an error.
-  port::AtomicPointer data_sync_error_;
+  std::atomic<bool> data_sync_error_;
 
   // Simulate no-space errors while this pointer is non-null.
-  port::AtomicPointer no_space_;
+  std::atomic<bool> no_space_;
 
   // Simulate non-writable file system while this pointer is non-null.
-  port::AtomicPointer non_writable_;
+  std::atomic<bool> non_writable_;
 
   // Force sync of manifest files to fail while this pointer is non-null.
-  port::AtomicPointer manifest_sync_error_;
+  std::atomic<bool> manifest_sync_error_;
 
   // Force write to manifest files to fail while this pointer is non-null.
-  port::AtomicPointer manifest_write_error_;
+  std::atomic<bool> manifest_write_error_;
 
   bool count_random_reads_;
   AtomicCounter random_read_counter_;
 
-  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
-    delay_data_sync_.Release_Store(nullptr);
-    data_sync_error_.Release_Store(nullptr);
-    no_space_.Release_Store(nullptr);
-    non_writable_.Release_Store(nullptr);
-    count_random_reads_ = false;
-    manifest_sync_error_.Release_Store(nullptr);
-    manifest_write_error_.Release_Store(nullptr);
+  explicit SpecialEnv(Env* base) : EnvWrapper(base),
+    delay_data_sync_(false),
+    data_sync_error_(false),
+    no_space_(false),
+    non_writable_(false),
+    manifest_sync_error_(false),
+    manifest_write_error_(false),
+    count_random_reads_(false) {
   }
 
   Status NewWritableFile(const std::string& f, WritableFile** r) {
     class DataFile : public WritableFile {
      private:
-      SpecialEnv* env_;
-      WritableFile* base_;
+      SpecialEnv* const env_;
+      WritableFile* const base_;
 
      public:
       DataFile(SpecialEnv* env, WritableFile* base)
@@ -140,7 +143,7 @@ class SpecialEnv : public EnvWrapper {
       }
       ~DataFile() { delete base_; }
       Status Append(const Slice& data) {
-        if (env_->no_space_.Acquire_Load() != nullptr) {
+        if (env_->no_space_.load(std::memory_order_acquire)) {
           // Drop writes on the floor
           return Status::OK();
         } else {
@@ -150,10 +153,10 @@ class SpecialEnv : public EnvWrapper {
       Status Close() { return base_->Close(); }
       Status Flush() { return base_->Flush(); }
       Status Sync() {
-        if (env_->data_sync_error_.Acquire_Load() != nullptr) {
+        if (env_->data_sync_error_.load(std::memory_order_acquire)) {
           return Status::IOError("simulated data sync error");
         }
-        while (env_->delay_data_sync_.Acquire_Load() != nullptr) {
+        while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
           DelayMilliseconds(100);
         }
         return base_->Sync();
@@ -167,7 +170,7 @@ class SpecialEnv : public EnvWrapper {
       ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
       ~ManifestFile() { delete base_; }
       Status Append(const Slice& data) {
-        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
+        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
           return Status::IOError("simulated writer error");
         } else {
           return base_->Append(data);
@@ -176,7 +179,7 @@ class SpecialEnv : public EnvWrapper {
       Status Close() { return base_->Close(); }
       Status Flush() { return base_->Flush(); }
       Status Sync() {
-        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
+        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
           return Status::IOError("simulated sync error");
         } else {
           return base_->Sync();
@@ -184,7 +187,7 @@ class SpecialEnv : public EnvWrapper {
       }
     };
 
-    if (non_writable_.Acquire_Load() != nullptr) {
+    if (non_writable_.load(std::memory_order_acquire)) {
       return Status::IOError("simulated write error");
     }
 
@@ -424,7 +427,7 @@ class DBTest {
     ASSERT_TRUE(
         db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
                          &property));
-    return atoi(property.c_str());
+    return std::stoi(property);
   }
 
   int TotalTableFiles() {
@@ -587,11 +590,13 @@ TEST(DBTest, GetFromImmutableLayer) {
     ASSERT_OK(Put("foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
 
-    env_->delay_data_sync_.Release_Store(env_);      // Block sync calls
-    Put("k1", std::string(100000, 'x'));             // Fill memtable
-    Put("k2", std::string(100000, 'y'));             // Trigger compaction
+    // Block sync calls.
+    env_->delay_data_sync_.store(true, std::memory_order_release);
+    Put("k1", std::string(100000, 'x'));             // Fill memtable.
+    Put("k2", std::string(100000, 'y'));             // Trigger compaction.
     ASSERT_EQ("v1", Get("foo"));
-    env_->delay_data_sync_.Release_Store(nullptr);   // Release sync calls
+    // Release sync calls.
+    env_->delay_data_sync_.store(false, std::memory_order_release);
   } while (ChangeOptions());
 }
 
@@ -608,7 +613,7 @@ TEST(DBTest, GetMemUsage) {
     ASSERT_OK(Put("foo", "v1"));
     std::string val;
     ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
-    int mem_usage = atoi(val.c_str());
+    int mem_usage = std::stoi(val);
     ASSERT_GT(mem_usage, 0);
     ASSERT_LT(mem_usage, 5*1024*1024);
   } while (ChangeOptions());
@@ -1106,7 +1111,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
   for (int i = 0; i < 5 * kMaxFiles; i++) {
     Put("key", value);
     ASSERT_LE(TotalTableFiles(), kMaxFiles);
-    fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
+    fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
   }
 }
 
@@ -1271,7 +1276,7 @@ TEST(DBTest, IteratorPinsRef) {
   // Write to force compactions
   Put("foo", "newvalue1");
   for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
   }
   Put("foo", "newvalue2");
 
@@ -1459,21 +1464,21 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
 
 TEST(DBTest, L0_CompactionBug_Issue44_b) {
   Reopen();
-  Put("","");
+  Put("", "");
   Reopen();
   Delete("e");
-  Put("","");
+  Put("", "");
   Reopen();
   Put("c", "cv");
   Reopen();
-  Put("","");
+  Put("", "");
   Reopen();
-  Put("","");
+  Put("", "");
   DelayMilliseconds(1000);  // Wait for compaction to finish
   Reopen();
-  Put("d","dv");
+  Put("d", "dv");
   Reopen();
-  Put("","");
+  Put("", "");
   Reopen();
   Delete("d");
   Delete("b");
@@ -1711,13 +1716,14 @@ TEST(DBTest, NoSpace) {
   ASSERT_EQ("v1", Get("foo"));
   Compact("a", "z");
   const int num_files = CountFiles();
-  env_->no_space_.Release_Store(env_);   // Force out-of-space errors
+  // Force out-of-space errors.
+  env_->no_space_.store(true, std::memory_order_release);
   for (int i = 0; i < 10; i++) {
     for (int level = 0; level < config::kNumLevels-1; level++) {
       dbfull()->TEST_CompactRange(level, nullptr, nullptr);
     }
   }
-  env_->no_space_.Release_Store(nullptr);
+  env_->no_space_.store(false, std::memory_order_release);
   ASSERT_LT(CountFiles(), num_files + 3);
 }
 
@@ -1727,7 +1733,8 @@ TEST(DBTest, NonWritableFileSystem) {
   options.env = env_;
   Reopen(&options);
   ASSERT_OK(Put("foo", "v1"));
-  env_->non_writable_.Release_Store(env_);  // Force errors for new files
+  // Force errors for new files.
+  env_->non_writable_.store(true, std::memory_order_release);
   std::string big(100000, 'x');
   int errors = 0;
   for (int i = 0; i < 20; i++) {
@@ -1738,7 +1745,7 @@ TEST(DBTest, NonWritableFileSystem) {
     }
   }
   ASSERT_GT(errors, 0);
-  env_->non_writable_.Release_Store(nullptr);
+  env_->non_writable_.store(false, std::memory_order_release);
 }
 
 TEST(DBTest, WriteSyncError) {
@@ -1748,7 +1755,7 @@ TEST(DBTest, WriteSyncError) {
   Options options = CurrentOptions();
   options.env = env_;
   Reopen(&options);
-  env_->data_sync_error_.Release_Store(env_);
+  env_->data_sync_error_.store(true, std::memory_order_release);
 
   // (b) Normal write should succeed
   WriteOptions w;
@@ -1762,7 +1769,7 @@ TEST(DBTest, WriteSyncError) {
   ASSERT_EQ("NOT_FOUND", Get("k2"));
 
   // (d) make sync behave normally
-  env_->data_sync_error_.Release_Store(nullptr);
+  env_->data_sync_error_.store(false, std::memory_order_release);
 
   // (e) Do a non-sync write; should fail
   w.sync = false;
@@ -1782,7 +1789,7 @@ TEST(DBTest, ManifestWriteError) {
   // We iterate twice.  In the second iteration, everything is the
   // same except the log record never makes it to the MANIFEST file.
   for (int iter = 0; iter < 2; iter++) {
-    port::AtomicPointer* error_type = (iter == 0)
+    std::atomic<bool>* error_type = (iter == 0)
         ? &env_->manifest_sync_error_
         : &env_->manifest_write_error_;
 
@@ -1802,12 +1809,12 @@ TEST(DBTest, ManifestWriteError) {
     ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo=>bar is now in last level
 
     // Merging compaction (will fail)
-    error_type->Release_Store(env_);
+    error_type->store(true, std::memory_order_release);
     dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
     ASSERT_EQ("bar", Get("foo"));
 
     // Recovery: should not lose data
-    error_type->Release_Store(nullptr);
+    error_type->store(false, std::memory_order_release);
     Reopen(&options);
     ASSERT_EQ("bar", Get("foo"));
   }
@@ -1878,7 +1885,7 @@ TEST(DBTest, BloomFilter) {
   dbfull()->TEST_CompactMemTable();
 
   // Prevent auto compactions triggered by seeks
-  env_->delay_data_sync_.Release_Store(env_);
+  env_->delay_data_sync_.store(true, std::memory_order_release);
 
   // Lookup present keys.  Should rarely read from small sstable.
   env_->random_read_counter_.Reset();
@@ -1899,7 +1906,7 @@ TEST(DBTest, BloomFilter) {
   fprintf(stderr, "%d missing => %d reads\n", N, reads);
   ASSERT_LE(reads, 3*N/100);
 
-  env_->delay_data_sync_.Release_Store(nullptr);
+  env_->delay_data_sync_.store(false, std::memory_order_release);
   Close();
   delete options.block_cache;
   delete options.filter_policy;
@@ -1914,9 +1921,9 @@ static const int kNumKeys = 1000;
 
 struct MTState {
   DBTest* test;
-  port::AtomicPointer stop;
-  port::AtomicPointer counter[kNumThreads];
-  port::AtomicPointer thread_done[kNumThreads];
+  std::atomic<bool> stop;
+  std::atomic<int> counter[kNumThreads];
+  std::atomic<bool> thread_done[kNumThreads];
 };
 
 struct MTThread {
@@ -1928,13 +1935,13 @@ static void MTThreadBody(void* arg) {
   MTThread* t = reinterpret_cast<MTThread*>(arg);
   int id = t->id;
   DB* db = t->state->test->db_;
-  uintptr_t counter = 0;
+  int counter = 0;
   fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   std::string value;
   char valbuf[1500];
-  while (t->state->stop.Acquire_Load() == nullptr) {
-    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
+  while (!t->state->stop.load(std::memory_order_acquire)) {
+    t->state->counter[id].store(counter, std::memory_order_release);
 
     int key = rnd.Uniform(kNumKeys);
     char keybuf[20];
@@ -1959,14 +1966,13 @@ static void MTThreadBody(void* arg) {
         ASSERT_EQ(k, key);
         ASSERT_GE(w, 0);
         ASSERT_LT(w, kNumThreads);
-        ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
-            t->state->counter[w].Acquire_Load()));
+        ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
       }
     }
     counter++;
   }
-  t->state->thread_done[id].Release_Store(t);
-  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
+  t->state->thread_done[id].store(true, std::memory_order_release);
+  fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
 }
 
 }  // namespace
@@ -1976,10 +1982,10 @@ TEST(DBTest, MultiThreaded) {
     // Initialize state
     MTState mt;
     mt.test = this;
-    mt.stop.Release_Store(0);
+    mt.stop.store(false, std::memory_order_release);
     for (int id = 0; id < kNumThreads; id++) {
-      mt.counter[id].Release_Store(0);
-      mt.thread_done[id].Release_Store(0);
+      mt.counter[id].store(false, std::memory_order_release);
+      mt.thread_done[id].store(false, std::memory_order_release);
     }
 
     // Start threads
@@ -1994,9 +2000,9 @@ TEST(DBTest, MultiThreaded) {
     DelayMilliseconds(kTestSeconds * 1000);
 
     // Stop the threads and wait for them to finish
-    mt.stop.Release_Store(&mt);
+    mt.stop.store(true, std::memory_order_release);
     for (int id = 0; id < kNumThreads; id++) {
-      while (mt.thread_done[id].Acquire_Load() == nullptr) {
+      while (!mt.thread_done[id].load(std::memory_order_acquire)) {
         DelayMilliseconds(100);
       }
     }
@@ -2100,6 +2106,7 @@ class ModelDB: public DB {
     virtual Slice key() const { return iter_->first; }
     virtual Slice value() const { return iter_->second; }
     virtual Status status() const { return Status::OK(); }
+
    private:
     const KVMap* const map_;
     const bool owned_;  // Do we own map_
diff --git a/db/skiplist.h b/db/skiplist.h
index b806ce0..7ac914b 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -27,9 +27,10 @@
 //
 // ... prev vs. next pointer ordering ...
 
-#include <assert.h>
-#include <stdlib.h>
-#include "port/port.h"
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+
 #include "util/arena.h"
 #include "util/random.h"
 
@@ -105,11 +106,10 @@ class SkipList {
 
   // Modified only by Insert().  Read racily by readers, but stale
   // values are ok.
-  port::AtomicPointer max_height_;   // Height of the entire list
+  std::atomic<int> max_height_;   // Height of the entire list
 
   inline int GetMaxHeight() const {
-    return static_cast<int>(
-        reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+    return max_height_.load(std::memory_order_relaxed);
   }
 
   // Read/written only by Insert().
@@ -144,7 +144,7 @@ class SkipList {
 
 // Implementation details follow
 template<typename Key, class Comparator>
-struct SkipList<Key,Comparator>::Node {
+struct SkipList<Key, Comparator>::Node {
   explicit Node(const Key& k) : key(k) { }
 
   Key const key;
@@ -155,63 +155,63 @@ struct SkipList<Key,Comparator>::Node {
     assert(n >= 0);
     // Use an 'acquire load' so that we observe a fully initialized
     // version of the returned Node.
-    return reinterpret_cast<Node*>(next_[n].Acquire_Load());
+    return next_[n].load(std::memory_order_acquire);
   }
   void SetNext(int n, Node* x) {
     assert(n >= 0);
     // Use a 'release store' so that anybody who reads through this
     // pointer observes a fully initialized version of the inserted node.
-    next_[n].Release_Store(x);
+    next_[n].store(x, std::memory_order_release);
   }
 
   // No-barrier variants that can be safely used in a few locations.
   Node* NoBarrier_Next(int n) {
     assert(n >= 0);
-    return reinterpret_cast<Node*>(next_[n].NoBarrier_Load());
+    return next_[n].load(std::memory_order_relaxed);
   }
   void NoBarrier_SetNext(int n, Node* x) {
     assert(n >= 0);
-    next_[n].NoBarrier_Store(x);
+    next_[n].store(x, std::memory_order_relaxed);
   }
 
  private:
   // Array of length equal to the node height.  next_[0] is lowest level link.
-  port::AtomicPointer next_[1];
+  std::atomic<Node*> next_[1];
 };
 
 template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
-  char* mem = arena_->AllocateAligned(
-      sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1));
-  return new (mem) Node(key);
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+  char* const node_memory = arena_->AllocateAligned(
+      sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
+  return new (node_memory) Node(key);
 }
 
 template<typename Key, class Comparator>
-inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
+inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   list_ = list;
   node_ = nullptr;
 }
 
 template<typename Key, class Comparator>
-inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
+inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
   return node_ != nullptr;
 }
 
 template<typename Key, class Comparator>
-inline const Key& SkipList<Key,Comparator>::Iterator::key() const {
+inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }
 
 template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Next() {
+inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }
 
 template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Prev() {
+inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
   assert(Valid());
@@ -222,17 +222,17 @@ inline void SkipList<Key,Comparator>::Iterator::Prev() {
 }
 
 template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
+inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
   node_ = list_->FindGreaterOrEqual(target, nullptr);
 }
 
 template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() {
+inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }
 
 template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
+inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
     node_ = nullptr;
@@ -240,7 +240,7 @@ inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
 }
 
 template<typename Key, class Comparator>
-int SkipList<Key,Comparator>::RandomHeight() {
+int SkipList<Key, Comparator>::RandomHeight() {
   // Increase height with probability 1 in kBranching
   static const unsigned int kBranching = 4;
   int height = 1;
@@ -253,14 +253,15 @@ int SkipList<Key,Comparator>::RandomHeight() {
 }
 
 template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
   // null n is considered infinite
   return (n != nullptr) && (compare_(n->key, key) < 0);
 }
 
 template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev)
-    const {
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
+                                              Node** prev) const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
   while (true) {
@@ -281,8 +282,8 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
 }
 
 template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
   while (true) {
@@ -302,7 +303,7 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
 }
 
 template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
   int level = GetMaxHeight() - 1;
@@ -322,11 +323,11 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
 }
 
 template<typename Key, class Comparator>
-SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
+SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
     : compare_(cmp),
       arena_(arena),
       head_(NewNode(0 /* any key will do */, kMaxHeight)),
-      max_height_(reinterpret_cast<void*>(1)),
+      max_height_(1),
       rnd_(0xdeadbeef) {
   for (int i = 0; i < kMaxHeight; i++) {
     head_->SetNext(i, nullptr);
@@ -334,7 +335,7 @@ SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
 }
 
 template<typename Key, class Comparator>
-void SkipList<Key,Comparator>::Insert(const Key& key) {
+void SkipList<Key, Comparator>::Insert(const Key& key) {
   // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
   // here since Insert() is externally synchronized.
   Node* prev[kMaxHeight];
@@ -348,8 +349,6 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
     for (int i = GetMaxHeight(); i < height; i++) {
       prev[i] = head_;
     }
-    //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
-
     // It is ok to mutate max_height_ without any synchronization
     // with concurrent readers.  A concurrent reader that observes
     // the new value of max_height_ will see either the old value of
@@ -357,7 +356,7 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
     // the loop below.  In the former case the reader will
     // immediately drop to the next level since nullptr sorts after all
     // keys.  In the latter case the reader will use the new node.
-    max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
+    max_height_.store(height, std::memory_order_relaxed);
   }
 
   x = NewNode(key, height);
@@ -370,7 +369,7 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
 }
 
 template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::Contains(const Key& key) const {
+bool SkipList<Key, Comparator>::Contains(const Key& key) const {
   Node* x = FindGreaterOrEqual(key, nullptr);
   if (x != nullptr && Equal(key, x->key)) {
     return true;
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 24e0887..c4cf146 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -3,7 +3,10 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "db/skiplist.h"
+
+#include <atomic>
 #include <set>
+
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -188,12 +191,12 @@ class ConcurrentTest {
 
   // Per-key generation
   struct State {
-    port::AtomicPointer generation[K];
-    void Set(int k, intptr_t v) {
-      generation[k].Release_Store(reinterpret_cast<void*>(v));
+    std::atomic<int> generation[K];
+    void Set(int k, int v) {
+      generation[k].store(v, std::memory_order_release);
     }
-    intptr_t Get(int k) {
-      return reinterpret_cast<intptr_t>(generation[k].Acquire_Load());
+    int Get(int k) {
+      return generation[k].load(std::memory_order_acquire);
     }
 
     State() {
@@ -300,7 +303,7 @@ class TestState {
  public:
   ConcurrentTest t_;
   int seed_;
-  port::AtomicPointer quit_flag_;
+  std::atomic<bool> quit_flag_;
 
   enum ReaderState {
     STARTING,
@@ -310,7 +313,7 @@ class TestState {
 
   explicit TestState(int s)
       : seed_(s),
-        quit_flag_(nullptr),
+        quit_flag_(false),
         state_(STARTING),
         state_cv_(&mu_) {}
 
@@ -340,7 +343,7 @@ static void ConcurrentReader(void* arg) {
   Random rnd(state->seed_);
   int64_t reads = 0;
   state->Change(TestState::RUNNING);
-  while (!state->quit_flag_.Acquire_Load()) {
+  while (!state->quit_flag_.load(std::memory_order_acquire)) {
     state->t_.ReadStep(&rnd);
     ++reads;
   }
@@ -362,7 +365,7 @@ static void RunConcurrent(int run) {
     for (int i = 0; i < kSize; i++) {
       state.t_.WriteStep(&rnd);
     }
-    state.quit_flag_.Release_Store(&state);  // Any non-null arg will do
+    state.quit_flag_.store(true, std::memory_order_release);
     state.Wait(TestState::DONE);
   }
 }
diff --git a/port/atomic_pointer.h b/port/atomic_pointer.h
deleted file mode 100644
index d906f63..0000000
--- a/port/atomic_pointer.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// AtomicPointer provides storage for a lock-free pointer.
-// Platform-dependent implementation of AtomicPointer:
-// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If <atomic> is present (on newer versions of gcc, it is), we use
-//   a <atomic>-based AtomicPointer.  However we prefer the memory
-//   barrier based version, because at least on a gcc 4.4 32-bit build
-//   on linux, we have encountered a buggy <atomic> implementation.
-//   Also, some <atomic> implementations are much slower than a memory-barrier
-//   based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
-//   a barrier based acquire-load).
-// This code is based on atomicops-internals-* in Google's perftools:
-// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
-
-#ifndef PORT_ATOMIC_POINTER_H_
-#define PORT_ATOMIC_POINTER_H_
-
-#include <stdint.h>
-
-#include <atomic>
-
-#if defined(_M_X64) || defined(__x86_64__)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(__ARMEL__)
-#define ARCH_CPU_ARM_FAMILY 1
-#elif defined(__aarch64__)
-#define ARCH_CPU_ARM64_FAMILY 1
-#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
-#define ARCH_CPU_PPC_FAMILY 1
-#elif defined(__mips__)
-#define ARCH_CPU_MIPS_FAMILY 1
-#endif
-
-namespace leveldb {
-namespace port {
-
-// Define MemoryBarrier() if available
-// Windows on x86
-#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
-// windows.h already provides a MemoryBarrier(void) macro
-// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Mac OS
-#elif defined(__APPLE__)
-inline void MemoryBarrier() {
-  std::atomic_thread_fence(std::memory_order_seq_cst);
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Gcc on x86
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
-  // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
-  // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
-  __asm__ __volatile__("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Sun Studio
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
-inline void MemoryBarrier() {
-  // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
-  // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
-  asm volatile("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM Linux
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-// The Linux ARM kernel provides a highly optimized device-specific memory
-// barrier function at a fixed memory address that is mapped in every
-// user-level process.
-//
-// This beats using CPU-specific instructions which are, on single-core
-// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
-// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
-// shows that the extra function call cost is completely negligible on
-// multi-core devices.
-//
-inline void MemoryBarrier() {
-  (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM64
-#elif defined(ARCH_CPU_ARM64_FAMILY)
-inline void MemoryBarrier() {
-  asm volatile("dmb sy" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// PPC
-#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
-  // TODO for some powerpc expert: is there a cheaper suitable variant?
-  // Perhaps by having separate barriers for acquire and release ops.
-  asm volatile("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// MIPS
-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
-  __asm__ __volatile__("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-#endif
-
-// AtomicPointer built using platform-specific MemoryBarrier().
-#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
-class AtomicPointer {
- private:
-  void* rep_;
- public:
-  AtomicPointer() { }
-  explicit AtomicPointer(void* p) : rep_(p) {}
-  inline void* NoBarrier_Load() const { return rep_; }
-  inline void NoBarrier_Store(void* v) { rep_ = v; }
-  inline void* Acquire_Load() const {
-    void* result = rep_;
-    MemoryBarrier();
-    return result;
-  }
-  inline void Release_Store(void* v) {
-    MemoryBarrier();
-    rep_ = v;
-  }
-};
-
-// AtomicPointer based on C++11 <atomic>.
-#else
-class AtomicPointer {
- private:
-  std::atomic<void*> rep_;
- public:
-  AtomicPointer() { }
-  explicit AtomicPointer(void* v) : rep_(v) { }
-  inline void* Acquire_Load() const {
-    return rep_.load(std::memory_order_acquire);
-  }
-  inline void Release_Store(void* v) {
-    rep_.store(v, std::memory_order_release);
-  }
-  inline void* NoBarrier_Load() const {
-    return rep_.load(std::memory_order_relaxed);
-  }
-  inline void NoBarrier_Store(void* v) {
-    rep_.store(v, std::memory_order_relaxed);
-  }
-};
-
-#endif
-
-#undef LEVELDB_HAVE_MEMORY_BARRIER
-#undef ARCH_CPU_X86_FAMILY
-#undef ARCH_CPU_ARM_FAMILY
-#undef ARCH_CPU_ARM64_FAMILY
-#undef ARCH_CPU_PPC_FAMILY
-
-}  // namespace port
-}  // namespace leveldb
-
-#endif  // PORT_ATOMIC_POINTER_H_
diff --git a/port/port_example.h b/port/port_example.h
index 9c648c3..1a8fca2 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -62,35 +62,6 @@ class CondVar {
   void SignallAll();
 };
 
-// A type that holds a pointer that can be read or written atomically
-// (i.e., without word-tearing.)
-class AtomicPointer {
- private:
-  intptr_t rep_;
- public:
-  // Initialize to arbitrary value
-  AtomicPointer();
-
-  // Initialize to hold v
-  explicit AtomicPointer(void* v) : rep_(v) { }
-
-  // Read and return the stored pointer with the guarantee that no
-  // later memory access (read or write) by this thread can be
-  // reordered ahead of this read.
-  void* Acquire_Load() const;
-
-  // Set v as the stored pointer with the guarantee that no earlier
-  // memory access (read or write) by this thread can be reordered
-  // after this store.
-  void Release_Store(void* v);
-
-  // Read the stored pointer with no ordering guarantees.
-  void* NoBarrier_Load() const;
-
-  // Set va as the stored pointer with no ordering guarantees.
-  void NoBarrier_Store(void* v);
-};
-
 // ------------------ Compression -------------------
 
 // Store the snappy compression of "input[0,input_length-1]" in *output.
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index 4713e26..e21fa70 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -35,7 +35,6 @@
 #include <condition_variable>  // NOLINT
 #include <mutex>               // NOLINT
 #include <string>
-#include "port/atomic_pointer.h"
 #include "port/thread_annotations.h"
 
 namespace leveldb {
diff --git a/util/arena.cc b/util/arena.cc
index a0338bf..a496ad0 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -3,7 +3,6 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "util/arena.h"
-#include <assert.h>
 
 namespace leveldb {
 
@@ -60,8 +59,8 @@ char* Arena::AllocateAligned(size_t bytes) {
 char* Arena::AllocateNewBlock(size_t block_bytes) {
   char* result = new char[block_bytes];
   blocks_.push_back(result);
-  memory_usage_.NoBarrier_Store(
-      reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
+  memory_usage_.fetch_add(block_bytes + sizeof(char*),
+                          std::memory_order_relaxed);
   return result;
 }
 
diff --git a/util/arena.h b/util/arena.h
index 48bab33..624e262 100644
--- a/util/arena.h
+++ b/util/arena.h
@@ -5,11 +5,11 @@
 #ifndef STORAGE_LEVELDB_UTIL_ARENA_H_
 #define STORAGE_LEVELDB_UTIL_ARENA_H_
 
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
 #include <vector>
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include "port/port.h"
 
 namespace leveldb {
 
@@ -21,13 +21,13 @@ class Arena {
   // Return a pointer to a newly allocated memory block of "bytes" bytes.
   char* Allocate(size_t bytes);
 
-  // Allocate memory with the normal alignment guarantees provided by malloc
+  // Allocate memory with the normal alignment guarantees provided by malloc.
   char* AllocateAligned(size_t bytes);
 
   // Returns an estimate of the total memory usage of data allocated
   // by the arena.
   size_t MemoryUsage() const {
-    return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
+    return memory_usage_.load(std::memory_order_relaxed);
   }
 
  private:
@@ -42,7 +42,10 @@ class Arena {
   std::vector<char*> blocks_;
 
   // Total memory usage of the arena.
-  port::AtomicPointer memory_usage_;
+  //
+  // TODO(costan): This member is accessed via atomics, but the others are
+  //               accessed without any locking. Is this OK?
+  std::atomic<size_t> memory_usage_;
 
   // No copying allowed
   Arena(const Arena&);
diff --git a/util/env_test.cc b/util/env_test.cc
index 070109b..b204089 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -5,6 +5,7 @@
 #include "leveldb/env.h"
 
 #include <algorithm>
+#include <atomic>
 
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -24,10 +25,15 @@ class EnvTest {
   EnvTest() : env_(Env::Default()) { }
 };
 
-static void SetBool(void* ptr) {
-  reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
+namespace {
+
+static void SetAtomicBool(void* atomic_bool_ptr) {
+  std::atomic<bool>* atomic_bool =
+      reinterpret_cast<std::atomic<bool>*>(atomic_bool_ptr);
+  atomic_bool->store(true, std::memory_order_relaxed);
 }
 
+}  // namespace
 
 TEST(EnvTest, ReadWrite) {
   Random rnd(test::RandomSeed());
@@ -77,42 +83,41 @@ TEST(EnvTest, ReadWrite) {
 }
 
 TEST(EnvTest, RunImmediately) {
-  port::AtomicPointer called(nullptr);
-  env_->Schedule(&SetBool, &called);
+  std::atomic<bool> called(false);
+  env_->Schedule(&SetAtomicBool, &called);
   env_->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(called.NoBarrier_Load() != nullptr);
+  ASSERT_TRUE(called.load(std::memory_order_relaxed));
 }
 
 TEST(EnvTest, RunMany) {
-  port::AtomicPointer last_id(nullptr);
+  std::atomic<int> last_id(0);
 
-  struct CB {
-    port::AtomicPointer* last_id_ptr;   // Pointer to shared slot
-    uintptr_t id;             // Order# for the execution of this callback
+  struct Callback {
+    std::atomic<int>* const last_id_ptr_;  // Pointer to shared state.
+    const int id_;  // Order# for the execution of this callback.
 
-    CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
+    Callback(std::atomic<int>* last_id_ptr, int id)
+        : last_id_ptr_(last_id_ptr), id_(id) { }
 
-    static void Run(void* v) {
-      CB* cb = reinterpret_cast<CB*>(v);
-      void* cur = cb->last_id_ptr->NoBarrier_Load();
-      ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
-      cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
+    static void Run(void* arg) {
+      Callback* callback = reinterpret_cast<Callback*>(arg);
+      int current_id = callback->last_id_ptr_->load(std::memory_order_relaxed);
+      ASSERT_EQ(callback->id_ - 1, current_id);
+      callback->last_id_ptr_->store(callback->id_, std::memory_order_relaxed);
     }
   };
 
-  // Schedule in different order than start time
-  CB cb1(&last_id, 1);
-  CB cb2(&last_id, 2);
-  CB cb3(&last_id, 3);
-  CB cb4(&last_id, 4);
-  env_->Schedule(&CB::Run, &cb1);
-  env_->Schedule(&CB::Run, &cb2);
-  env_->Schedule(&CB::Run, &cb3);
-  env_->Schedule(&CB::Run, &cb4);
+  Callback callback1(&last_id, 1);
+  Callback callback2(&last_id, 2);
+  Callback callback3(&last_id, 3);
+  Callback callback4(&last_id, 4);
+  env_->Schedule(&Callback::Run, &callback1);
+  env_->Schedule(&Callback::Run, &callback2);
+  env_->Schedule(&Callback::Run, &callback3);
+  env_->Schedule(&Callback::Run, &callback4);
 
   env_->SleepForMicroseconds(kDelayMicros);
-  void* cur = last_id.Acquire_Load();
-  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+  ASSERT_EQ(4, last_id.load(std::memory_order_relaxed));
 }
 
 struct State {
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 57932bb..3b4496b 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -8,6 +8,7 @@
 #include <windows.h>
 
 #include <algorithm>
+#include <atomic>
 #include <chrono>
 #include <condition_variable>
 #include <deque>
@@ -105,51 +106,42 @@ class ScopedHandle {
 };
 
 // Helper class to limit resource usage to avoid exhaustion.
-// Currently used to limit mmap file usage so that we do not end
-// up running out virtual memory, or running into kernel performance
-// problems for very large databases.
+// Currently used to limit read-only file descriptors and mmap file usage
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
 class Limiter {
  public:
-  // Limit maximum number of resources to |n|.
-  Limiter(intptr_t n) { SetAllowed(n); }
+  // Limit maximum number of resources to |max_acquires|.
+  Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+  Limiter(const Limiter&) = delete;
+  Limiter operator=(const Limiter&) = delete;
 
   // If another resource is available, acquire it and return true.
   // Else return false.
-  bool Acquire() LOCKS_EXCLUDED(mu_) {
-    if (GetAllowed() <= 0) {
-      return false;
-    }
-    MutexLock l(&mu_);
-    intptr_t x = GetAllowed();
-    if (x <= 0) {
-      return false;
-    } else {
-      SetAllowed(x - 1);
+  bool Acquire() {
+    int old_acquires_allowed =
+        acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+    if (old_acquires_allowed > 0)
       return true;
-    }
+
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+    return false;
   }
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() LOCKS_EXCLUDED(mu_) {
-    MutexLock l(&mu_);
-    SetAllowed(GetAllowed() + 1);
+  void Release() {
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
   }
 
  private:
-  port::Mutex mu_;
-  port::AtomicPointer allowed_;
-
-  intptr_t GetAllowed() const {
-    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
-  }
-
-  void SetAllowed(intptr_t v) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
-    allowed_.Release_Store(reinterpret_cast<void*>(v));
-  }
-
-  Limiter(const Limiter&);
-  void operator=(const Limiter&);
+  // The number of available resources.
+  //
+  // This is a counter and is not tied to the invariants of any other class, so
+  // it can be operated on safely using std::memory_order_relaxed.
+  std::atomic<int> acquires_allowed_;
 };
 
 class WindowsSequentialFile : public SequentialFile {

From 9ce30510d482f5b2fa2965201453f0fc914f700c Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Mon, 11 Mar 2019 13:33:10 -0700
Subject: [PATCH 046/181] Deleted dangling reference to deleted
 atomic_pointer.h.

Forgot one reference to atomic_pointer.h in CMakeLists.txt
from the prior CL.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237870915
---
 CMakeLists.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1562e3e..934f15a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -127,7 +127,6 @@ target_sources(leveldb
     "${PROJECT_SOURCE_DIR}/db/version_set.h"
     "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
     "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
-    "${PROJECT_SOURCE_DIR}/port/atomic_pointer.h"
     "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
     "${PROJECT_SOURCE_DIR}/port/port.h"
     "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"

From 201f77d137f30ea46e789a2ad60e9119b6f990fc Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 19 Mar 2019 14:34:51 -0700
Subject: [PATCH 047/181] Inline defaults in options.

This CL moves default values for
leveldb::{Options,ReadOptions,WriteOptions} from constructors to member
declarations, and removes now-redundant comments stating the defaults.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239271242
---
 db/fault_injection_test.cc |  1 -
 include/leveldb/options.h  | 66 +++++++++++---------------------------
 util/options.cc            | 15 +--------
 3 files changed, 20 insertions(+), 62 deletions(-)

diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index b3429ac..1f72984 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -469,7 +469,6 @@ class FaultInjectionTest {
 
   void DeleteAllData() {
     Iterator* iter = db_->NewIterator(ReadOptions());
-    WriteOptions options;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
     }
diff --git a/include/leveldb/options.h b/include/leveldb/options.h
index b6ddbd8..90aa19e 100644
--- a/include/leveldb/options.h
+++ b/include/leveldb/options.h
@@ -42,20 +42,17 @@ struct LEVELDB_EXPORT Options {
   const Comparator* comparator;
 
   // If true, the database will be created if it is missing.
-  // Default: false
-  bool create_if_missing;
+  bool create_if_missing = false;
 
   // If true, an error is raised if the database already exists.
-  // Default: false
-  bool error_if_exists;
+  bool error_if_exists = false;
 
   // If true, the implementation will do aggressive checking of the
   // data it is processing and will stop early if it detects any
   // errors.  This may have unforeseen ramifications: for example, a
   // corruption of one DB entry may cause a large number of entries to
   // become unreadable or for the entire DB to become unopenable.
-  // Default: false
-  bool paranoid_checks;
+  bool paranoid_checks = false;
 
   // Use the specified object to interact with the environment,
   // e.g. to read/write files, schedule background work, etc.
@@ -65,8 +62,7 @@ struct LEVELDB_EXPORT Options {
   // Any internal progress/error information generated by the db will
   // be written to info_log if it is non-null, or to a file stored
   // in the same directory as the DB contents if info_log is null.
-  // Default: nullptr
-  Logger* info_log;
+  Logger* info_log = nullptr;
 
   // -------------------
   // Parameters that affect performance
@@ -79,39 +75,30 @@ struct LEVELDB_EXPORT Options {
   // so you may wish to adjust this parameter to control memory usage.
   // Also, a larger write buffer will result in a longer recovery time
   // the next time the database is opened.
-  //
-  // Default: 4MB
-  size_t write_buffer_size;
+  size_t write_buffer_size = 4 * 1024 * 1024;
 
   // Number of open files that can be used by the DB.  You may need to
   // increase this if your database has a large working set (budget
   // one open file per 2MB of working set).
-  //
-  // Default: 1000
-  int max_open_files;
+  int max_open_files = 1000;
 
   // Control over blocks (user data is stored in a set of blocks, and
   // a block is the unit of reading from disk).
 
   // If non-null, use the specified cache for blocks.
   // If null, leveldb will automatically create and use an 8MB internal cache.
-  // Default: nullptr
-  Cache* block_cache;
+  Cache* block_cache = nullptr;
 
   // Approximate size of user data packed per block.  Note that the
   // block size specified here corresponds to uncompressed data.  The
   // actual size of the unit read from disk may be smaller if
   // compression is enabled.  This parameter can be changed dynamically.
-  //
-  // Default: 4K
-  size_t block_size;
+  size_t block_size = 4 * 1024;
 
   // Number of keys between restart points for delta encoding of keys.
   // This parameter can be changed dynamically.  Most clients should
   // leave this parameter alone.
-  //
-  // Default: 16
-  int block_restart_interval;
+  int block_restart_interval = 16;
 
   // Leveldb will write up to this amount of bytes to a file before
   // switching to a new one.
@@ -121,9 +108,7 @@ struct LEVELDB_EXPORT Options {
   // compactions and hence longer latency/performance hiccups.
   // Another reason to increase this parameter might be when you are
   // initially populating a large database.
-  //
-  // Default: 2MB
-  size_t max_file_size;
+  size_t max_file_size = 2 * 1024 * 1024;
 
   // Compress blocks using the specified compression algorithm.  This
   // parameter can be changed dynamically.
@@ -139,20 +124,18 @@ struct LEVELDB_EXPORT Options {
   // worth switching to kNoCompression.  Even if the input data is
   // incompressible, the kSnappyCompression implementation will
   // efficiently detect that and will switch to uncompressed mode.
-  CompressionType compression;
+  CompressionType compression = kSnappyCompression;
 
   // EXPERIMENTAL: If true, append to existing MANIFEST and log files
   // when a database is opened.  This can significantly speed up open.
   //
   // Default: currently false, but may become true later.
-  bool reuse_logs;
+  bool reuse_logs = false;
 
   // If non-null, use the specified filter policy to reduce disk reads.
   // Many applications will benefit from passing the result of
   // NewBloomFilterPolicy() here.
-  //
-  // Default: nullptr
-  const FilterPolicy* filter_policy;
+  const FilterPolicy* filter_policy = nullptr;
 
   // Create an Options object with default values for all fields.
   Options();
@@ -162,26 +145,19 @@ struct LEVELDB_EXPORT Options {
 struct LEVELDB_EXPORT ReadOptions {
   // If true, all data read from underlying storage will be
   // verified against corresponding checksums.
-  // Default: false
-  bool verify_checksums;
+  bool verify_checksums = false;
 
   // Should the data read for this iteration be cached in memory?
   // Callers may wish to set this field to false for bulk scans.
-  // Default: true
-  bool fill_cache;
+  bool fill_cache = true;
 
   // If "snapshot" is non-null, read as of the supplied snapshot
   // (which must belong to the DB that is being read and which must
   // not have been released).  If "snapshot" is null, use an implicit
   // snapshot of the state at the beginning of this read operation.
-  // Default: nullptr
-  const Snapshot* snapshot;
+  const Snapshot* snapshot = nullptr;
 
-  ReadOptions()
-      : verify_checksums(false),
-        fill_cache(true),
-        snapshot(nullptr) {
-  }
+  ReadOptions() = default;
 };
 
 // Options that control write operations
@@ -200,13 +176,9 @@ struct LEVELDB_EXPORT WriteOptions {
   // crash semantics as the "write()" system call.  A DB write
   // with sync==true has similar crash semantics to a "write()"
   // system call followed by "fsync()".
-  //
-  // Default: false
-  bool sync;
+  bool sync = false;
 
-  WriteOptions()
-      : sync(false) {
-  }
+  WriteOptions() = default;
 };
 
 }  // namespace leveldb
diff --git a/util/options.cc b/util/options.cc
index 351fa39..63284f8 100644
--- a/util/options.cc
+++ b/util/options.cc
@@ -11,20 +11,7 @@ namespace leveldb {
 
 Options::Options()
     : comparator(BytewiseComparator()),
-      create_if_missing(false),
-      error_if_exists(false),
-      paranoid_checks(false),
-      env(Env::Default()),
-      info_log(nullptr),
-      write_buffer_size(4<<20),
-      max_open_files(1000),
-      block_cache(nullptr),
-      block_size(4096),
-      block_restart_interval(16),
-      max_file_size(2<<20),
-      compression(kSnappyCompression),
-      reuse_logs(false),
-      filter_policy(nullptr) {
+      env(Env::Default()) {
 }
 
 }  // namespace leveldb

From ce399ac28af7023b1aff0ede4986cb6d89b3c0b5 Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Tue, 19 Mar 2019 17:10:56 -0700
Subject: [PATCH 048/181] Always copy bytes to scratch buffer when reading
 w/MemEnv.

FileState::Read (used by InMemoryEnv) creates a new Slice when reading.
If all the bytes for the read are in the first block, then the Slice
points to the private block data in FileState and is not copied to the
|scratch| buffer.

A recent change allows files in InMemEnv to be overwritten, which deletes
these blocks and can leave such a Slice with a dangling pointer. This
change fixes the bug by always copying to the |scratch| buffer.
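
A standalone sketch of the always-copy contract (simplified; not the memenv
code itself): the requested bytes are copied block by block into the
caller-owned |scratch| buffer, so the returned data never aliases the file's
internal blocks and cannot dangle when those blocks are later deleted.

  #include <algorithm>
  #include <cstddef>
  #include <cstring>
  #include <string>
  #include <vector>

  static const char* ReadAlwaysCopy(const std::vector<std::string>& blocks,
                                    std::size_t block_size, std::size_t offset,
                                    std::size_t n, char* scratch) {
    std::size_t block = offset / block_size;
    std::size_t block_offset = offset % block_size;
    char* dst = scratch;
    while (n > 0) {
      std::size_t avail = std::min(block_size - block_offset, n);
      std::memcpy(dst, blocks[block].data() + block_offset, avail);
      dst += avail;
      n -= avail;
      ++block;
      block_offset = 0;
    }
    return scratch;  // always points into caller-owned memory
  }

  int main() {
    std::vector<std::string> blocks = {"hello, ", "world"};  // block size 7
    char scratch[16];
    const char* result = ReadAlwaysCopy(blocks, 7, 3, 6, scratch);
    blocks.clear();  // source blocks gone; result is still valid (it was copied)
    return std::strncmp(result, "lo, wo", 6) == 0 ? 0 : 1;
  }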

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239301930
---
 helpers/memenv/memenv.cc | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index b78a998..ff384e4 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -82,13 +82,6 @@ class FileState {
     assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
     size_t block = static_cast<size_t>(offset / kBlockSize);
     size_t block_offset = offset % kBlockSize;
-
-    if (n <= kBlockSize - block_offset) {
-      // The requested bytes are all in the first block.
-      *result = Slice(blocks_[block] + block_offset, n);
-      return Status::OK();
-    }
-
     size_t bytes_to_copy = n;
     char* dst = scratch;
 

From ea49b27d062c4bc998616cef7944f7f9088a327d Mon Sep 17 00:00:00 2001
From: cmumford <cmumford@google.com>
Date: Tue, 19 Mar 2019 17:30:42 -0700
Subject: [PATCH 049/181] Switch corruption_test to use InMemEnv.

This change switches corruption_test, which previously used direct file
I/O to corrupt table files for open databases, to use InMemEnv. Using an
Env eliminates some platform dependencies, thus simplifying the tests.
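
As a rough sketch of the Env-only approach (names simplified; the test's
actual code is in the hunk below), corrupting a file needs nothing beyond
the generic helpers in "leveldb/env.h", so no stat()/open() calls remain:

  // Assumes a leveldb::Env* env, a file name fname, and offset/bytes_to_corrupt
  // are already in hand.
  std::string contents;
  leveldb::Status s = leveldb::ReadFileToString(env, fname, &contents);
  if (s.ok()) {
    for (int i = 0; i < bytes_to_corrupt; i++) {
      contents[i + offset] ^= 0x80;  // flip one bit in each targeted byte
    }
    s = leveldb::WriteStringToFile(env, contents, fname);
  }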

Also removed EnvWindowsTestHelper::RelaxFilePermissions().  This was
only added because the Windows Env opens files for exclusive access.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239305329
---
 db/corruption_test.cc          | 58 +++++++---------------------------
 util/env_windows.cc            | 20 ------------
 util/env_windows_test_helper.h |  5 ---
 util/testutil.h                |  6 +++-
 4 files changed, 17 insertions(+), 72 deletions(-)

diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 98aaf8c..d50785a 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -4,12 +4,8 @@
 
 #include "leveldb/db.h"
 
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
 #include <sys/types.h>
 #include "leveldb/cache.h"
-#include "leveldb/env.h"
 #include "leveldb/table.h"
 #include "leveldb/write_batch.h"
 #include "db/db_impl.h"
@@ -20,10 +16,6 @@
 #include "util/testharness.h"
 #include "util/testutil.h"
 
-#if defined(LEVELDB_PLATFORM_WINDOWS)
-#include "util/env_windows_test_helper.h"
-#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
-
 namespace leveldb {
 
 static const int kValueSize = 1000;
@@ -36,22 +28,11 @@ class CorruptionTest {
   Options options_;
   DB* db_;
 
-#if defined(LEVELDB_PLATFORM_WINDOWS)
-  static void SetFileLimits(int mmap_limit) {
-    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
-  }
-
-  // TODO(cmumford): Modify corruption_test to use MemEnv and remove.
-  static void RelaxFilePermissions() {
-    EnvWindowsTestHelper::RelaxFilePermissions();
-  }
-#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
-
   CorruptionTest() {
     tiny_cache_ = NewLRUCache(100);
     options_.env = &env_;
     options_.block_cache = tiny_cache_;
-    dbname_ = test::TmpDir() + "/corruption_test";
+    dbname_ = "/memenv/corruption_test";
     DestroyDB(dbname_, options_);
 
     db_ = nullptr;
@@ -62,7 +43,6 @@ class CorruptionTest {
 
   ~CorruptionTest() {
      delete db_;
-     DestroyDB(dbname_, Options());
      delete tiny_cache_;
   }
 
@@ -141,7 +121,7 @@ class CorruptionTest {
   void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
     // Pick file to corrupt
     std::vector<std::string> filenames;
-    ASSERT_OK(env_.GetChildren(dbname_, &filenames));
+    ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
     uint64_t number;
     FileType type;
     std::string fname;
@@ -156,35 +136,32 @@ class CorruptionTest {
     }
     ASSERT_TRUE(!fname.empty()) << filetype;
 
-    struct stat sbuf;
-    if (stat(fname.c_str(), &sbuf) != 0) {
-      const char* msg = strerror(errno);
-      ASSERT_TRUE(false) << fname << ": " << msg;
-    }
+    uint64_t file_size;
+    ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
 
     if (offset < 0) {
       // Relative to end of file; make it absolute
-      if (-offset > sbuf.st_size) {
+      if (-offset > file_size) {
         offset = 0;
       } else {
-        offset = sbuf.st_size + offset;
+        offset = file_size + offset;
       }
     }
-    if (offset > sbuf.st_size) {
-      offset = sbuf.st_size;
+    if (offset > file_size) {
+      offset = file_size;
     }
-    if (offset + bytes_to_corrupt > sbuf.st_size) {
-      bytes_to_corrupt = sbuf.st_size - offset;
+    if (offset + bytes_to_corrupt > file_size) {
+      bytes_to_corrupt = file_size - offset;
     }
 
     // Do it
     std::string contents;
-    Status s = ReadFileToString(Env::Default(), fname, &contents);
+    Status s = ReadFileToString(env_.target(), fname, &contents);
     ASSERT_TRUE(s.ok()) << s.ToString();
     for (int i = 0; i < bytes_to_corrupt; i++) {
       contents[i + offset] ^= 0x80;
     }
-    s = WriteStringToFile(Env::Default(), contents, fname);
+    s = WriteStringToFile(env_.target(), contents, fname);
     ASSERT_TRUE(s.ok()) << s.ToString();
   }
 
@@ -385,16 +362,5 @@ TEST(CorruptionTest, UnrelatedKeys) {
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
-#if defined(LEVELDB_PLATFORM_WINDOWS)
-  // When Windows maps the contents of a file into memory, even if read/write,
-  // subsequent attempts to open that file for write access will fail. Forcing
-  // all RandomAccessFile instances to use base file I/O (e.g. ReadFile)
-  // allows these tests to open files in order to corrupt their contents.
-  leveldb::CorruptionTest::SetFileLimits(0);
-
-  // Allow this test to write to (and corrupt) files which are normally
-  // open for exclusive read access.
-  leveldb::CorruptionTest::RelaxFilePermissions();
-#endif  // defined(LEVELDB_PLATFORM_WINDOWS)
   return leveldb::test::RunAllTests();
 }
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 3b4496b..93555a8 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -43,9 +43,6 @@ constexpr int kDefaultMmapLimit = sizeof(void*) >= 8 ? 1000 : 0;
 // Modified by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
 int g_mmap_limit = kDefaultMmapLimit;
 
-// Relax some file access permissions for testing.
-bool g_relax_permissions = false;
-
 std::string GetWindowsErrorMessage(DWORD error_code) {
   std::string message;
   char* error_text = nullptr;
@@ -366,10 +363,6 @@ class WindowsEnv : public Env {
     *result = nullptr;
     DWORD desired_access = GENERIC_READ;
     DWORD share_mode = FILE_SHARE_READ;
-    if (g_relax_permissions) {
-      desired_access |= GENERIC_WRITE;
-      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
-    }
     ScopedHandle handle =
         ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
                       OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
@@ -385,10 +378,6 @@ class WindowsEnv : public Env {
     *result = nullptr;
     DWORD desired_access = GENERIC_READ;
     DWORD share_mode = FILE_SHARE_READ;
-    if (g_relax_permissions) {
-      // desired_access |= GENERIC_WRITE;
-      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
-    }
     DWORD file_flags = FILE_ATTRIBUTE_READONLY;
 
     ScopedHandle handle =
@@ -433,10 +422,6 @@ class WindowsEnv : public Env {
                          WritableFile** result) override {
     DWORD desired_access = GENERIC_WRITE;
     DWORD share_mode = 0;
-    if (g_relax_permissions) {
-      desired_access |= GENERIC_READ;
-      share_mode |= FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
-    }
 
     ScopedHandle handle =
         ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
@@ -721,11 +706,6 @@ void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
   g_mmap_limit = limit;
 }
 
-void EnvWindowsTestHelper::RelaxFilePermissions() {
-  assert(default_env == nullptr);
-  g_relax_permissions = true;
-}
-
 Env* Env::Default() {
   std::call_once(once, InitDefaultEnv);
   return default_env;
diff --git a/util/env_windows_test_helper.h b/util/env_windows_test_helper.h
index 5ffbe44..e6f6020 100644
--- a/util/env_windows_test_helper.h
+++ b/util/env_windows_test_helper.h
@@ -18,11 +18,6 @@ class EnvWindowsTestHelper {
   // Set the maximum number of read-only files that will be mapped via mmap.
   // Must be called before creating an Env.
   static void SetReadOnlyMMapLimit(int limit);
-
-  // Relax file permissions for tests. This results in most files being opened
-  // with read-write permissions. This is helpful for corruption tests that
-  // need to corrupt the database files for open databases.
-  static void RelaxFilePermissions();
 };
 
 }  // namespace leveldb
diff --git a/util/testutil.h b/util/testutil.h
index dc77ac3..3934242 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -5,6 +5,7 @@
 #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 
+#include "helpers/memenv/memenv.h"
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
 #include "util/random.h"
@@ -32,9 +33,12 @@ class ErrorEnv : public EnvWrapper {
   bool writable_file_error_;
   int num_writable_file_errors_;
 
-  ErrorEnv() : EnvWrapper(Env::Default()),
+  ErrorEnv() : EnvWrapper(NewMemEnv(Env::Default())),
                writable_file_error_(false),
                num_writable_file_errors_(0) { }
+  ~ErrorEnv() override {
+    delete target();
+  }
 
   virtual Status NewWritableFile(const std::string& fname,
                                  WritableFile** result) {

From 15e227896621d01ebad4c5d4b3cc82a7a9b5b30b Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 20 Mar 2019 12:41:49 -0700
Subject: [PATCH 050/181] Use override consistently in leveldb::test::ErrorEnv.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239453565
---
 util/testutil.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/util/testutil.h b/util/testutil.h
index 3934242..a568824 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -40,8 +40,8 @@ class ErrorEnv : public EnvWrapper {
     delete target();
   }
 
-  virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
+  Status NewWritableFile(const std::string& fname,
+                         WritableFile** result) override {
     if (writable_file_error_) {
       ++num_writable_file_errors_;
       *result = nullptr;
@@ -50,8 +50,8 @@ class ErrorEnv : public EnvWrapper {
     return target()->NewWritableFile(fname, result);
   }
 
-  virtual Status NewAppendableFile(const std::string& fname,
-                                   WritableFile** result) {
+  Status NewAppendableFile(const std::string& fname,
+                           WritableFile** result) override {
     if (writable_file_error_) {
       ++num_writable_file_errors_;
       *result = nullptr;

From 6571279d6de21fe33caa31b2ea4170d34b15b10e Mon Sep 17 00:00:00 2001
From: usurai <9886192+usurai@users.noreply.github.com>
Date: Thu, 21 Mar 2019 22:58:29 +0800
Subject: [PATCH 051/181] fix a typo in the comment of skiplist_test.cc (#664)

---
 db/skiplist_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index c4cf146..38c1941 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -131,7 +131,7 @@ TEST(SkipTest, InsertAndLookup) {
 // concurrent readers (with no synchronization other than when a
 // reader's iterator is created), the reader always observes all the
 // data that was present in the skip list when the iterator was
-// constructor.  Because insertions are happening concurrently, we may
+// constructed.  Because insertions are happening concurrently, we may
 // also observe new values that were inserted since the iterator was
 // constructed, but we should never miss any values that were present
 // at iterator construction time.

From 7035af5fc36657447054617759854a726d31dbe0 Mon Sep 17 00:00:00 2001
From: Felipe Oliveira Carvalho <felipekde@gmail.com>
Date: Thu, 21 Mar 2019 16:45:04 +0100
Subject: [PATCH 052/181] Two small fixes for the Windows implementation (#661)

* Check if NOMINMAX is defined before defining it (see the sketch below)

* Pass char* for a %s format in a snprintf call
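
For context, a standalone sketch (not part of this patch) of why the guard
matters: <windows.h> defines min/max as function-style macros unless NOMINMAX
is set, which breaks std::max calls, and an unconditional #define can trigger
a macro-redefinition warning when the build already defines NOMINMAX (for
example via /DNOMINMAX=1).

  // Standalone sketch, hypothetical file: guard the define so builds that
  // already pass /DNOMINMAX do not get a redefinition warning.
  #ifndef NOMINMAX
  #define NOMINMAX
  #endif  // ifndef NOMINMAX
  // #include <windows.h>  // on Windows; with NOMINMAX, min/max stay undefined
  #include <algorithm>

  int ClampToNonNegative(int v) {
    return std::max(v, 0);  // would fail to compile if max were a macro
  }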
---
 util/env_windows.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/util/env_windows.cc b/util/env_windows.cc
index 93555a8..14e41e9 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -4,7 +4,9 @@
 
 // Prevent Windows headers from defining min/max macros and instead
 // use STL.
+#ifndef NOMINMAX
 #define NOMINMAX
+#endif  // ifndef NOMINMAX
 #include <windows.h>
 
 #include <algorithm>

From cf1b5f473259e46c667f3fb5a28bcd884ee3a102 Mon Sep 17 00:00:00 2001
From: Cheng Chang <myairia@gmail.com>
Date: Fri, 22 Mar 2019 17:32:20 +0800
Subject: [PATCH 053/181] Remove unnecessary bit operation.
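
A standalone check (not leveldb code, just an illustration) of why the mask is
redundant: the result is assigned to an unsigned char, which keeps only the low
8 bits, so masking with B-1 (0x7F) before setting the high bit produces the
same byte as setting the high bit alone.

  // Standalone sketch: both expressions yield the same byte for any v,
  // because the unsigned char store truncates to the low 8 bits.
  #include <cassert>
  #include <cstdint>

  int main() {
    const int B = 128;
    const uint64_t samples[] = {0, 127, 128, 300, 0xDEADBEEF, ~0ull};
    for (uint64_t v : samples) {
      unsigned char with_mask = static_cast<unsigned char>((v & (B - 1)) | B);
      unsigned char without_mask = static_cast<unsigned char>(v | B);
      assert(with_mask == without_mask);
    }
    return 0;
  }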

---
 util/coding.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/coding.cc b/util/coding.cc
index 9e72613..1a9e333 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -82,7 +82,7 @@ char* EncodeVarint64(char* dst, uint64_t v) {
   static const int B = 128;
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   while (v >= B) {
-    *(ptr++) = (v & (B-1)) | B;
+    *(ptr++) = v | B;
     v >>= 7;
   }
   *(ptr++) = static_cast<unsigned char>(v);

From 6188a54ce95b47cc6bd398d7f2eb45d061857e45 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Thu, 21 Mar 2019 16:15:30 -0700
Subject: [PATCH 054/181] leveldb: Add tests for empty keys and values.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239695281
---
 db/db_test.cc | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/db/db_test.cc b/db/db_test.cc
index e889a74..3ab4aee 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -558,6 +558,26 @@ TEST(DBTest, Empty) {
   } while (ChangeOptions());
 }
 
+TEST(DBTest, EmptyKey) {
+  do {
+    ASSERT_OK(Put("", "v1"));
+    ASSERT_EQ("v1", Get(""));
+    ASSERT_OK(Put("", "v2"));
+    ASSERT_EQ("v2", Get(""));
+  } while (ChangeOptions());
+}
+
+TEST(DBTest, EmptyValue) {
+  do {
+    ASSERT_OK(Put("key", "v1"));
+    ASSERT_EQ("v1", Get("key"));
+    ASSERT_OK(Put("key", ""));
+    ASSERT_EQ("", Get("key"));
+    ASSERT_OK(Put("key", "v2"));
+    ASSERT_EQ("v2", Get("key"));
+  } while (ChangeOptions());
+}
+
 TEST(DBTest, ReadWrite) {
   do {
     ASSERT_OK(Put("foo", "v1"));

From bd24b963060861518c6648925f9708178562c992 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Tue, 26 Mar 2019 08:56:12 -0700
Subject: [PATCH 055/181] leveldb: Silence unused argument warnings in MSVC.

This CL uses a well-known workaround for silencing warnings about arguments that may be unused, depending on the build configuration. The silenced warnings were responsible for a large amount of noise in the MSVC build on Windows.
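
In isolation the workaround looks like the following sketch (hypothetical
function name, not the patched code): casting a parameter to void counts as a
use, so the unused-argument warning goes away in configurations that compile
out the real body.

  // Standalone sketch of the (void) cast idiom used throughout this patch.
  #include <cstddef>

  inline bool UncompressDisabledSketch(const char* input, size_t length,
                                       char* output) {
    // Silence compiler warnings about unused arguments in this configuration.
    (void)input;
    (void)length;
    (void)output;
    return false;
  }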

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=240357359
---
 port/port_stdcxx.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index e21fa70..d0609a8 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -91,6 +91,9 @@ inline bool Snappy_Compress(const char* input, size_t length,
   snappy::RawCompress(input, length, &(*output)[0], &outlen);
   output->resize(outlen);
   return true;
+#else
+  // Silence compiler warnings about unused arguments.
+  (void)input; (void)length; (void)output;
 #endif  // HAVE_SNAPPY
 
   return false;
@@ -101,6 +104,8 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
 #if HAVE_SNAPPY
   return snappy::GetUncompressedLength(input, length, result);
 #else
+  // Silence compiler warnings about unused arguments.
+  (void)input; (void)length; (void)result;
   return false;
 #endif  // HAVE_SNAPPY
 }
@@ -109,11 +114,15 @@ inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
 #if HAVE_SNAPPY
   return snappy::RawUncompress(input, length, output);
 #else
+  // Silence compiler warnings about unused arguments.
+  (void)input; (void)length; (void)output;
   return false;
 #endif  // HAVE_SNAPPY
 }
 
 inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
+  // Silence compiler warnings about unused arguments.
+  (void)func; (void)arg;
   return false;
 }
 
@@ -121,6 +130,8 @@ inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
 #if HAVE_CRC32C
   return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
 #else
+  // Silence compiler warnings about unused arguments.
+  (void)crc; (void)buf; (void)size;
   return 0;
 #endif  // HAVE_CRC32C
 }

From da94ac67e91679842a56a876f0b19b429d72de25 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Wed, 27 Mar 2019 12:41:45 -0700
Subject: [PATCH 056/181] leveldb: Minor cleanup in ports.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=240619768
---
 port/port_stdcxx.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index d0609a8..7638ded 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -29,12 +29,13 @@
 #include <snappy.h>
 #endif  // HAVE_SNAPPY
 
-#include <stddef.h>
-#include <stdint.h>
 #include <cassert>
+#include <cstddef>
+#include <cstdint>
 #include <condition_variable>  // NOLINT
 #include <mutex>               // NOLINT
 #include <string>
+
 #include "port/thread_annotations.h"
 
 namespace leveldb {
@@ -84,7 +85,7 @@ class CondVar {
 };
 
 inline bool Snappy_Compress(const char* input, size_t length,
-                            ::std::string* output) {
+                            std::string* output) {
 #if HAVE_SNAPPY
   output->resize(snappy::MaxCompressedLength(length));
   size_t outlen;

From 416344de2fdffb3f17c565b984885d0122bfa1e9 Mon Sep 17 00:00:00 2001
From: costan <costan@google.com>
Date: Thu, 28 Mar 2019 09:17:47 -0700
Subject: [PATCH 057/181] leveldb: Register in copybara whitelist.

The documentation recommends modifying the whitelist after evaluating Copybara. However, evaluating requires significant workarounds without the whitelist entry. So, this CL adds leveldb to the whitelist early.

leveldb is currently open sourced to https://github.com/google/leveldb using MOE.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=240786286
---
 copy.bara.sky | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 copy.bara.sky

diff --git a/copy.bara.sky b/copy.bara.sky
new file mode 100644
index 0000000..e69de29

From 35619d248d909b197f68226c7d0a9ff947b82e8a Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Fri, 29 Mar 2019 13:58:44 -0700
Subject: [PATCH 058/181] Project import generated by Copybara.

PiperOrigin-RevId: 241045448
---
 copy.bara.sky | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 copy.bara.sky

diff --git a/copy.bara.sky b/copy.bara.sky
deleted file mode 100644
index e69de29..0000000

From 56178ddaf4d3ba6c8d1cfb218610b1be3f5aa710 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Fri, 29 Mar 2019 14:36:59 -0700
Subject: [PATCH 059/181] Update the version to 1.21 in preparation for a new
 release.

PiperOrigin-RevId: 241053616
---
 CMakeLists.txt       | 1 +
 include/leveldb/db.h | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 934f15a..e471a2a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,6 +3,7 @@
 # found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 cmake_minimum_required(VERSION 3.9)
+# Keep the version below in sync with the one in db.h
 project(leveldb VERSION 1.21.0 LANGUAGES C CXX)
 
 # This project can use C11, but will gracefully decay down to C89.
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 84c32bc..0239593 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -13,9 +13,9 @@
 
 namespace leveldb {
 
-// Update Makefile if you change these
+// Update CMakeLists.txt if you change these
 static const int kMajorVersion = 1;
-static const int kMinorVersion = 20;
+static const int kMinorVersion = 21;
 
 struct Options;
 struct ReadOptions;

From 952be04df6edb936b8f7d0f652861100a7f61e97 Mon Sep 17 00:00:00 2001
From: Pavel Pimenov <pavel.pimenov@gmail.com>
Date: Sun, 31 Mar 2019 10:46:31 +0300
Subject: [PATCH 060/181] Fix mkdir (windows)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4f8ce63..0121dd8 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
 First generate the Visual Studio 2017 project/solution files:
 
 ```bash
-mkdir -p build
+mkdir build
 cd build
 cmake -G "Visual Studio 15" ..
 ```

From 37300aa54b8256dd2edfd504942eb2bd20823647 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Mon, 1 Apr 2019 08:59:17 -0700
Subject: [PATCH 061/181] Restore soname versioning with CMake build

Before:

$ readelf -d build/libleveldb.so | grep soname
 0x000000000000000e (SONAME)             Library soname: [libleveldb.so]

After:
$ readelf -d build/libleveldb.so | grep soname
 0x000000000000000e (SONAME)             Library soname: [libleveldb.so.1]

This matches the soname from v1.20.

PiperOrigin-RevId: 241334113
---
 CMakeLists.txt | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e471a2a..ceb5dd9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -233,6 +233,10 @@ if(BUILD_SHARED_LIBS)
       # Used by include/export.h.
       LEVELDB_SHARED_LIBRARY
   )
+  set_target_properties(leveldb
+    PROPERTIES VERSION ${PROJECT_VERSION})
+  set_target_properties(leveldb
+    PROPERTIES SOVERSION 1)
 endif(BUILD_SHARED_LIBS)
 
 if(HAVE_CLANG_THREAD_SAFETY)

From 20fb601aa9f68ff0aa147df22524b7d01758552b Mon Sep 17 00:00:00 2001
From: Richard Cole <richcole@amazon.com>
Date: Sun, 24 Jan 2016 18:10:16 -0800
Subject: [PATCH 062/181] Fix snapshot compaction bug

Closes google/leveldb#320

During compaction it was possible that records from a block b1=(l1,u1)
would be pushed down from level i to level i+1. If there is a block
b2=(l2,u2) at level i with k1 = user_key(u1) = user_key(l2), then
a subsequent search for k1 will yield the record l2 which has a smaller
sequence number than u1 because the sort order for records sorts
increasing by user key but decreaing by sequence number.

This change adds a call to a new function AddBoundaryInputs to
SetupOtherInputs. AddBoundaryInputs searches for a block b2 matching the
criteria above and adds it to the set of files to be compacted. Whenever
AddBoundaryInputs is called it is important that the compaction fileset
in level i+1 (known as c->inputs_[1] in the code) be recomputed. Each
call to AddBoundaryInputs is followed by a call to GetOverlappingInputs.

SetupOtherInputs is called on both manual and automated compaction
passes. It is called for both level zero and for levels greater than 0.
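
As an illustration, a minimal standalone sketch of the boundary-expansion loop
described above, using simplified hypothetical Key/File types rather than
leveldb's InternalKey/FileMetaData: starting from the largest key already
chosen for compaction, keep pulling in the level-i file whose smallest key
sorts just past it but shares the same user key.

  // Standalone sketch of AddBoundaryInputs with simplified types. Internal
  // keys order by user key ascending, then by sequence number descending.
  #include <string>
  #include <vector>

  struct Key { std::string user; int seq; };  // higher seq == newer record
  struct File { Key smallest, largest; };

  bool KeyLess(const Key& a, const Key& b) {
    if (a.user != b.user) return a.user < b.user;
    return a.seq > b.seq;  // newer (larger seq) sorts first
  }

  void AddBoundaryInputsSketch(const std::vector<File*>& level_files,
                               std::vector<File*>* compaction_files) {
    if (compaction_files->empty()) return;
    Key largest = (*compaction_files)[0]->largest;
    for (File* f : *compaction_files) {
      if (KeyLess(largest, f->largest)) largest = f->largest;
    }
    bool continue_searching = true;
    while (continue_searching) {
      File* boundary = nullptr;
      for (File* f : level_files) {
        // f begins strictly after `largest` but with the same user key.
        if (KeyLess(largest, f->smallest) && f->smallest.user == largest.user &&
            (boundary == nullptr || KeyLess(f->smallest, boundary->smallest))) {
          boundary = f;
        }
      }
      if (boundary != nullptr) {
        compaction_files->push_back(boundary);  // compact the boundary file too
        largest = boundary->largest;
      } else {
        continue_searching = false;
      }
    }
  }

Leaving such a boundary file behind would let a later Get find the older
record at level i before reaching the newer one that was compacted into
level i+1.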

The original change posted in https://github.com/google/leveldb/pull/339
has been modified to also include changes made by Chris Mumford <cmumford@google.com>
in https://github.com/cmumford/leveldb/commit/4b72cb14f8da2aab12451c24b8e205aff686e9dc

  1. Releasing snapshots during test cleanup to avoid
     memory leak warnings.
  2. Refactored test to use testutil.h to be in line
     with other issue tests and to create the test
     database in the correct temporary location.
  3. Added copyright banner.

  Otherwise, just minor formatting and limiting character
  width to 80 characters.

Additionally the change was rebased on top of current master and
changes previously made to the Makefile were ported to the
CMakeLists.txt.

Testing Done:

  A test program (issue320_test) was constructed that performs mutations
  while snapshots are active. issue320_test fails without this bug fix
  after 64k writes. It passes with this bug fix. It was run with 200M
  writes and passed.

  Unit tests were written for the new function that was added to the
  code. "make test" was run and seen to pass.

Signed-off-by: Richard Cole <richcole@amazon.com>
---
 CMakeLists.txt          |   1 +
 db/version_set.cc       |  79 +++++++++++++++++++++
 db/version_set_test.cc  | 148 +++++++++++++++++++++++++++++++++++++++-
 issues/issue320_test.cc | 139 +++++++++++++++++++++++++++++++++++++
 4 files changed, 366 insertions(+), 1 deletion(-)
 create mode 100644 issues/issue320_test.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 934f15a..aada6b7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,6 +296,7 @@ if(LEVELDB_BUILD_TESTS)
 
   leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
   leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
+  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")
 
   leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
   leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
diff --git a/db/version_set.cc b/db/version_set.cc
index 156a007..7891dfc 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1347,9 +1347,87 @@ Compaction* VersionSet::PickCompaction() {
   return c;
 }
 
+// find the largest key in a vector of files. returns true if files it not empty
+bool FindLargestKey(const InternalKeyComparator & icmp, const std::vector<FileMetaData*> & files, InternalKey *largestKey) {
+  if (files.empty()) {
+    return false;
+  }
+  *largestKey = files[0]->largest;
+  for (size_t i = 1; i < files.size(); ++i) {
+    FileMetaData* f = files[i];
+    if (icmp.Compare(f->largest, *largestKey) > 0) {
+      *largestKey = f->largest;
+    }
+  }
+  return true;
+}
+
+// find minimum file b2=(l2, u2) in level file for which l2 > u1 and user_key(l2) = user_key(u1)
+FileMetaData* FindSmallestBoundaryFile(const InternalKeyComparator & icmp,
+                                       const std::vector<FileMetaData*> & levelFiles,
+                                       const InternalKey & largestKey) {
+  const Comparator* user_cmp = icmp.user_comparator();
+  FileMetaData* smallestBoundaryFile = NULL;
+  for (size_t i = 0; i < levelFiles.size(); ++i) {
+    FileMetaData* f = levelFiles[i];
+    if (icmp.Compare(f->smallest, largestKey) > 0 &&
+        user_cmp->Compare(f->smallest.user_key(), largestKey.user_key()) == 0) {
+      if (smallestBoundaryFile == NULL ||
+          icmp.Compare(f->smallest, smallestBoundaryFile->smallest) < 0) {
+        smallestBoundaryFile = f;
+      }
+    }
+  }
+  return smallestBoundaryFile;
+}
+
+// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
+// user_key(u1) = user_key(l2), and if we compact b1 but not
+// b2 then a subsequent get operation will yield an incorrect
+// result because it will return the record from b2 in level
+// i rather than from b1 because it searches level by level
+// for records matching the supplied user key.
+//
+// This function extracts the largest file b1 from compactionFiles
+// and then searches for a b2 in levelFiles for which user_key(u1) =
+// user_key(l2). If it finds such a file b2 (known as a boundary file)
+// it adds it to compactionFiles and then searches again using this
+// new upper bound.
+//
+// parameters:
+//   in     levelFiles:      list of files to search for boundary files
+//   in/out compactionFiles: list of files to extend by adding boundary files
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+                       const std::vector<FileMetaData*>& levelFiles,
+                       std::vector<FileMetaData*>* compactionFiles) {
+  InternalKey largestKey;
+
+  // find largestKey in compactionFiles, quick return if compactionFiles is
+  // empty
+  if (!FindLargestKey(icmp, *compactionFiles, &largestKey)) {
+    return;
+  }
+
+  bool continueSearching = true;
+  while (continueSearching) {
+    FileMetaData* smallestBoundaryFile =
+        FindSmallestBoundaryFile(icmp, levelFiles, largestKey);
+
+    // if a boundary file was found advance largestKey, otherwise we're done
+    if (smallestBoundaryFile != NULL) {
+      compactionFiles->push_back(smallestBoundaryFile);
+      largestKey = smallestBoundaryFile->largest;
+    } else {
+      continueSearching = false;
+    }
+  }
+}
+
 void VersionSet::SetupOtherInputs(Compaction* c) {
   const int level = c->level();
   InternalKey smallest, largest;
+
+  AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
   GetRange(c->inputs_[0], &smallest, &largest);
 
   current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
@@ -1363,6 +1441,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   if (!c->inputs_[1].empty()) {
     std::vector<FileMetaData*> expanded0;
     current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
+    AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
     const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
     const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
     const int64_t expanded0_size = TotalFileSize(expanded0);
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index d21433e..090f115 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -172,7 +172,153 @@ TEST(FindFileTest, OverlappingFiles) {
   ASSERT_TRUE(Overlaps("600", "700"));
 }
 
-}  // namespace leveldb
+void AddBoundaryInputs(const InternalKeyComparator &icmp,
+                       const std::vector<FileMetaData *> &levelFiles,
+                       std::vector<FileMetaData *> *compactionFiles);
+
+class AddBoundaryInputsTest {
+ public:
+  std::vector<FileMetaData *> levelFiles_;
+  std::vector<FileMetaData *> compactionFiles_;
+  std::vector<FileMetaData *> allFiles_;
+  InternalKeyComparator icmp_;
+
+  AddBoundaryInputsTest() : icmp_(BytewiseComparator()){};
+
+  ~AddBoundaryInputsTest() {
+    for (size_t i = 0; i < allFiles_.size(); ++i) {
+      delete allFiles_[i];
+    }
+    allFiles_.clear();
+  };
+
+  FileMetaData *CreateFileMetaData(uint64_t number, InternalKey smallest,
+                                   InternalKey largest) {
+    FileMetaData *f = new FileMetaData();
+    f->number = number;
+    f->smallest = smallest;
+    f->largest = largest;
+    allFiles_.push_back(f);
+    return f;
+  }
+};
+
+TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_TRUE(compactionFiles_.empty());
+  ASSERT_TRUE(levelFiles_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+  FileMetaData *f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  compactionFiles_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_EQ(1, compactionFiles_.size());
+  ASSERT_EQ(f1, compactionFiles_[0]);
+  ASSERT_TRUE(levelFiles_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+  FileMetaData *f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  levelFiles_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_TRUE(compactionFiles_.empty());
+  ASSERT_EQ(1, levelFiles_.size());
+  ASSERT_EQ(f1, levelFiles_[0]);
+}
+
+TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+  FileMetaData *f1 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("100", 1, kTypeValue)));
+  FileMetaData *f2 =
+      CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
+                         InternalKey(InternalKey("200", 1, kTypeValue)));
+  FileMetaData *f3 =
+      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+
+  levelFiles_.push_back(f3);
+  levelFiles_.push_back(f2);
+  levelFiles_.push_back(f1);
+  compactionFiles_.push_back(f2);
+  compactionFiles_.push_back(f3);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_EQ(2, compactionFiles_.size());
+}
+
+TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+  FileMetaData *f1 =
+      CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
+                         InternalKey(InternalKey("100", 2, kTypeValue)));
+  FileMetaData *f2 =
+      CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
+                         InternalKey(InternalKey("200", 3, kTypeValue)));
+  FileMetaData *f3 =
+      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+
+  levelFiles_.push_back(f3);
+  levelFiles_.push_back(f2);
+  levelFiles_.push_back(f1);
+  compactionFiles_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_EQ(2, compactionFiles_.size());
+  ASSERT_EQ(f1, compactionFiles_[0]);
+  ASSERT_EQ(f2, compactionFiles_[1]);
+}
+
+TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+  FileMetaData *f1 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData *f2 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData *f3 =
+      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+                         InternalKey(InternalKey("100", 3, kTypeValue)));
+
+  levelFiles_.push_back(f2);
+  levelFiles_.push_back(f3);
+  levelFiles_.push_back(f1);
+  compactionFiles_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_EQ(3, compactionFiles_.size());
+  ASSERT_EQ(f1, compactionFiles_[0]);
+  ASSERT_EQ(f3, compactionFiles_[1]);
+  ASSERT_EQ(f2, compactionFiles_[2]);
+}
+
+TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+  FileMetaData *f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData *f2 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData *f3 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData *f4 = CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), InternalKey(InternalKey("100", 3, kTypeValue)));
+
+  levelFiles_.push_back(f2);
+  levelFiles_.push_back(f3);
+  levelFiles_.push_back(f4);
+
+  compactionFiles_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
+  ASSERT_EQ(3, compactionFiles_.size());
+  ASSERT_EQ(f1, compactionFiles_[0]);
+  ASSERT_EQ(f4, compactionFiles_[1]);
+  ASSERT_EQ(f3, compactionFiles_[2]);
+}
+
+} // namespace leveldb
 
 int main(int argc, char** argv) {
   return leveldb::test::RunAllTests();
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
new file mode 100644
index 0000000..619ddb5
--- /dev/null
+++ b/issues/issue320_test.cc
@@ -0,0 +1,139 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <map>
+#include <vector>
+#include <memory>
+
+#include <math.h>
+
+#include <leveldb/db.h>
+#include <leveldb/write_batch.h>
+#include <util/testharness.h>
+
+using namespace std;
+
+namespace leveldb {
+
+namespace {
+
+unsigned int random(unsigned int max) {
+  return std::rand() % max;
+}
+
+string newString(int32_t index) {
+  const unsigned int len = 1024;
+  char bytes[len];
+  unsigned int i = 0;
+  while (i < 8) {
+    bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
+    ++i;
+  }
+  while (i < sizeof(bytes)) {
+    bytes[i] = 'a' + random(26);
+    ++i;
+  }
+  return string(bytes, sizeof(bytes));
+}
+
+}  // namespace
+
+class Issue320 { };
+
+TEST(Issue320, Test) {
+  std::srand(0);
+
+  bool delete_before_put = false;
+  bool keep_snapshots = true;
+
+  vector<pair<string, string>*> test_map(10000, nullptr);
+  vector<Snapshot const*> snapshots(100, nullptr);
+
+  DB* db;
+  Options options;
+  options.create_if_missing = true;
+
+  std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
+  ASSERT_OK(DB::Open(options, dbpath, &db));
+
+  unsigned int target_size = 10000;
+  unsigned int num_items = 0;
+  unsigned long count = 0;
+  string key;
+  string value, old_value;
+
+  WriteOptions writeOptions;
+  ReadOptions readOptions;
+  while (count < 200000) {
+    if ((++count % 1000) == 0) {
+      cout << "count: " << count << endl;
+    }
+
+    unsigned int index = random(test_map.size());
+    WriteBatch batch;
+
+    if (test_map[index] == nullptr) {
+      num_items++;
+      test_map[index] =
+          new pair<string, string>(newString(index), newString(index));
+      batch.Put(test_map[index]->first, test_map[index]->second);
+    } else {
+      ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
+      if (old_value != test_map[index]->second) {
+        cout << "ERROR incorrect value returned by Get" << endl;
+        cout << "  count=" << count << endl;
+        cout << "  old value=" << old_value << endl;
+        cout << "  test_map[index]->second=" << test_map[index]->second << endl;
+        cout << "  test_map[index]->first=" << test_map[index]->first << endl;
+        cout << "  index=" << index << endl;
+        ASSERT_EQ(old_value, test_map[index]->second);
+      }
+
+      if (num_items >= target_size && random(100) > 30) {
+        batch.Delete(test_map[index]->first);
+        delete test_map[index];
+        test_map[index] = nullptr;
+        --num_items;
+      } else {
+        test_map[index]->second = newString(index);
+        if (delete_before_put) batch.Delete(test_map[index]->first);
+        batch.Put(test_map[index]->first, test_map[index]->second);
+      }
+    }
+
+    ASSERT_OK(db->Write(writeOptions, &batch));
+
+    if (keep_snapshots && random(10) == 0) {
+      unsigned int i = random(snapshots.size());
+      if (snapshots[i] != nullptr) {
+        db->ReleaseSnapshot(snapshots[i]);
+      }
+      snapshots[i] = db->GetSnapshot();
+    }
+  }
+
+  for (Snapshot const* snapshot : snapshots) {
+    if (snapshot) {
+      db->ReleaseSnapshot(snapshot);
+    }
+  }
+
+  for (size_t i = 0; i < test_map.size(); ++i) {
+    if (test_map[i] != nullptr) {
+      delete test_map[i];
+      test_map[i] = nullptr;
+    }
+  }
+
+  delete db;
+  DestroyDB(dbpath, options);
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}

From 09fa8868dbe0cb2701f0560c59ebb63cc17f1271 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 1 Apr 2019 17:17:47 -0700
Subject: [PATCH 063/181] Align version/soversion CMake setup closer with other
 repositories.

PiperOrigin-RevId: 241432456
---
 CMakeLists.txt | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ceb5dd9..d50f6c2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -213,6 +213,10 @@ target_include_directories(leveldb
     $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
     $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
 )
+
+set_target_properties(leveldb
+  PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
 target_compile_definitions(leveldb
   PRIVATE
     # Used by include/export.h when building shared libraries.
@@ -233,10 +237,6 @@ if(BUILD_SHARED_LIBS)
       # Used by include/export.h.
       LEVELDB_SHARED_LIBRARY
   )
-  set_target_properties(leveldb
-    PROPERTIES VERSION ${PROJECT_VERSION})
-  set_target_properties(leveldb
-    PROPERTIES SOVERSION 1)
 endif(BUILD_SHARED_LIBS)
 
 if(HAVE_CLANG_THREAD_SAFETY)

From 71ed7c401ec1b1e38d6f7cb9eb2fcff93c24d1f1 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 11 Apr 2019 12:25:12 -0700
Subject: [PATCH 064/181] Fixed typo in comment in version_set.h.

Flagged by presubmit check.

PiperOrigin-RevId: 243118632
---
 db/version_set.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/version_set.h b/db/version_set.h
index 77b9895..0beae4d 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -375,7 +375,7 @@ class Compaction {
   // Each compaction reads inputs from "level_" and "level_+1"
   std::vector<FileMetaData*> inputs_[2];      // The two sets of inputs
 
-  // State used to check for number of of overlapping grandparent files
+  // State used to check for number of overlapping grandparent files
   // (parent == level_ + 1, grandparent == level_ + 2)
   std::vector<FileMetaData*> grandparents_;
   size_t grandparent_index_;  // Index in grandparent_starts_

From 65e86f75ea30e44bc65327f92a16328684269acb Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 11 Apr 2019 19:10:37 -0700
Subject: [PATCH 065/181] Fix formatting of recent snapshot compaction fix.

Fix variable names, line lengths, namespace use, and a few other
minor issues to conform to the Google C++ style guide.

PiperOrigin-RevId: 243187729
---
 db/version_set.cc       |  95 ++++++++++++------------
 db/version_set_test.cc  | 160 +++++++++++++++++++++-------------------
 issues/issue320_test.cc |  58 +++++++--------
 3 files changed, 158 insertions(+), 155 deletions(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 7891dfc..56493ac 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1347,78 +1347,81 @@ Compaction* VersionSet::PickCompaction() {
   return c;
 }
 
-// find the largest key in a vector of files. returns true if files it not empty
-bool FindLargestKey(const InternalKeyComparator & icmp, const std::vector<FileMetaData*> & files, InternalKey *largestKey) {
+// Finds the largest key in a vector of files. Returns true if files it not
+// empty.
+bool FindLargestKey(const InternalKeyComparator& icmp,
+                    const std::vector<FileMetaData*>& files,
+                    InternalKey* largest_key) {
   if (files.empty()) {
     return false;
   }
-  *largestKey = files[0]->largest;
+  *largest_key = files[0]->largest;
   for (size_t i = 1; i < files.size(); ++i) {
     FileMetaData* f = files[i];
-    if (icmp.Compare(f->largest, *largestKey) > 0) {
-      *largestKey = f->largest;
+    if (icmp.Compare(f->largest, *largest_key) > 0) {
+      *largest_key = f->largest;
     }
   }
   return true;
 }
 
-// find minimum file b2=(l2, u2) in level file for which l2 > u1 and user_key(l2) = user_key(u1)
-FileMetaData* FindSmallestBoundaryFile(const InternalKeyComparator & icmp,
-                                       const std::vector<FileMetaData*> & levelFiles,
-                                       const InternalKey & largestKey) {
+// Finds minimum file b2=(l2, u2) in level file for which l2 > u1 and
+// user_key(l2) = user_key(u1)
+FileMetaData* FindSmallestBoundaryFile(
+    const InternalKeyComparator& icmp,
+    const std::vector<FileMetaData*>& level_files,
+    const InternalKey& largest_key) {
   const Comparator* user_cmp = icmp.user_comparator();
-  FileMetaData* smallestBoundaryFile = NULL;
-  for (size_t i = 0; i < levelFiles.size(); ++i) {
-    FileMetaData* f = levelFiles[i];
-    if (icmp.Compare(f->smallest, largestKey) > 0 &&
-        user_cmp->Compare(f->smallest.user_key(), largestKey.user_key()) == 0) {
-      if (smallestBoundaryFile == NULL ||
-          icmp.Compare(f->smallest, smallestBoundaryFile->smallest) < 0) {
-        smallestBoundaryFile = f;
+  FileMetaData* smallest_boundary_file = nullptr;
+  for (size_t i = 0; i < level_files.size(); ++i) {
+    FileMetaData* f = level_files[i];
+    if (icmp.Compare(f->smallest, largest_key) > 0 &&
+        user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
+            0) {
+      if (smallest_boundary_file == nullptr ||
+          icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
+        smallest_boundary_file = f;
       }
     }
   }
-  return smallestBoundaryFile;
+  return smallest_boundary_file;
 }
 
-// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
-// user_key(u1) = user_key(l2), and if we compact b1 but not
-// b2 then a subsequent get operation will yield an incorrect
-// result because it will return the record from b2 in level
-// i rather than from b1 because it searches level by level
-// for records matching the supplied user key.
+// Extracts the largest file b1 from |compaction_files| and then searches for a
+// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
+// file b2 (known as a boundary file) it adds it to |compaction_files| and then
+// searches again using this new upper bound.
 //
-// This function extracts the largest file b1 from compactionFiles
-// and then searches for a b2 in levelFiles for which user_key(u1) =
-// user_key(l2). If it finds such a file b2 (known as a boundary file)
-// it adds it to compactionFiles and then searches again using this
-// new upper bound.
+// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
+// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a
+// subsequent get operation will yield an incorrect result because it will
+// return the record from b2 in level i rather than from b1 because it searches
+// level by level for records matching the supplied user key.
 //
 // parameters:
-//   in     levelFiles:      list of files to search for boundary files
-//   in/out compactionFiles: list of files to extend by adding boundary files
+//   in     level_files:      List of files to search for boundary files.
+//   in/out compaction_files: List of files to extend by adding boundary files.
 void AddBoundaryInputs(const InternalKeyComparator& icmp,
-                       const std::vector<FileMetaData*>& levelFiles,
-                       std::vector<FileMetaData*>* compactionFiles) {
-  InternalKey largestKey;
+                       const std::vector<FileMetaData*>& level_files,
+                       std::vector<FileMetaData*>* compaction_files) {
+  InternalKey largest_key;
 
-  // find largestKey in compactionFiles, quick return if compactionFiles is
-  // empty
-  if (!FindLargestKey(icmp, *compactionFiles, &largestKey)) {
+  // Quick return if compaction_files is empty.
+  if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
     return;
   }
 
-  bool continueSearching = true;
-  while (continueSearching) {
-    FileMetaData* smallestBoundaryFile =
-        FindSmallestBoundaryFile(icmp, levelFiles, largestKey);
+  bool continue_searching = true;
+  while (continue_searching) {
+    FileMetaData* smallest_boundary_file =
+        FindSmallestBoundaryFile(icmp, level_files, largest_key);
 
-    // if a boundary file was found advance largestKey, otherwise we're done
-    if (smallestBoundaryFile != NULL) {
-      compactionFiles->push_back(smallestBoundaryFile);
-      largestKey = smallestBoundaryFile->largest;
+    // If a boundary file was found advance largest_key, otherwise we're done.
+    if (smallest_boundary_file != NULL) {
+      compaction_files->push_back(smallest_boundary_file);
+      largest_key = smallest_boundary_file->largest;
     } else {
-      continueSearching = false;
+      continue_searching = false;
     }
   }
 }
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index 090f115..b32e2e5 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -172,154 +172,160 @@ TEST(FindFileTest, OverlappingFiles) {
   ASSERT_TRUE(Overlaps("600", "700"));
 }
 
-void AddBoundaryInputs(const InternalKeyComparator &icmp,
-                       const std::vector<FileMetaData *> &levelFiles,
-                       std::vector<FileMetaData *> *compactionFiles);
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+                       const std::vector<FileMetaData*>& level_files,
+                       std::vector<FileMetaData*>* compaction_files);
 
 class AddBoundaryInputsTest {
  public:
-  std::vector<FileMetaData *> levelFiles_;
-  std::vector<FileMetaData *> compactionFiles_;
-  std::vector<FileMetaData *> allFiles_;
+  std::vector<FileMetaData*> level_files_;
+  std::vector<FileMetaData*> compaction_files_;
+  std::vector<FileMetaData*> all_files_;
   InternalKeyComparator icmp_;
 
   AddBoundaryInputsTest() : icmp_(BytewiseComparator()){};
 
   ~AddBoundaryInputsTest() {
-    for (size_t i = 0; i < allFiles_.size(); ++i) {
-      delete allFiles_[i];
+    for (size_t i = 0; i < all_files_.size(); ++i) {
+      delete all_files_[i];
     }
-    allFiles_.clear();
+    all_files_.clear();
   };
 
-  FileMetaData *CreateFileMetaData(uint64_t number, InternalKey smallest,
+  FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
                                    InternalKey largest) {
-    FileMetaData *f = new FileMetaData();
+    FileMetaData* f = new FileMetaData();
     f->number = number;
     f->smallest = smallest;
     f->largest = largest;
-    allFiles_.push_back(f);
+    all_files_.push_back(f);
     return f;
   }
 };
 
 TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_TRUE(compactionFiles_.empty());
-  ASSERT_TRUE(levelFiles_.empty());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_TRUE(level_files_.empty());
 }
 
 TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  compactionFiles_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(1, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_TRUE(levelFiles_.empty());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(1, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_TRUE(level_files_.empty());
 }
 
 TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  levelFiles_.push_back(f1);
+  level_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_TRUE(compactionFiles_.empty());
-  ASSERT_EQ(1, levelFiles_.size());
-  ASSERT_EQ(f1, levelFiles_[0]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_EQ(1, level_files_.size());
+  ASSERT_EQ(f1, level_files_[0]);
 }
 
 TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
                          InternalKey(InternalKey("200", 1, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
 
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f2);
-  compactionFiles_.push_back(f3);
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f2);
+  compaction_files_.push_back(f3);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(2, compactionFiles_.size());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
 }
 
 TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
                          InternalKey(InternalKey("100", 2, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
                          InternalKey(InternalKey("200", 3, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
 
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f1);
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(2, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f2, compactionFiles_[1]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f2, compaction_files_[1]);
 }
 
 TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                          InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
                          InternalKey(InternalKey("100", 3, kTypeValue)));
 
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f1);
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(3, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f3, compactionFiles_[1]);
-  ASSERT_EQ(f2, compactionFiles_[2]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f3, compaction_files_[1]);
+  ASSERT_EQ(f2, compaction_files_[2]);
 }
 
 TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
-  FileMetaData *f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f2 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f3 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue)));
-  FileMetaData *f4 = CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), InternalKey(InternalKey("100", 3, kTypeValue)));
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData* f4 =
+      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+                         InternalKey(InternalKey("100", 3, kTypeValue)));
 
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f4);
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f4);
 
-  compactionFiles_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(3, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f4, compactionFiles_[1]);
-  ASSERT_EQ(f3, compactionFiles_[2]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f4, compaction_files_[1]);
+  ASSERT_EQ(f3, compaction_files_[2]);
 }
 
-} // namespace leveldb
+}  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index a7c37b1..28ef1b8 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -1,47 +1,43 @@
 // Copyright (c) 2019 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
 #include <iostream>
-#include <map>
 #include <memory>
-#include <sstream>
 #include <string>
 #include <vector>
 
-#include <math.h>
-
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
 #include "util/testharness.h"
 
-using namespace std;
-
 namespace leveldb {
 
 namespace {
 
-unsigned int random(unsigned int max) {
-  return std::rand() % max;
-}
+// Creates a random number in the range of [0, max).
+int GenerateRandomNumber(int max) { return std::rand() % max; }
 
-string newString(int32_t index) {
-  const unsigned int len = 1024;
+std::string CreateRandomString(int32_t index) {
+  static const size_t len = 1024;
   char bytes[len];
-  unsigned int i = 0;
+  size_t i = 0;
   while (i < 8) {
     bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
     ++i;
   }
   while (i < sizeof(bytes)) {
-    bytes[i] = 'a' + random(26);
+    bytes[i] = 'a' + GenerateRandomNumber(26);
     ++i;
   }
-  return string(bytes, sizeof(bytes));
+  return std::string(bytes, sizeof(bytes));
 }
 
 }  // namespace
 
-class Issue320 { };
+class Issue320 {};
 
 TEST(Issue320, Test) {
   std::srand(0);
@@ -49,8 +45,8 @@ TEST(Issue320, Test) {
   bool delete_before_put = false;
   bool keep_snapshots = true;
 
-  vector<pair<string, string>*> test_map(10000, nullptr);
-  vector<Snapshot const*> snapshots(100, nullptr);
+  std::vector<std::pair<std::string, std::string>*> test_map(10000, nullptr);
+  std::vector<Snapshot const*> snapshots(100, nullptr);
 
   DB* db;
   Options options;
@@ -59,11 +55,11 @@ TEST(Issue320, Test) {
   std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
   ASSERT_OK(DB::Open(options, dbpath, &db));
 
-  unsigned int target_size = 10000;
-  unsigned int num_items = 0;
-  unsigned long count = 0;
-  string key;
-  string value, old_value;
+  uint32_t target_size = 10000;
+  uint32_t num_items = 0;
+  uint32_t count = 0;
+  std::string key;
+  std::string value, old_value;
 
   WriteOptions writeOptions;
   ReadOptions readOptions;
@@ -72,13 +68,13 @@ TEST(Issue320, Test) {
       cout << "count: " << count << endl;
     }
 
-    unsigned int index = random(test_map.size());
+    int index = GenerateRandomNumber(test_map.size());
     WriteBatch batch;
 
     if (test_map[index] == nullptr) {
       num_items++;
-      test_map[index] =
-          new pair<string, string>(newString(index), newString(index));
+      test_map[index] = new std::pair<std::string, std::string>(
+          CreateRandomString(index), CreateRandomString(index));
       batch.Put(test_map[index]->first, test_map[index]->second);
     } else {
       ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
@@ -92,13 +88,13 @@ TEST(Issue320, Test) {
         ASSERT_EQ(old_value, test_map[index]->second);
       }
 
-      if (num_items >= target_size && random(100) > 30) {
+      if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
         batch.Delete(test_map[index]->first);
         delete test_map[index];
         test_map[index] = nullptr;
         --num_items;
       } else {
-        test_map[index]->second = newString(index);
+        test_map[index]->second = CreateRandomString(index);
         if (delete_before_put) batch.Delete(test_map[index]->first);
         batch.Put(test_map[index]->first, test_map[index]->second);
       }
@@ -106,8 +102,8 @@ TEST(Issue320, Test) {
 
     ASSERT_OK(db->Write(writeOptions, &batch));
 
-    if (keep_snapshots && random(10) == 0) {
-      unsigned int i = random(snapshots.size());
+    if (keep_snapshots && GenerateRandomNumber(10) == 0) {
+      int i = GenerateRandomNumber(snapshots.size());
       if (snapshots[i] != nullptr) {
         db->ReleaseSnapshot(snapshots[i]);
       }
@@ -134,6 +130,4 @@ TEST(Issue320, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

From 08e771901f454ac32643bd8e8cb2bcfa08026c0c Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 11 Apr 2019 19:47:16 -0700
Subject: [PATCH 066/181] Simplify issue320_test.

Use std::unique_ptr to simplify issue320_test.
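
For context, a standalone sketch (not the test itself) of why the explicit
delete loop can go away: assigning nullptr to a std::unique_ptr destroys the
owned object, and the vector's destructor releases anything still held when
the test ends.

  // Standalone sketch of unique_ptr ownership in a vector.
  #include <memory>
  #include <string>
  #include <utility>
  #include <vector>

  int main() {
    std::vector<std::unique_ptr<std::pair<std::string, std::string>>> m(3);
    m[0].reset(new std::pair<std::string, std::string>("key", "value"));
    m[0] = nullptr;  // deletes the pair immediately, like the old explicit delete
    return 0;        // remaining owned pairs are released by m's destructor
  }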

PiperOrigin-RevId: 243190799
---
 issues/issue320_test.cc | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index 28ef1b8..5145857 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -45,7 +45,8 @@ TEST(Issue320, Test) {
   bool delete_before_put = false;
   bool keep_snapshots = true;
 
-  std::vector<std::pair<std::string, std::string>*> test_map(10000, nullptr);
+  std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map(
+      10000);
   std::vector<Snapshot const*> snapshots(100, nullptr);
 
   DB* db;
@@ -73,7 +74,7 @@ TEST(Issue320, Test) {
 
     if (test_map[index] == nullptr) {
       num_items++;
-      test_map[index] = new std::pair<std::string, std::string>(
+      test_map[index] = std::make_unique<std::pair<std::string, std::string>>(
           CreateRandomString(index), CreateRandomString(index));
       batch.Put(test_map[index]->first, test_map[index]->second);
     } else {
@@ -90,7 +91,6 @@ TEST(Issue320, Test) {
 
       if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
         batch.Delete(test_map[index]->first);
-        delete test_map[index];
         test_map[index] = nullptr;
         --num_items;
       } else {
@@ -117,13 +117,6 @@ TEST(Issue320, Test) {
     }
   }
 
-  for (size_t i = 0; i < test_map.size(); ++i) {
-    if (test_map[i] != nullptr) {
-      delete test_map[i];
-      test_map[i] = nullptr;
-    }
-  }
-
   delete db;
   DestroyDB(dbpath, options);
 }

From 5a2a472741f36ecf5b994439da5a64c6ab90c47f Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Fri, 12 Apr 2019 01:09:39 -0700
Subject: [PATCH 067/181] Fixed missing std namespaces and make_unique.

cout/endl were missing the std namespace. Also, std::make_unique
was used inadvertently; it is part of C++14, and only C++11
is currently supported.
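
The C++11-compatible pattern the fix switches to, shown as a standalone
sketch with hypothetical names:

  // std::make_unique is C++14; under C++11, reset() with a plain new
  // expression achieves the same ownership transfer.
  #include <memory>
  #include <string>
  #include <utility>

  int main() {
    std::unique_ptr<std::pair<std::string, std::string>> entry;
    // C++14 only:
    // entry = std::make_unique<std::pair<std::string, std::string>>("k", "v");
    entry.reset(new std::pair<std::string, std::string>("k", "v"));  // C++11
    return 0;
  }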

PiperOrigin-RevId: 243221310
---
 issues/issue320_test.cc | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index 5145857..c5fcbfc 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -66,7 +66,7 @@ TEST(Issue320, Test) {
   ReadOptions readOptions;
   while (count < 200000) {
     if ((++count % 1000) == 0) {
-      cout << "count: " << count << endl;
+      std::cout << "count: " << count << std::endl;
     }
 
     int index = GenerateRandomNumber(test_map.size());
@@ -74,18 +74,20 @@ TEST(Issue320, Test) {
 
     if (test_map[index] == nullptr) {
       num_items++;
-      test_map[index] = std::make_unique<std::pair<std::string, std::string>>(
-          CreateRandomString(index), CreateRandomString(index));
+      test_map[index].reset(new std::pair<std::string, std::string>(
+          CreateRandomString(index), CreateRandomString(index)));
       batch.Put(test_map[index]->first, test_map[index]->second);
     } else {
       ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
       if (old_value != test_map[index]->second) {
-        cout << "ERROR incorrect value returned by Get" << endl;
-        cout << "  count=" << count << endl;
-        cout << "  old value=" << old_value << endl;
-        cout << "  test_map[index]->second=" << test_map[index]->second << endl;
-        cout << "  test_map[index]->first=" << test_map[index]->first << endl;
-        cout << "  index=" << index << endl;
+        std::cout << "ERROR incorrect value returned by Get" << std::endl;
+        std::cout << "  count=" << count << std::endl;
+        std::cout << "  old value=" << old_value << std::endl;
+        std::cout << "  test_map[index]->second=" << test_map[index]->second
+                  << std::endl;
+        std::cout << "  test_map[index]->first=" << test_map[index]->first
+                  << std::endl;
+        std::cout << "  index=" << index << std::endl;
         ASSERT_EQ(old_value, test_map[index]->second);
       }
 

From 2f008ac19ec783e4d0ba2161320241c99e9897e1 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Fri, 12 Apr 2019 18:34:19 -0700
Subject: [PATCH 068/181] Initialize class members to default values in
 constructors.

A few members were identified as having been left uninitialized in
some constructors. They were very likely set before being used
(otherwise the ASan tests would have caught them), but it is still
good practice to initialize them. This addresses some items reported
in issue #668.
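
As a hedged illustration of the pattern (the struct and member names
below are made up for this note, not taken verbatim from leveldb):

    // Give every member a deterministic value in the constructor's
    // initializer list, even members that are normally assigned later.
    struct ExampleState {
      int tid;
      void* shared;
      explicit ExampleState(int index) : tid(index), shared(nullptr) {}
    };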

PiperOrigin-RevId: 243370145
---
 db/db_bench.cc | 5 +----
 db/db_impl.cc  | 7 ++++---
 util/cache.cc  | 3 +--
 3 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/db/db_bench.cc b/db/db_bench.cc
index f9403f4..41e903b 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -304,10 +304,7 @@ struct ThreadState {
   Stats stats;
   SharedState* shared;
 
-  ThreadState(int index)
-      : tid(index),
-        rand(1000 + index) {
-  }
+  ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
 };
 
 }  // namespace
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 3468862..caef2b1 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -48,7 +48,8 @@ struct DBImpl::Writer {
   bool done;
   port::CondVar cv;
 
-  explicit Writer(port::Mutex* mu) : cv(mu) { }
+  explicit Writer(port::Mutex* mu)
+      : batch(nullptr), sync(false), done(false), cv(mu) {}
 };
 
 struct DBImpl::CompactionState {
@@ -78,10 +79,10 @@ struct DBImpl::CompactionState {
 
   explicit CompactionState(Compaction* c)
       : compaction(c),
+        smallest_snapshot(0),
         outfile(nullptr),
         builder(nullptr),
-        total_bytes(0) {
-  }
+        total_bytes(0) {}
 };
 
 // Fix user-supplied options to be reasonable
diff --git a/util/cache.cc b/util/cache.cc
index 7cc2cea..25b51b5 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -196,8 +196,7 @@ class LRUCache {
   HandleTable table_ GUARDED_BY(mutex_);
 };
 
-LRUCache::LRUCache()
-    : usage_(0) {
+LRUCache::LRUCache() : capacity_(0), usage_(0) {
   // Make empty circular linked lists.
   lru_.next = &lru_;
   lru_.prev = &lru_;

From 7b1174519044339f07a023dc445b0d36425bd6db Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Sat, 13 Apr 2019 09:19:55 -0700
Subject: [PATCH 069/181] Changed Windows-specific highlighting from bash to
 cmd.

This makes the syntax highlighting a little nicer on GitHub.

PiperOrigin-RevId: 243426806
---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 0121dd8..537cab1 100644
--- a/README.md
+++ b/README.md
@@ -44,20 +44,20 @@ cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
 
 First generate the Visual Studio 2017 project/solution files:
 
-```bash
+```cmd
 mkdir build
 cd build
 cmake -G "Visual Studio 15" ..
 ```
 The default default will build for x86. For 64-bit run:
 
-```bash
+```cmd
 cmake -G "Visual Studio 15 Win64" ..
 ```
 
 To compile the Windows solution from the command-line:
 
-```bash
+```cmd
 devenv /build Debug leveldb.sln
 ```
 

From 2ccb45c33aecd8b15000c0c622f45eb119b6b478 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Mon, 15 Apr 2019 15:11:03 -0700
Subject: [PATCH 070/181] Check for possibly invalid offset in test.

Fix a possibly invalid offset check in the test, flagged in
issue #668. It is not the source of any known bug, but fixing it
silences static analyzers.
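
The boundary condition being tightened, sketched outside the test
(the function name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // Valid read offsets are [0, size). An offset equal to size has
    // nothing left to read, so it is rejected along with larger offsets.
    bool IsValidReadOffset(uint64_t offset, size_t size) {
      return offset < size;  // equivalent to rejecting offset >= size
    }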

PiperOrigin-RevId: 243697659
---
 table/table_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/table/table_test.cc b/table/table_test.cc
index e47db3d..5573be6 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -119,7 +119,7 @@ class StringSource: public RandomAccessFile {
 
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                        char* scratch) const {
-    if (offset > contents_.size()) {
+    if (offset >= contents_.size()) {
       return Status::InvalidArgument("invalid Read offset");
     }
     if (offset + n > contents_.size()) {

From 3dc9202f78a3eb30ee8c0267e4e4be2e3f986e45 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 23 Apr 2019 11:00:28 -0700
Subject: [PATCH 071/181] [leveldb] Specifically export the WriteBatch::Handler
 inner class for Windows linking

Windows linking visibility in shared libraries requires that inner classes are
specifically exported as visible, even if the containing class is exported.
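
On Windows builds, an export macro like LEVELDB_EXPORT typically expands
to __declspec(dllexport) or __declspec(dllimport), and that attribute is
not inherited by nested classes. A simplified sketch of the pattern (the
MY_EXPORT macro below is illustrative, not leveldb's actual export.h):

    #if defined(_WIN32) && defined(BUILDING_SHARED_LIB)
    #define MY_EXPORT __declspec(dllexport)
    #else
    #define MY_EXPORT
    #endif

    class MY_EXPORT Outer {
     public:
      // Without its own MY_EXPORT, Inner's symbols (such as its vtable
      // and virtual destructor) may not be visible to DLL users.
      class MY_EXPORT Inner {
       public:
        virtual ~Inner();
      };
    };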

PiperOrigin-RevId: 244886019
---
 include/leveldb/write_batch.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index 9b319f0..5380c53 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -62,7 +62,7 @@ class LEVELDB_EXPORT WriteBatch {
   void Append(const WriteBatch& source);
 
   // Support for iterating over the contents of a batch.
-  class Handler {
+  class LEVELDB_EXPORT Handler {
    public:
     virtual ~Handler();
     virtual void Put(const Slice& key, const Slice& value) = 0;

From d3d1c8a0f40a7eaa12a5bb702fa01786b7c3a646 Mon Sep 17 00:00:00 2001
From: Kyle Zhang <kyle@smartx.com>
Date: Thu, 25 Apr 2019 09:44:07 +0800
Subject: [PATCH 072/181] don't check current key in DBIter::Next()

When iter_ is already positioned on the current key, we can safely
move it to the next key right away, avoiding a redundant check of the
current key.
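
In outline, the new control flow (condensed from the diff below; the
other branch of Next() is unchanged):

    // Save the current user key so FindNextUserEntry() can skip any
    // older entries for it, then advance immediately: iter_ is already
    // positioned on the current key, so re-checking it is unnecessary.
    SaveKey(ExtractUserKey(iter_->key()), &saved_key_);
    iter_->Next();
    if (!iter_->Valid()) {
      // Reached the end of the underlying iterator.
      valid_ = false;
      saved_key_.clear();
      return;
    }
    FindNextUserEntry(true, &saved_key_);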

Benchmarks show that 'readseq' gets about an 8% performance improvement.

Without patch:

>./db_bench --benchmarks=readseq --num=$((4<<20)) --db=/tmp/db --use_existing_db=1
LevelDB:    version 1.21
Date:       Thu Apr 25 09:37:21 2019
CPU:        32 * Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
CPUCache:   20480 KB
Keys:       16 bytes each
Values:     100 bytes each (50 bytes after compression)
Entries:    4194304
RawSize:    464.0 MB (estimated)
FileSize:   264.0 MB (estimated)
------------------------------------------------
readseq      :       0.196 micros/op;  565.7 MB/s

With patch:

>./db_bench --benchmarks=readseq --num=$((4<<20)) --db=/tmp/db --use_existing_db=1
LevelDB:    version 1.21
Date:       Thu Apr 25 09:38:20 2019
CPU:        32 * Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
CPUCache:   20480 KB
Keys:       16 bytes each
Values:     100 bytes each (50 bytes after compression)
Entries:    4194304
RawSize:    464.0 MB (estimated)
FileSize:   264.0 MB (estimated)
------------------------------------------------
readseq      :       0.181 micros/op;  612.3 MB/s

Signed-off-by: Kyle Zhang <kyle@smartx.com>
---
 db/db_iter.cc | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/db/db_iter.cc b/db/db_iter.cc
index 4d0f42e..48ca4a5 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -168,6 +168,15 @@ void DBIter::Next() {
   } else {
     // Store in saved_key_ the current key so we skip it below.
     SaveKey(ExtractUserKey(iter_->key()), &saved_key_);
+
+    // iter_ is pointing to current key. We can now safely move to the next to
+    // avoid checking current key.
+    iter_->Next();
+    if (!iter_->Valid()) {
+      valid_ = false;
+      saved_key_.clear();
+      return;
+    }
   }
 
   FindNextUserEntry(true, &saved_key_);

From 3724030179716fd8d95cf79339884c49afade8f9 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 29 Apr 2019 15:03:07 -0700
Subject: [PATCH 073/181] Update Travis CI configuration.

The Travis configuration:
1) Installs recent versions of clang and GCC.
2) Sets up the environment so that CMake picks up the installed
   compilers. Previously, the pre-installed clang compiler was used
   instead.
3) Requests a modern macOS image that has all the headers needed by GCC.

The CL also removes old workarounds that are no longer necessary
from the Travis configuration.

PiperOrigin-RevId: 245831188
---
 .travis.yml | 55 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 0e1ad6a..436e037 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,35 +1,29 @@
-# Build matrix / environment variable are explained on:
+# Build matrix / environment variables are explained on:
 # http://about.travis-ci.org/docs/user/build-configuration/
 # This file can be validated on: http://lint.travis-ci.org/
 
-dist: xenial
 language: cpp
+dist: xenial
+osx_image: xcode10.2
 
 compiler:
-  - gcc
-  - clang
+- gcc
+- clang
 os:
-  - linux
-  - osx
+- linux
+- osx
 
 env:
-  - BUILD_TYPE=Debug
-  - BUILD_TYPE=RelWithDebInfo
-
-matrix:
-  exclude:
-    # GCC fails on recent Travis OSX images.
-    # https://github.com/travis-ci/travis-ci/issues/9640
-    - compiler: gcc
-      os: osx
+- BUILD_TYPE=Debug
+- BUILD_TYPE=RelWithDebInfo
 
 addons:
   apt:
     sources:
-    - llvm-toolchain-xenial-7
+    - llvm-toolchain-xenial-8
     - ubuntu-toolchain-r-test
     packages:
-    - clang-7
+    - clang-8
     - cmake
     - gcc-8
     - g++-8
@@ -40,24 +34,33 @@ addons:
     - ninja-build
   homebrew:
     packages:
+    - cmake
     - crc32c
+    - gcc@8
     - gperftools
     - kyotocabinet
-    - gcc@7
+    - llvm@8
     - ninja
     - snappy
     - sqlite3
-
-before_install:
-# The Travis VM image for Mac already has a link at /usr/local/include/c++,
-# causing Homebrew's gcc installation to error out. This was reported to
-# Homebrew maintainers at https://github.com/Homebrew/brew/issues/1742 and
-# removing the link emerged as a workaround.
-- if [ "$TRAVIS_OS_NAME" == "osx" ]; then rm -f /usr/local/include/c++ ; fi
+    update: true
 
 install:
-# /usr/bin/gcc is stuck to old versions on both Linux and OSX.
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+    export PATH="$(brew --prefix llvm)/bin:$PATH";
+  fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
 - if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+    if [ "$CXX" = "clang++" ]; then export CXX="clang++-8" CC="clang-8"; fi;
+  fi
 - echo ${CC}
 - echo ${CXX}
 - ${CXX} --version

From 297e66afc1dda3f3d7a7cc2022030164c302cb7a Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 2 May 2019 11:01:00 -0700
Subject: [PATCH 074/181] Format all files in accordance with the Google C++ Style Guide.

Use clang-format to bring the formatting into agreement with the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). Doing this simplifies the process of accepting changes. Also fixed a few warnings flagged by clang-tidy.

PiperOrigin-RevId: 246350737
---
 .clang-format                   |  18 ++
 README.md                       |   8 +
 db/autocompact_test.cc          |  25 +--
 db/builder.cc                   |  13 +-
 db/builder.h                    |   8 +-
 db/c.cc                         | 323 ++++++++++++++------------------
 db/corruption_test.cc           |  32 ++--
 db/db_bench.cc                  | 124 ++++++------
 db/db_impl.cc                   | 179 ++++++++----------
 db/db_impl.h                    |  11 +-
 db/db_iter.cc                   |  39 ++--
 db/db_iter.h                    |   9 +-
 db/db_test.cc                   | 241 +++++++++++-------------
 db/dbformat.cc                  |  22 +--
 db/dbformat.h                   |  33 ++--
 db/dbformat_test.cc             |  75 ++++----
 db/dumpfile.cc                  |  10 +-
 db/fault_injection_test.cc      |  43 ++---
 db/filename.cc                  |  15 +-
 db/filename.h                   |   5 +-
 db/filename_test.cc             |  74 ++++----
 db/leveldbutil.cc               |  11 +-
 db/log_reader.cc                |  15 +-
 db/log_reader.h                 |   2 +-
 db/log_test.cc                  | 140 ++++++--------
 db/log_writer.cc                |  29 ++-
 db/log_writer.h                 |   3 +-
 db/memtable.cc                  |  39 ++--
 db/memtable.h                   |   8 +-
 db/recovery_test.cc             |  44 ++---
 db/repair.cc                    |  47 ++---
 db/skiplist.h                   |  48 ++---
 db/skiplist_test.cc             |  38 ++--
 db/snapshot.h                   |  10 +-
 db/table_cache.cc               |  25 +--
 db/table_cache.h                |  17 +-
 db/version_edit.cc              |  34 ++--
 db/version_edit.h               |  25 ++-
 db/version_edit_test.cc         |   6 +-
 db/version_set.cc               | 200 +++++++++-----------
 db/version_set.h                |  56 +++---
 db/version_set_test.cc          |  39 ++--
 db/write_batch.cc               |  19 +-
 db/write_batch_internal.h       |   9 +-
 db/write_batch_test.cc          |  48 ++---
 doc/bench/db_bench_sqlite3.cc   | 141 +++++++-------
 doc/bench/db_bench_tree_db.cc   | 108 +++++------
 helpers/memenv/memenv.cc        |  53 ++----
 helpers/memenv/memenv_test.cc   |  35 ++--
 include/leveldb/c.h             |  42 ++---
 include/leveldb/cache.h         |   3 +-
 include/leveldb/comparator.h    |   6 +-
 include/leveldb/db.h            |  19 +-
 include/leveldb/dumpfile.h      |   1 +
 include/leveldb/env.h           |  18 +-
 include/leveldb/filter_policy.h |   5 +-
 include/leveldb/iterator.h      |   5 +-
 include/leveldb/options.h       |   3 +-
 include/leveldb/slice.h         |  29 +--
 include/leveldb/status.h        |   3 +-
 include/leveldb/table.h         |  15 +-
 include/leveldb/table_builder.h |   1 +
 include/leveldb/write_batch.h   |   3 +-
 issues/issue178_test.cc         |  12 +-
 issues/issue200_test.cc         |  10 +-
 port/port.h                     |   4 +-
 port/port_stdcxx.h              |  26 ++-
 port/thread_annotations.h       |  13 +-
 table/block.cc                  |  28 ++-
 table/block.h                   |   5 +-
 table/block_builder.cc          |  21 +--
 table/block_builder.h           |  19 +-
 table/filter_block.cc           |  19 +-
 table/filter_block.h            |  14 +-
 table/filter_block_test.cc      |  34 ++--
 table/format.cc                 |  11 +-
 table/format.h                  |  28 +--
 table/iterator.cc               |  24 ++-
 table/iterator_wrapper.h        |  54 ++++--
 table/merger.cc                 |  22 +--
 table/merger.h                  |   4 +-
 table/table.cc                  |  35 ++--
 table/table_builder.cc          |  25 +--
 table/table_test.cc             | 208 ++++++++------------
 table/two_level_iterator.cc     |  41 ++--
 table/two_level_iterator.h      |   9 +-
 util/arena.cc                   |   7 +-
 util/arena_test.cc              |  17 +-
 util/bloom.cc                   |  15 +-
 util/bloom_test.cc              |  46 ++---
 util/cache.cc                   |  48 +++--
 util/cache_test.cc              |  44 ++---
 util/coding.cc                  |  66 ++++---
 util/coding.h                   |  14 +-
 util/coding_test.cc             |  32 ++--
 util/comparator.cc              |  15 +-
 util/crc32c.cc                  |  12 +-
 util/crc32c.h                   |   4 +-
 util/crc32c_test.cc             |  29 +--
 util/env.cc                     |  24 +--
 util/env_posix.cc               |  59 +++---
 util/env_posix_test.cc          |   6 +-
 util/env_test.cc                |  16 +-
 util/env_windows.cc             |   7 +-
 util/filter_policy.cc           |   2 +-
 util/hash.cc                    |  11 +-
 util/hash_test.cc               |  22 +--
 util/histogram.cc               | 207 ++++++++++++++++----
 util/histogram.h                |   4 +-
 util/logging.cc                 |   7 +-
 util/logging.h                  |   4 +-
 util/logging_test.cc            |   6 +-
 util/mutexlock.h                |   6 +-
 util/no_destructor.h            |  11 +-
 util/no_destructor_test.cc      |   6 +-
 util/options.cc                 |   5 +-
 util/posix_logger.h             |  27 +--
 util/random.h                   |   9 +-
 util/status.cc                  |  10 +-
 util/status_test.cc             |   4 +-
 util/testharness.cc             |   2 +-
 util/testharness.h              |  62 +++---
 util/testutil.cc                |  12 +-
 util/testutil.h                 |  15 +-
 util/windows_logger.h           |  25 +--
 125 files changed, 1967 insertions(+), 2326 deletions(-)
 create mode 100644 .clang-format

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..75f3401
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+  - Regex:           '^(<|"(db|helpers)/)'
+    Priority:        1
+  - Regex:           '^"(leveldb)/'
+    Priority:        2
+  - Regex:           '^(<|"(issues|port|table|third_party|util)/)'
+    Priority:        3
+  - Regex:           '.*'
+    Priority:        4
diff --git a/README.md b/README.md
index 537cab1..0b660ae 100644
--- a/README.md
+++ b/README.md
@@ -86,6 +86,14 @@ Contribution requirements:
 3. **Tests**: All changes must be accompanied by a new (or changed) test, or
    a sufficient explanation as to why a new (or changed) test is not required.
 
+4. **Consistent Style**: This project conforms to the
+   [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+   To ensure your changes are properly formatted please run:
+
+   ```
+   clang-format -i --style=file <file>
+   ```
+
 ## Submitting a Pull Request
 
 Before any pull request will be accepted the author must first sign a
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index d20a236..00e3672 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
 
@@ -81,17 +81,16 @@ void AutoCompactTest::DoReads(int n) {
     ASSERT_LT(read, 100) << "Taking too long to compact";
     Iterator* iter = db_->NewIterator(ReadOptions());
     for (iter->SeekToFirst();
-         iter->Valid() && iter->key().ToString() < limit_key;
-         iter->Next()) {
+         iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
       // Drop data
     }
     delete iter;
     // Wait a little bit to allow any triggered compactions to complete.
     Env::Default()->SleepForMicroseconds(1000000);
     uint64_t size = Size(Key(0), Key(n));
-    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
-            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
-    if (size <= initial_size/10) {
+    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+            size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+    if (size <= initial_size / 10) {
       break;
     }
   }
@@ -100,19 +99,13 @@ void AutoCompactTest::DoReads(int n) {
   // is pretty much unchanged.
   const int64_t final_other_size = Size(Key(n), Key(kCount));
   ASSERT_LE(final_other_size, initial_other_size + 1048576);
-  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+  ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
 }
 
-TEST(AutoCompactTest, ReadAll) {
-  DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
 
-TEST(AutoCompactTest, ReadHalf) {
-  DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/builder.cc b/db/builder.cc
index 5fa405d..9520ee4 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -4,8 +4,8 @@
 
 #include "db/builder.h"
 
-#include "db/filename.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "db/table_cache.h"
 #include "db/version_edit.h"
 #include "leveldb/db.h"
@@ -14,12 +14,8 @@
 
 namespace leveldb {
 
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
   Status s;
   meta->file_size = 0;
   iter->SeekToFirst();
@@ -60,8 +56,7 @@ Status BuildTable(const std::string& dbname,
 
     if (s.ok()) {
       // Verify that the table is usable
-      Iterator* it = table_cache->NewIterator(ReadOptions(),
-                                              meta->number,
+      Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
                                               meta->file_size);
       s = it->status();
       delete it;
diff --git a/db/builder.h b/db/builder.h
index 0289730..7bd0b80 100644
--- a/db/builder.h
+++ b/db/builder.h
@@ -22,12 +22,8 @@ class VersionEdit;
 // *meta will be filled with metadata about the generated table.
 // If no data is present in *iter, meta->file_size will be set to
 // zero, and no Table file will be produced.
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta);
 
 }  // namespace leveldb
 
diff --git a/db/c.cc b/db/c.cc
index 7756ea3..72f6daa 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -5,6 +5,7 @@
 #include "leveldb/c.h"
 
 #include <stdlib.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
@@ -42,67 +43,79 @@ using leveldb::WriteOptions;
 
 extern "C" {
 
-struct leveldb_t              { DB*               rep; };
-struct leveldb_iterator_t     { Iterator*         rep; };
-struct leveldb_writebatch_t   { WriteBatch        rep; };
-struct leveldb_snapshot_t     { const Snapshot*   rep; };
-struct leveldb_readoptions_t  { ReadOptions       rep; };
-struct leveldb_writeoptions_t { WriteOptions      rep; };
-struct leveldb_options_t      { Options           rep; };
-struct leveldb_cache_t        { Cache*            rep; };
-struct leveldb_seqfile_t      { SequentialFile*   rep; };
-struct leveldb_randomfile_t   { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile*     rep; };
-struct leveldb_logger_t       { Logger*           rep; };
-struct leveldb_filelock_t     { FileLock*         rep; };
+struct leveldb_t {
+  DB* rep;
+};
+struct leveldb_iterator_t {
+  Iterator* rep;
+};
+struct leveldb_writebatch_t {
+  WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+  const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+  ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+  WriteOptions rep;
+};
+struct leveldb_options_t {
+  Options rep;
+};
+struct leveldb_cache_t {
+  Cache* rep;
+};
+struct leveldb_seqfile_t {
+  SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+  RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+  WritableFile* rep;
+};
+struct leveldb_logger_t {
+  Logger* rep;
+};
+struct leveldb_filelock_t {
+  FileLock* rep;
+};
 
 struct leveldb_comparator_t : public Comparator {
   void* state_;
   void (*destructor_)(void*);
-  int (*compare_)(
-      void*,
-      const char* a, size_t alen,
-      const char* b, size_t blen);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
   const char* (*name_)(void*);
 
-  virtual ~leveldb_comparator_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
   }
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }
 
   // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*, const Slice&) const { }
-  virtual void FindShortSuccessor(std::string* key) const { }
+  virtual void FindShortestSeparator(std::string*, const Slice&) const {}
+  virtual void FindShortSuccessor(std::string* key) const {}
 };
 
 struct leveldb_filterpolicy_t : public FilterPolicy {
   void* state_;
   void (*destructor_)(void*);
   const char* (*name_)(void*);
-  char* (*create_)(
-      void*,
-      const char* const* key_array, const size_t* key_length_array,
-      int num_keys,
-      size_t* filter_length);
-  unsigned char (*key_match_)(
-      void*,
-      const char* key, size_t length,
-      const char* filter, size_t filter_length);
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 
-  virtual ~leveldb_filterpolicy_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     std::vector<const char*> key_pointers(n);
@@ -118,8 +131,8 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
   }
 
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
-    return (*key_match_)(state_, key.data(), key.size(),
-                         filter.data(), filter.size());
+    return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+                         filter.size());
   }
 };
 
@@ -148,10 +161,8 @@ static char* CopyString(const std::string& str) {
   return result;
 }
 
-leveldb_t* leveldb_open(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   DB* db;
   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
     return nullptr;
@@ -166,39 +177,26 @@ void leveldb_close(leveldb_t* db) {
   delete db;
 }
 
-void leveldb_put(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+                 const char* key, size_t keylen, const char* val, size_t vallen,
+                 char** errptr) {
   SaveError(errptr,
             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
 }
 
-void leveldb_delete(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+                    const char* key, size_t keylen, char** errptr) {
   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
 }
 
-
-void leveldb_write(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    leveldb_writebatch_t* batch,
-    char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+                   leveldb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }
 
-char* leveldb_get(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+                  const char* key, size_t keylen, size_t* vallen,
+                  char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
@@ -215,30 +213,25 @@ char* leveldb_get(
 }
 
 leveldb_iterator_t* leveldb_create_iterator(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options) {
+    leveldb_t* db, const leveldb_readoptions_t* options) {
   leveldb_iterator_t* result = new leveldb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep);
   return result;
 }
 
-const leveldb_snapshot_t* leveldb_create_snapshot(
-    leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
   leveldb_snapshot_t* result = new leveldb_snapshot_t;
   result->rep = db->rep->GetSnapshot();
   return result;
 }
 
-void leveldb_release_snapshot(
-    leveldb_t* db,
-    const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+                              const leveldb_snapshot_t* snapshot) {
   db->rep->ReleaseSnapshot(snapshot->rep);
   delete snapshot;
 }
 
-char* leveldb_property_value(
-    leveldb_t* db,
-    const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
@@ -248,12 +241,12 @@ char* leveldb_property_value(
   }
 }
 
-void leveldb_approximate_sizes(
-    leveldb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
+                               uint64_t* sizes) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@@ -263,10 +256,9 @@ void leveldb_approximate_sizes(
   delete[] ranges;
 }
 
-void leveldb_compact_range(
-    leveldb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+                           size_t start_key_len, const char* limit_key,
+                           size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
       // Pass null Slice if corresponding "const char*" is null
@@ -274,17 +266,13 @@ void leveldb_compact_range(
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }
 
-void leveldb_destroy_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   SaveError(errptr, DestroyDB(name, options->rep));
 }
 
-void leveldb_repair_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+                       char** errptr) {
   SaveError(errptr, RepairDB(name, options->rep));
 }
 
@@ -309,13 +297,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
   iter->rep->Seek(Slice(k, klen));
 }
 
-void leveldb_iter_next(leveldb_iterator_t* iter) {
-  iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
 
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
-  iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
 
 const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
   Slice s = iter->rep->key();
@@ -337,32 +321,25 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
   return new leveldb_writebatch_t;
 }
 
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
-  delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
 
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
-  b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
 
-void leveldb_writebatch_put(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+                            size_t klen, const char* val, size_t vlen) {
   b->rep.Put(Slice(key, klen), Slice(val, vlen));
 }
 
-void leveldb_writebatch_delete(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+                               size_t klen) {
   b->rep.Delete(Slice(key, klen));
 }
 
-void leveldb_writebatch_iterate(
-    const leveldb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+                                void (*put)(void*, const char* k, size_t klen,
+                                            const char* v, size_t vlen),
+                                void (*deleted)(void*, const char* k,
+                                                size_t klen)) {
   class H : public WriteBatch::Handler {
    public:
     void* state_;
@@ -382,43 +359,37 @@ void leveldb_writebatch_iterate(
   b->rep.Iterate(&handler);
 }
 
-void leveldb_writebatch_append(leveldb_writebatch_t *destination,
-                               const leveldb_writebatch_t *source) {
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+                               const leveldb_writebatch_t* source) {
   destination->rep.Append(source->rep);
 }
 
-leveldb_options_t* leveldb_options_create() {
-  return new leveldb_options_t;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
 
-void leveldb_options_destroy(leveldb_options_t* options) {
-  delete options;
-}
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
 
-void leveldb_options_set_comparator(
-    leveldb_options_t* opt,
-    leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+                                    leveldb_comparator_t* cmp) {
   opt->rep.comparator = cmp;
 }
 
-void leveldb_options_set_filter_policy(
-    leveldb_options_t* opt,
-    leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+                                       leveldb_filterpolicy_t* policy) {
   opt->rep.filter_policy = policy;
 }
 
-void leveldb_options_set_create_if_missing(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.create_if_missing = v;
 }
 
-void leveldb_options_set_error_if_exists(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.error_if_exists = v;
 }
 
-void leveldb_options_set_paranoid_checks(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.paranoid_checks = v;
 }
 
@@ -459,12 +430,9 @@ void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
 }
 
 leveldb_comparator_t* leveldb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+                   size_t blen),
     const char* (*name)(void*)) {
   leveldb_comparator_t* result = new leveldb_comparator_t;
   result->state_ = state;
@@ -474,22 +442,15 @@ leveldb_comparator_t* leveldb_comparator_create(
   return result;
 }
 
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
-  delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
 
 leveldb_filterpolicy_t* leveldb_filterpolicy_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*create_filter)(
-        void*,
-        const char* const* key_array, const size_t* key_length_array,
-        int num_keys,
-        size_t* filter_length),
-    unsigned char (*key_may_match)(
-        void*,
-        const char* key, size_t length,
-        const char* filter, size_t filter_length),
+    void* state, void (*destructor)(void*),
+    char* (*create_filter)(void*, const char* const* key_array,
+                           const size_t* key_length_array, int num_keys,
+                           size_t* filter_length),
+    unsigned char (*key_may_match)(void*, const char* key, size_t length,
+                                   const char* filter, size_t filter_length),
     const char* (*name)(void*)) {
   leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
   result->state_ = state;
@@ -518,7 +479,7 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) { }
+    static void DoNothing(void*) {}
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
@@ -531,24 +492,20 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
   return new leveldb_readoptions_t;
 }
 
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
-  delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
 
-void leveldb_readoptions_set_verify_checksums(
-    leveldb_readoptions_t* opt,
-    unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+                                              unsigned char v) {
   opt->rep.verify_checksums = v;
 }
 
-void leveldb_readoptions_set_fill_cache(
-    leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
+                                        unsigned char v) {
   opt->rep.fill_cache = v;
 }
 
-void leveldb_readoptions_set_snapshot(
-    leveldb_readoptions_t* opt,
-    const leveldb_snapshot_t* snap) {
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+                                      const leveldb_snapshot_t* snap) {
   opt->rep.snapshot = (snap ? snap->rep : nullptr);
 }
 
@@ -556,12 +513,10 @@ leveldb_writeoptions_t* leveldb_writeoptions_create() {
   return new leveldb_writeoptions_t;
 }
 
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
-  delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
 
-void leveldb_writeoptions_set_sync(
-    leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.sync = v;
 }
 
@@ -600,16 +555,10 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
   return buffer;
 }
 
-void leveldb_free(void* ptr) {
-  free(ptr);
-}
+void leveldb_free(void* ptr) { free(ptr); }
 
-int leveldb_major_version() {
-  return kMajorVersion;
-}
+int leveldb_major_version() { return kMajorVersion; }
 
-int leveldb_minor_version() {
-  return kMinorVersion;
-}
+int leveldb_minor_version() { return kMinorVersion; }
 
 }  // end extern "C"
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index d50785a..e6f64ee 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
-
 #include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
 #include "util/logging.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
@@ -42,8 +42,8 @@ class CorruptionTest {
   }
 
   ~CorruptionTest() {
-     delete db_;
-     delete tiny_cache_;
+    delete db_;
+    delete tiny_cache_;
   }
 
   Status TryReopen() {
@@ -52,9 +52,7 @@ class CorruptionTest {
     return DB::Open(options_, dbname_, &db_);
   }
 
-  void Reopen() {
-    ASSERT_OK(TryReopen());
-  }
+  void Reopen() { ASSERT_OK(TryReopen()); }
 
   void RepairDB() {
     delete db_;
@@ -66,7 +64,7 @@ class CorruptionTest {
     std::string key_space, value_space;
     WriteBatch batch;
     for (int i = 0; i < n; i++) {
-      //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+      // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
       Slice key = Key(i, &key_space);
       batch.Clear();
       batch.Put(key, Value(i, &value_space));
@@ -95,8 +93,7 @@ class CorruptionTest {
         // Ignore boundary keys.
         continue;
       }
-      if (!ConsumeDecimalNumber(&in, &key) ||
-          !in.empty() ||
+      if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
           key < next_expected) {
         bad_keys++;
         continue;
@@ -127,8 +124,7 @@ class CorruptionTest {
     std::string fname;
     int picked_number = -1;
     for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, &type) &&
-          type == filetype &&
+      if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
           int(number) > picked_number) {  // Pick latest file
         fname = dbname_ + "/" + filenames[i];
         picked_number = number;
@@ -194,7 +190,7 @@ class CorruptionTest {
 TEST(CorruptionTest, Recovery) {
   Build(100);
   Check(100, 100);
-  Corrupt(kLogFile, 19, 1);      // WriteBatch tag for first record
+  Corrupt(kLogFile, 19, 1);  // WriteBatch tag for first record
   Corrupt(kLogFile, log::kBlockSize + 1000, 1);  // Somewhere in second block
   Reopen();
 
@@ -361,6 +357,4 @@ TEST(CorruptionTest, UnrelatedKeys) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/db_bench.cc b/db/db_bench.cc
index 41e903b..3090b43 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
@@ -55,8 +56,7 @@ static const char* FLAGS_benchmarks =
     "fill100K,"
     "crc32c,"
     "snappycomp,"
-    "snappyuncomp,"
-    ;
+    "snappyuncomp,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -155,7 +155,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   size_t limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -214,9 +214,7 @@ class Stats {
     seconds_ = (finish_ - start_) * 1e-6;
   }
 
-  void AddMessage(Slice msg) {
-    AppendWithSpace(&message_, msg);
-  }
+  void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
 
   void FinishedSingleOp() {
     if (FLAGS_histogram) {
@@ -232,21 +230,26 @@ class Stats {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
   }
 
-  void AddBytes(int64_t n) {
-    bytes_ += n;
-  }
+  void AddBytes(int64_t n) { bytes_ += n; }
 
   void Report(const Slice& name) {
     // Pretend at least one op was done in case we are running a benchmark
@@ -265,11 +268,8 @@ class Stats {
     }
     AppendWithSpace(&extra, message_);
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            seconds_ * 1e6 / done_,
-            (extra.empty() ? "" : " "),
-            extra.c_str());
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
     }
@@ -294,13 +294,13 @@ struct SharedState {
   bool start GUARDED_BY(mu);
 
   SharedState(int total)
-      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) { }
+      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
 };
 
 // Per-thread state for concurrent executions of the same benchmark.
 struct ThreadState {
-  int tid;             // 0..n-1 when running in n threads
-  Random rand;         // Has different seeds for different threads
+  int tid;      // 0..n-1 when running in n threads
+  Random rand;  // Has different seeds for different threads
   Stats stats;
   SharedState* shared;
 
@@ -330,20 +330,20 @@ class Benchmark {
             static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-             / 1048576.0));
+            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -361,8 +361,8 @@ class Benchmark {
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "LevelDB:    version %d.%d\n",
-            kMajorVersion, kMinorVersion);
+    fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
+            kMinorVersion);
 
 #if defined(__linux)
     time_t now = time(nullptr);
@@ -397,16 +397,16 @@ class Benchmark {
 
  public:
   Benchmark()
-  : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
-    filter_policy_(FLAGS_bloom_bits >= 0
-                   ? NewBloomFilterPolicy(FLAGS_bloom_bits)
-                   : nullptr),
-    db_(nullptr),
-    num_(FLAGS_num),
-    value_size_(FLAGS_value_size),
-    entries_per_batch_(1),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    heap_counter_(0) {
+      : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+        filter_policy_(FLAGS_bloom_bits >= 0
+                           ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+                           : nullptr),
+        db_(nullptr),
+        num_(FLAGS_num),
+        value_size_(FLAGS_value_size),
+        entries_per_batch_(1),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        heap_counter_(0) {
     std::vector<std::string> files;
     g_env->GetChildren(FLAGS_db, &files);
     for (size_t i = 0; i < files.size(); i++) {
@@ -516,7 +516,7 @@ class Benchmark {
       } else if (name == Slice("sstables")) {
         PrintStats("leveldb.sstables");
       } else {
-        if (name != Slice()) {  // No error message for empty name
+        if (!name.empty()) {  // No error message for empty name
           fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
         }
       }
@@ -665,8 +665,8 @@ class Benchmark {
     int64_t bytes = 0;
     char* uncompressed = new char[input.size()];
     while (ok && bytes < 1024 * 1048576) {  // Compress 1G
-      ok =  port::Snappy_Uncompress(compressed.data(), compressed.size(),
-                                    uncompressed);
+      ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
+                                   uncompressed);
       bytes += input.size();
       thread->stats.FinishedSingleOp();
     }
@@ -706,13 +706,9 @@ class Benchmark {
     }
   }
 
-  void WriteSeq(ThreadState* thread) {
-    DoWrite(thread, true);
-  }
+  void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
 
-  void WriteRandom(ThreadState* thread) {
-    DoWrite(thread, false);
-  }
+  void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
 
   void DoWrite(ThreadState* thread, bool seq) {
     if (num_ != FLAGS_num) {
@@ -728,7 +724,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Put(key, gen.Generate(value_size_));
@@ -838,7 +834,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Delete(key);
@@ -852,13 +848,9 @@ class Benchmark {
     }
   }
 
-  void DeleteSeq(ThreadState* thread) {
-    DoDelete(thread, true);
-  }
+  void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
 
-  void DeleteRandom(ThreadState* thread) {
-    DoDelete(thread, false);
-  }
+  void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
 
   void ReadWhileWriting(ThreadState* thread) {
     if (thread->tid > 0) {
@@ -890,9 +882,7 @@ class Benchmark {
     }
   }
 
-  void Compact(ThreadState* thread) {
-    db_->CompactRange(nullptr, nullptr);
-  }
+  void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
 
   void PrintStats(const char* key) {
     std::string stats;
@@ -982,9 +972,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::g_env->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::g_env->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/db/db_impl.cc b/db/db_impl.cc
index caef2b1..bff2d62 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -75,7 +75,7 @@ struct DBImpl::CompactionState {
 
   uint64_t total_bytes;
 
-  Output* current_output() { return &outputs[outputs.size()-1]; }
+  Output* current_output() { return &outputs[outputs.size() - 1]; }
 
   explicit CompactionState(Compaction* c)
       : compaction(c),
@@ -98,10 +98,10 @@ Options SanitizeOptions(const std::string& dbname,
   Options result = src;
   result.comparator = icmp;
   result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
-  ClipToRange(&result.max_open_files,    64 + kNumNonTableCacheFiles, 50000);
-  ClipToRange(&result.write_buffer_size, 64<<10,                      1<<30);
-  ClipToRange(&result.max_file_size,     1<<20,                       1<<30);
-  ClipToRange(&result.block_size,        1<<10,                       4<<20);
+  ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
+  ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+  ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+  ClipToRange(&result.block_size, 1 << 10, 4 << 20);
   if (result.info_log == nullptr) {
     // Open a log file in the same directory as the db
     src.env->CreateDir(dbname);  // In case it does not exist
@@ -268,8 +268,7 @@ void DBImpl::DeleteObsoleteFiles() {
         if (type == kTableFile) {
           table_cache_->Evict(number);
         }
-        Log(options_.info_log, "Delete type=%d #%lld\n",
-            static_cast<int>(type),
+        Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
             static_cast<unsigned long long>(number));
         env_->DeleteFile(dbname_ + "/" + filenames[i]);
       }
@@ -277,7 +276,7 @@ void DBImpl::DeleteObsoleteFiles() {
   }
 }
 
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
   mutex_.AssertHeld();
 
   // Ignore error from CreateDir since the creation of the DB is
@@ -302,8 +301,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
     }
   } else {
     if (options_.error_if_exists) {
-      return Status::InvalidArgument(
-          dbname_, "exists (error_if_exists is true)");
+      return Status::InvalidArgument(dbname_,
+                                     "exists (error_if_exists is true)");
     }
   }
 
@@ -378,8 +377,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     Status* status;  // null if options_.paranoid_checks==false
     virtual void Corruption(size_t bytes, const Status& s) {
       Log(info_log, "%s%s: dropping %d bytes; %s",
-          (this->status == nullptr ? "(ignoring error) " : ""),
-          fname, static_cast<int>(bytes), s.ToString().c_str());
+          (this->status == nullptr ? "(ignoring error) " : ""), fname,
+          static_cast<int>(bytes), s.ToString().c_str());
       if (this->status != nullptr && this->status->ok()) *this->status = s;
     }
   };
@@ -405,10 +404,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   // paranoid_checks==false so that corruptions cause entire commits
   // to be skipped instead of propagating bad information (like overly
   // large sequence numbers).
-  log::Reader reader(file, &reporter, true/*checksum*/,
-                     0/*initial_offset*/);
+  log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
   Log(options_.info_log, "Recovering log #%llu",
-      (unsigned long long) log_number);
+      (unsigned long long)log_number);
 
   // Read all the records and add to a memtable
   std::string scratch;
@@ -416,11 +414,10 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   WriteBatch batch;
   int compactions = 0;
   MemTable* mem = nullptr;
-  while (reader.ReadRecord(&record, &scratch) &&
-         status.ok()) {
+  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
     if (record.size() < 12) {
-      reporter.Corruption(
-          record.size(), Status::Corruption("log record too small"));
+      reporter.Corruption(record.size(),
+                          Status::Corruption("log record too small"));
       continue;
     }
     WriteBatchInternal::SetContents(&batch, record);
@@ -434,9 +431,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     if (!status.ok()) {
       break;
     }
-    const SequenceNumber last_seq =
-        WriteBatchInternal::Sequence(&batch) +
-        WriteBatchInternal::Count(&batch) - 1;
+    const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+                                    WriteBatchInternal::Count(&batch) - 1;
     if (last_seq > *max_sequence) {
       *max_sequence = last_seq;
     }
@@ -500,7 +496,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   pending_outputs_.insert(meta.number);
   Iterator* iter = mem->NewIterator();
   Log(options_.info_log, "Level-0 table #%llu: started",
-      (unsigned long long) meta.number);
+      (unsigned long long)meta.number);
 
   Status s;
   {
@@ -510,13 +506,11 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   }
 
   Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
-      (unsigned long long) meta.number,
-      (unsigned long long) meta.file_size,
+      (unsigned long long)meta.number, (unsigned long long)meta.file_size,
       s.ToString().c_str());
   delete iter;
   pending_outputs_.erase(meta.number);
 
-
   // Note that if file_size is zero, the file has been deleted and
   // should not be added to the manifest.
   int level = 0;
@@ -526,8 +520,8 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
     if (base != nullptr) {
       level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
     }
-    edit->AddFile(level, meta.number, meta.file_size,
-                  meta.smallest, meta.largest);
+    edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+                  meta.largest);
   }
 
   CompactionStats stats;
@@ -658,8 +652,7 @@ void DBImpl::MaybeScheduleCompaction() {
     // DB is being deleted; no more background compactions
   } else if (!bg_error_.ok()) {
     // Already got an error; no more changes
-  } else if (imm_ == nullptr &&
-             manual_compaction_ == nullptr &&
+  } else if (imm_ == nullptr && manual_compaction_ == nullptr &&
              !versions_->NeedsCompaction()) {
     // No work to be done
   } else {
@@ -711,8 +704,7 @@ void DBImpl::BackgroundCompaction() {
     }
     Log(options_.info_log,
         "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
-        m->level,
-        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+        m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
         (m->end ? m->end->DebugString().c_str() : "(end)"),
         (m->done ? "(end)" : manual_end.DebugString().c_str()));
   } else {
@@ -727,19 +719,17 @@ void DBImpl::BackgroundCompaction() {
     assert(c->num_input_files(0) == 1);
     FileMetaData* f = c->input(0, 0);
     c->edit()->DeleteFile(c->level(), f->number);
-    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
-                       f->smallest, f->largest);
+    c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
+                       f->largest);
     status = versions_->LogAndApply(c->edit(), &mutex_);
     if (!status.ok()) {
       RecordBackgroundError(status);
     }
     VersionSet::LevelSummaryStorage tmp;
     Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
-        static_cast<unsigned long long>(f->number),
-        c->level() + 1,
+        static_cast<unsigned long long>(f->number), c->level() + 1,
         static_cast<unsigned long long>(f->file_size),
-        status.ToString().c_str(),
-        versions_->LevelSummary(&tmp));
+        status.ToString().c_str(), versions_->LevelSummary(&tmp));
   } else {
     CompactionState* compact = new CompactionState(c);
     status = DoCompactionWork(compact);
@@ -757,8 +747,7 @@ void DBImpl::BackgroundCompaction() {
   } else if (shutting_down_.load(std::memory_order_acquire)) {
     // Ignore compaction errors found during shutting down
   } else {
-    Log(options_.info_log,
-        "Compaction error: %s", status.ToString().c_str());
+    Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
   }
 
   if (is_manual) {
@@ -853,31 +842,25 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
 
   if (s.ok() && current_entries > 0) {
     // Verify that the table is usable
-    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
-                                               output_number,
-                                               current_bytes);
+    Iterator* iter =
+        table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
     s = iter->status();
     delete iter;
     if (s.ok()) {
-      Log(options_.info_log,
-          "Generated table #%llu@%d: %lld keys, %lld bytes",
-          (unsigned long long) output_number,
-          compact->compaction->level(),
-          (unsigned long long) current_entries,
-          (unsigned long long) current_bytes);
+      Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
+          (unsigned long long)output_number, compact->compaction->level(),
+          (unsigned long long)current_entries,
+          (unsigned long long)current_bytes);
     }
   }
   return s;
 }
 
-
 Status DBImpl::InstallCompactionResults(CompactionState* compact) {
   mutex_.AssertHeld();
-  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
-      compact->compaction->num_input_files(0),
-      compact->compaction->level(),
-      compact->compaction->num_input_files(1),
-      compact->compaction->level() + 1,
+  Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
+      compact->compaction->num_input_files(0), compact->compaction->level(),
+      compact->compaction->num_input_files(1), compact->compaction->level() + 1,
       static_cast<long long>(compact->total_bytes));
 
   // Add compaction outputs
@@ -885,9 +868,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
   const int level = compact->compaction->level();
   for (size_t i = 0; i < compact->outputs.size(); i++) {
     const CompactionState::Output& out = compact->outputs[i];
-    compact->compaction->edit()->AddFile(
-        level + 1,
-        out.number, out.file_size, out.smallest, out.largest);
+    compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
+                                         out.smallest, out.largest);
   }
   return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
 }
@@ -896,9 +878,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   const uint64_t start_micros = env_->NowMicros();
   int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
 
-  Log(options_.info_log,  "Compacting %d@%d + %d@%d files",
-      compact->compaction->num_input_files(0),
-      compact->compaction->level(),
+  Log(options_.info_log, "Compacting %d@%d + %d@%d files",
+      compact->compaction->num_input_files(0), compact->compaction->level(),
       compact->compaction->num_input_files(1),
       compact->compaction->level() + 1);
 
@@ -921,7 +902,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   std::string current_user_key;
   bool has_current_user_key = false;
   SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
-  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
+  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire);) {
     // Prioritize immutable compaction work
     if (has_imm_.load(std::memory_order_relaxed)) {
       const uint64_t imm_start = env_->NowMicros();
@@ -953,8 +934,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
       last_sequence_for_key = kMaxSequenceNumber;
     } else {
       if (!has_current_user_key ||
-          user_comparator()->Compare(ikey.user_key,
-                                     Slice(current_user_key)) != 0) {
+          user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+              0) {
         // First occurrence of this user key
         current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
         has_current_user_key = true;
@@ -963,7 +944,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 
       if (last_sequence_for_key <= compact->smallest_snapshot) {
        // Hidden by a newer entry for same user key
-        drop = true;    // (A)
+        drop = true;  // (A)
       } else if (ikey.type == kTypeDeletion &&
                  ikey.sequence <= compact->smallest_snapshot &&
                  compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -1049,8 +1030,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     RecordBackgroundError(status);
   }
   VersionSet::LevelSummaryStorage tmp;
-  Log(options_.info_log,
-      "compacted to: %s", versions_->LevelSummary(&tmp));
+  Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
   return status;
 }
 
@@ -1063,7 +1043,7 @@ struct IterState {
   MemTable* const imm GUARDED_BY(mu);
 
   IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
-      : mu(mutex), version(version), mem(mem), imm(imm) { }
+      : mu(mutex), version(version), mem(mem), imm(imm) {}
 };
 
 static void CleanupIteratorState(void* arg1, void* arg2) {
@@ -1116,8 +1096,7 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
   return versions_->MaxNextLevelOverlappingBytes();
 }
 
-Status DBImpl::Get(const ReadOptions& options,
-                   const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
                    std::string* value) {
   Status s;
   MutexLock l(&mutex_);
@@ -1168,12 +1147,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
   SequenceNumber latest_snapshot;
   uint32_t seed;
   Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
-  return NewDBIterator(
-      this, user_comparator(), iter,
-      (options.snapshot != nullptr
-       ? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
-       : latest_snapshot),
-      seed);
+  return NewDBIterator(this, user_comparator(), iter,
+                       (options.snapshot != nullptr
+                            ? static_cast<const SnapshotImpl*>(options.snapshot)
+                                  ->sequence_number()
+                            : latest_snapshot),
+                       seed);
 }
 
 void DBImpl::RecordReadSample(Slice key) {
@@ -1202,9 +1181,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
   return DB::Delete(options, key);
 }
 
-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
   Writer w(&mutex_);
-  w.batch = my_batch;
+  w.batch = updates;
   w.sync = options.sync;
   w.done = false;
 
@@ -1218,10 +1197,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
   }
 
   // May temporarily unlock and wait.
-  Status status = MakeRoomForWrite(my_batch == nullptr);
+  Status status = MakeRoomForWrite(updates == nullptr);
   uint64_t last_sequence = versions_->LastSequence();
   Writer* last_writer = &w;
-  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
+  if (status.ok() && updates != nullptr) {  // nullptr batch is for compactions
     WriteBatch* updates = BuildBatchGroup(&last_writer);
     WriteBatchInternal::SetSequence(updates, last_sequence + 1);
     last_sequence += WriteBatchInternal::Count(updates);
@@ -1290,8 +1269,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
   // original write is small, limit the growth so we do not slow
   // down the small write too much.
   size_t max_size = 1 << 20;
-  if (size <= (128<<10)) {
-    max_size = size + (128<<10);
+  if (size <= (128 << 10)) {
+    max_size = size + (128 << 10);
   }
 
   *last_writer = first;
@@ -1337,9 +1316,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // Yield previous error
       s = bg_error_;
       break;
-    } else if (
-        allow_delay &&
-        versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+    } else if (allow_delay && versions_->NumLevelFiles(0) >=
+                                  config::kL0_SlowdownWritesTrigger) {
       // We are getting close to hitting a hard limit on the number of
       // L0 files.  Rather than delaying a single write by several
       // seconds when we hit the hard limit, start delaying each
@@ -1383,7 +1361,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       has_imm_.store(true, std::memory_order_release);
       mem_ = new MemTable(internal_comparator_);
       mem_->Ref();
-      force = false;   // Do not force another compaction if have room
+      force = false;  // Do not force another compaction if have room
       MaybeScheduleCompaction();
     }
   }
@@ -1417,21 +1395,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
     snprintf(buf, sizeof(buf),
              "                               Compactions\n"
              "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
-             "--------------------------------------------------\n"
-             );
+             "--------------------------------------------------\n");
     value->append(buf);
     for (int level = 0; level < config::kNumLevels; level++) {
       int files = versions_->NumLevelFiles(level);
       if (stats_[level].micros > 0 || files > 0) {
-        snprintf(
-            buf, sizeof(buf),
-            "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
-            level,
-            files,
-            versions_->NumLevelBytes(level) / 1048576.0,
-            stats_[level].micros / 1e6,
-            stats_[level].bytes_read / 1048576.0,
-            stats_[level].bytes_written / 1048576.0);
+        snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+                 files, versions_->NumLevelBytes(level) / 1048576.0,
+                 stats_[level].micros / 1e6,
+                 stats_[level].bytes_read / 1048576.0,
+                 stats_[level].bytes_written / 1048576.0);
         value->append(buf);
       }
     }
@@ -1457,9 +1430,7 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
   return false;
 }
 
-void DBImpl::GetApproximateSizes(
-    const Range* range, int n,
-    uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
   // TODO(opt): better implementation
   Version* v;
   {
@@ -1497,10 +1468,9 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
   return Write(opt, &batch);
 }
 
-DB::~DB() { }
+DB::~DB() {}
 
-Status DB::Open(const Options& options, const std::string& dbname,
-                DB** dbptr) {
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   *dbptr = nullptr;
 
   DBImpl* impl = new DBImpl(options, dbname);
@@ -1543,8 +1513,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
   return s;
 }
 
-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() {}
 
 Status DestroyDB(const std::string& dbname, const Options& options) {
   Env* env = options.env;
diff --git a/db/db_impl.h b/db/db_impl.h
index ca00d42..c895952 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -35,8 +35,7 @@ class DBImpl : public DB {
   virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
   virtual Status Delete(const WriteOptions&, const Slice& key);
   virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key,
+  virtual Status Get(const ReadOptions& options, const Slice& key,
                      std::string* value);
   virtual Iterator* NewIterator(const ReadOptions&);
   virtual const Snapshot* GetSnapshot();
@@ -166,9 +165,9 @@ class DBImpl : public DB {
   struct ManualCompaction {
     int level;
     bool done;
-    const InternalKey* begin;   // null means beginning of key range
-    const InternalKey* end;     // null means end of key range
-    InternalKey tmp_storage;    // Used to keep track of compaction progress
+    const InternalKey* begin;  // null means beginning of key range
+    const InternalKey* end;    // null means end of key range
+    InternalKey tmp_storage;   // Used to keep track of compaction progress
   };
   ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
 
@@ -184,7 +183,7 @@ class DBImpl : public DB {
     int64_t bytes_read;
     int64_t bytes_written;
 
-    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
+    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
 
     void Add(const CompactionStats& c) {
       this->micros += c.micros;
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 4d0f42e..1e5b5e2 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -4,9 +4,9 @@
 
 #include "db/db_iter.h"
 
-#include "db/filename.h"
 #include "db/db_impl.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
 // combines multiple entries for the same userkey found in the DB
 // representation into a single entry while accounting for sequence
 // numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
  public:
   // Which direction is the iterator currently moving?
   // (1) When moving forward, the internal iterator is positioned at
   //     the exact entry that yields this->key(), this->value()
   // (2) When moving backwards, the internal iterator is positioned
   //     just before all entries whose user key == this->key().
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
 
   DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
          uint32_t seed)
@@ -57,11 +54,8 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         rnd_(seed),
-        bytes_until_read_sampling_(RandomCompactionPeriod()) {
-  }
-  virtual ~DBIter() {
-    delete iter_;
-  }
+        bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+  virtual ~DBIter() { delete iter_; }
   virtual bool Valid() const { return valid_; }
   virtual Slice key() const {
     assert(valid_);
@@ -105,7 +99,7 @@ class DBIter: public Iterator {
 
   // Picks the number of bytes that can be read until a compaction is scheduled.
   size_t RandomCompactionPeriod() {
-    return rnd_.Uniform(2*config::kReadBytesPeriod);
+    return rnd_.Uniform(2 * config::kReadBytesPeriod);
   }
 
   DBImpl* db_;
@@ -114,8 +108,8 @@ class DBIter: public Iterator {
   SequenceNumber const sequence_;
 
   Status status_;
-  std::string saved_key_;     // == current key when direction_==kReverse
-  std::string saved_value_;   // == current raw value when direction_==kReverse
+  std::string saved_key_;    // == current key when direction_==kReverse
+  std::string saved_value_;  // == current raw value when direction_==kReverse
   Direction direction_;
   bool valid_;
 
@@ -221,8 +215,8 @@ void DBIter::Prev() {
         ClearSavedValue();
         return;
       }
-      if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
-                                    saved_key_) < 0) {
+      if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+          0) {
         break;
       }
     }
@@ -278,8 +272,8 @@ void DBIter::Seek(const Slice& target) {
   direction_ = kForward;
   ClearSavedValue();
   saved_key_.clear();
-  AppendInternalKey(
-      &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+  AppendInternalKey(&saved_key_,
+                    ParsedInternalKey(target, sequence_, kValueTypeForSeek));
   iter_->Seek(saved_key_);
   if (iter_->Valid()) {
     FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -308,12 +302,9 @@ void DBIter::SeekToLast() {
 
 }  // anonymous namespace
 
-Iterator* NewDBIterator(
-    DBImpl* db,
-    const Comparator* user_key_comparator,
-    Iterator* internal_iter,
-    SequenceNumber sequence,
-    uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
+                        uint32_t seed) {
   return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
 }
 
diff --git a/db/db_iter.h b/db/db_iter.h
index 262840e..fd93e91 100644
--- a/db/db_iter.h
+++ b/db/db_iter.h
@@ -6,8 +6,9 @@
 #define STORAGE_LEVELDB_DB_DB_ITER_H_
 
 #include <stdint.h>
-#include "leveldb/db.h"
+
 #include "db/dbformat.h"
+#include "leveldb/db.h"
 
 namespace leveldb {
 
@@ -16,10 +17,8 @@ class DBImpl;
 // Return a new iterator that converts internal keys (yielded by
 // "*internal_iter") that were live at the specified "sequence" number
 // into appropriate user keys.
-Iterator* NewDBIterator(DBImpl* db,
-                        const Comparator* user_key_comparator,
-                        Iterator* internal_iter,
-                        SequenceNumber sequence,
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
                         uint32_t seed);
 
 }  // namespace leveldb
diff --git a/db/db_test.cc b/db/db_test.cc
index 3ab4aee..4343216 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2,17 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "leveldb/db.h"
+
 #include <atomic>
 #include <string>
 
-#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
 #include "db/write_batch_internal.h"
 #include "leveldb/cache.h"
 #include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
 #include "leveldb/table.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -31,9 +32,9 @@ static std::string RandomString(Random* rnd, int len) {
 }
 
 static std::string RandomKey(Random* rnd) {
-  int len = (rnd->OneIn(3)
-             ? 1                // Short sometimes to encourage collisions
-             : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+  int len =
+      (rnd->OneIn(3) ? 1  // Short sometimes to encourage collisions
+                     : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
   return test::RandomKey(rnd, len);
 }
 
@@ -42,11 +43,10 @@ class AtomicCounter {
  private:
   port::Mutex mu_;
   int count_ GUARDED_BY(mu_);
+
  public:
-  AtomicCounter() : count_(0) { }
-  void Increment() {
-    IncrementBy(1);
-  }
+  AtomicCounter() : count_(0) {}
+  void Increment() { IncrementBy(1); }
   void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
     MutexLock l(&mu_);
     count_ += count;
@@ -120,15 +120,15 @@ class SpecialEnv : public EnvWrapper {
   bool count_random_reads_;
   AtomicCounter random_read_counter_;
 
-  explicit SpecialEnv(Env* base) : EnvWrapper(base),
-    delay_data_sync_(false),
-    data_sync_error_(false),
-    no_space_(false),
-    non_writable_(false),
-    manifest_sync_error_(false),
-    manifest_write_error_(false),
-    count_random_reads_(false) {
-  }
+  explicit SpecialEnv(Env* base)
+      : EnvWrapper(base),
+        delay_data_sync_(false),
+        data_sync_error_(false),
+        no_space_(false),
+        non_writable_(false),
+        manifest_sync_error_(false),
+        manifest_write_error_(false),
+        count_random_reads_(false) {}
 
   Status NewWritableFile(const std::string& f, WritableFile** r) {
     class DataFile : public WritableFile {
@@ -137,10 +137,7 @@ class SpecialEnv : public EnvWrapper {
       WritableFile* const base_;
 
      public:
-      DataFile(SpecialEnv* env, WritableFile* base)
-          : env_(env),
-            base_(base) {
-      }
+      DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
       ~DataFile() { delete base_; }
       Status Append(const Slice& data) {
         if (env_->no_space_.load(std::memory_order_acquire)) {
@@ -166,8 +163,9 @@ class SpecialEnv : public EnvWrapper {
      private:
       SpecialEnv* env_;
       WritableFile* base_;
+
      public:
-      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
       ~ManifestFile() { delete base_; }
       Status Append(const Slice& data) {
         if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
@@ -208,10 +206,10 @@ class SpecialEnv : public EnvWrapper {
      private:
       RandomAccessFile* target_;
       AtomicCounter* counter_;
+
      public:
       CountingFile(RandomAccessFile* target, AtomicCounter* counter)
-          : target_(target), counter_(counter) {
-      }
+          : target_(target), counter_(counter) {}
       virtual ~CountingFile() { delete target_; }
       virtual Status Read(uint64_t offset, size_t n, Slice* result,
                           char* scratch) const {
@@ -233,13 +231,7 @@ class DBTest {
   const FilterPolicy* filter_policy_;
 
   // Sequence of option configurations to try
-  enum OptionConfig {
-    kDefault,
-    kReuse,
-    kFilter,
-    kUncompressed,
-    kEnd
-  };
+  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
   int option_config_;
 
  public:
@@ -249,8 +241,7 @@ class DBTest {
 
   Options last_options_;
 
-  DBTest() : option_config_(kDefault),
-             env_(new SpecialEnv(Env::Default())) {
+  DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
     filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
@@ -297,13 +288,9 @@ class DBTest {
     return options;
   }
 
-  DBImpl* dbfull() {
-    return reinterpret_cast<DBImpl*>(db_);
-  }
+  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
 
-  void Reopen(Options* options = nullptr) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
 
   void Close() {
     delete db_;
@@ -336,9 +323,7 @@ class DBTest {
     return db_->Put(WriteOptions(), k, v);
   }
 
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
 
   std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
     ReadOptions options;
@@ -424,9 +409,8 @@ class DBTest {
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    ASSERT_TRUE(
-        db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
-                         &property));
+    ASSERT_TRUE(db_->GetProperty(
+        "leveldb.num-files-at-level" + NumberToString(level), &property));
     return std::stoi(property);
   }
 
@@ -491,9 +475,9 @@ class DBTest {
 
   void DumpFileCounts(const char* label) {
     fprintf(stderr, "---\n%s:\n", label);
-    fprintf(stderr, "maxoverlap: %lld\n",
-            static_cast<long long>(
-                dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+    fprintf(
+        stderr, "maxoverlap: %lld\n",
+        static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
     for (int level = 0; level < config::kNumLevels; level++) {
       int num = NumTableFilesAtLevel(level);
       if (num > 0) {
@@ -612,8 +596,8 @@ TEST(DBTest, GetFromImmutableLayer) {
 
     // Block sync calls.
     env_->delay_data_sync_.store(true, std::memory_order_release);
-    Put("k1", std::string(100000, 'x'));             // Fill memtable.
-    Put("k2", std::string(100000, 'y'));             // Trigger compaction.
+    Put("k1", std::string(100000, 'x'));  // Fill memtable.
+    Put("k2", std::string(100000, 'y'));  // Trigger compaction.
     ASSERT_EQ("v1", Get("foo"));
     // Release sync calls.
     env_->delay_data_sync_.store(false, std::memory_order_release);
@@ -635,7 +619,7 @@ TEST(DBTest, GetMemUsage) {
     ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
     int mem_usage = std::stoi(val);
     ASSERT_GT(mem_usage, 0);
-    ASSERT_LT(mem_usage, 5*1024*1024);
+    ASSERT_LT(mem_usage, 5 * 1024 * 1024);
   } while (ChangeOptions());
 }
 
@@ -760,8 +744,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
 
     // Step 1: First place sstables in levels 0 and 2
     int compaction_count = 0;
-    while (NumTableFilesAtLevel(0) == 0 ||
-           NumTableFilesAtLevel(2) == 0) {
+    while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
       ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
       compaction_count++;
       Put("a", "begin");
@@ -898,10 +881,10 @@ TEST(DBTest, IterMulti) {
   ASSERT_EQ(IterStatus(iter), "b->vb");
 
   // Make sure iter stays at snapshot
-  ASSERT_OK(Put("a",  "va2"));
+  ASSERT_OK(Put("a", "va2"));
   ASSERT_OK(Put("a2", "va3"));
-  ASSERT_OK(Put("b",  "vb2"));
-  ASSERT_OK(Put("c",  "vc2"));
+  ASSERT_OK(Put("b", "vb2"));
+  ASSERT_OK(Put("c", "vc2"));
   ASSERT_OK(Delete("b"));
   iter->SeekToFirst();
   ASSERT_EQ(IterStatus(iter), "a->va");
@@ -1092,7 +1075,7 @@ TEST(DBTest, RecoverWithLargeLog) {
 
 TEST(DBTest, CompactionsGenerateMultipleFiles) {
   Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;        // Large write buffer
+  options.write_buffer_size = 100000000;  // Large write buffer
   Reopen(&options);
 
   Random rnd(301);
@@ -1161,26 +1144,25 @@ TEST(DBTest, SparseMerge) {
   dbfull()->TEST_CompactRange(0, nullptr, nullptr);
 
   // Make sparse update
-  Put("A",    "va2");
+  Put("A", "va2");
   Put("B100", "bvalue2");
-  Put("C",    "vc2");
+  Put("C", "vc2");
   dbfull()->TEST_CompactMemTable();
 
   // Compactions should not cause us to create a situation where
   // a file overlaps too much data at the next level.
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
   dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
   dbfull()->TEST_CompactRange(1, nullptr, nullptr);
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
 }
 
 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
     fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
+            (unsigned long long)(val), (unsigned long long)(low),
             (unsigned long long)(high));
   }
   return result;
@@ -1189,7 +1171,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 TEST(DBTest, ApproximateSizes) {
   do {
     Options options = CurrentOptions();
-    options.write_buffer_size = 100000000;        // Large write buffer
+    options.write_buffer_size = 100000000;  // Large write buffer
     options.compression = kNoCompression;
     DestroyAndReopen();
 
@@ -1224,12 +1206,13 @@ TEST(DBTest, ApproximateSizes) {
 
       for (int compact_start = 0; compact_start < N; compact_start += 10) {
         for (int i = 0; i < N; i += 10) {
-          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
-          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
-          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+          ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+                              S2 * (i + 1)));
+          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
         }
-        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
-        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+        ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+        ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
 
         std::string cstart_str = Key(compact_start);
         std::string cend_str = Key(compact_start + 9);
@@ -1348,7 +1331,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
     Put("pastfoo", "v");
     const Snapshot* snapshot = db_->GetSnapshot();
     Put("foo", "tiny");
-    Put("pastfoo2", "v2");        // Advance sequence number one more
+    Put("pastfoo2", "v2");  // Advance sequence number one more
 
     ASSERT_OK(dbfull()->TEST_CompactMemTable());
     ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -1373,14 +1356,14 @@ TEST(DBTest, DeletionMarkers1) {
   Put("foo", "v1");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
-  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level
+  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
   dbfull()->TEST_CompactMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
 
   Delete("foo");
   Put("foo", "v2");
@@ -1388,11 +1371,11 @@ TEST(DBTest, DeletionMarkers1) {
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
   Slice z("z");
-  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
+  dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
   // DEL eliminated, but v1 remains because we aren't compacting that level
   // (DEL can be eliminated because v2 hides v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1402,23 +1385,23 @@ TEST(DBTest, DeletionMarkers2) {
   Put("foo", "v1");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
-  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level
+  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
   dbfull()->TEST_CompactMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
 
   Delete("foo");
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
   // DEL kept: "last" file overlaps
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1428,7 +1411,8 @@ TEST(DBTest, OverlapInLevel0) {
   do {
     ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
 
-    // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+    // 0.
     ASSERT_OK(Put("100", "v100"));
     ASSERT_OK(Put("999", "v999"));
     dbfull()->TEST_CompactMemTable();
@@ -1548,16 +1532,17 @@ TEST(DBTest, CustomComparator) {
       return ToNumber(a) - ToNumber(b);
     }
     virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
-      ToNumber(*s);     // Check format
-      ToNumber(l);      // Check format
+      ToNumber(*s);  // Check format
+      ToNumber(l);   // Check format
     }
     virtual void FindShortSuccessor(std::string* key) const {
-      ToNumber(*key);   // Check format
+      ToNumber(*key);  // Check format
     }
+
    private:
     static int ToNumber(const Slice& x) {
       // Check that there are no extra characters.
-      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
           << EscapeString(x);
       int val;
       char ignored;
@@ -1570,7 +1555,7 @@ TEST(DBTest, CustomComparator) {
   Options new_options = CurrentOptions();
   new_options.create_if_missing = true;
   new_options.comparator = &cmp;
-  new_options.filter_policy = nullptr;     // Cannot use bloom filters
+  new_options.filter_policy = nullptr;   // Cannot use bloom filters
   new_options.write_buffer_size = 1000;  // Compact more often
   DestroyAndReopen(&new_options);
   ASSERT_OK(Put("[10]", "ten"));
@@ -1588,7 +1573,7 @@ TEST(DBTest, CustomComparator) {
   for (int run = 0; run < 2; run++) {
     for (int i = 0; i < 1000; i++) {
       char buf[100];
-      snprintf(buf, sizeof(buf), "[%d]", i*10);
+      snprintf(buf, sizeof(buf), "[%d]", i * 10);
       ASSERT_OK(Put(buf, buf));
     }
     Compact("[0]", "[1000000]");
@@ -1739,7 +1724,7 @@ TEST(DBTest, NoSpace) {
   // Force out-of-space errors.
   env_->no_space_.store(true, std::memory_order_release);
   for (int i = 0; i < 10; i++) {
-    for (int level = 0; level < config::kNumLevels-1; level++) {
+    for (int level = 0; level < config::kNumLevels - 1; level++) {
       dbfull()->TEST_CompactRange(level, nullptr, nullptr);
     }
   }
@@ -1809,9 +1794,8 @@ TEST(DBTest, ManifestWriteError) {
   // We iterate twice.  In the second iteration, everything is the
   // same except the log record never makes it to the MANIFEST file.
   for (int iter = 0; iter < 2; iter++) {
-    std::atomic<bool>* error_type = (iter == 0)
-        ? &env_->manifest_sync_error_
-        : &env_->manifest_write_error_;
+    std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+                                                : &env_->manifest_write_error_;
 
     // Insert foo=>bar mapping
     Options options = CurrentOptions();
@@ -1826,7 +1810,7 @@ TEST(DBTest, ManifestWriteError) {
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("bar", Get("foo"));
     const int last = config::kMaxMemCompactLevel;
-    ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo=>bar is now in last level
+    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
 
     // Merging compaction (will fail)
     error_type->store(true, std::memory_order_release);
@@ -1854,8 +1838,7 @@ TEST(DBTest, MissingSSTFile) {
   options.paranoid_checks = true;
   Status s = TryReopen(&options);
   ASSERT_TRUE(!s.ok());
-  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
-      << s.ToString();
+  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
 }
 
 TEST(DBTest, StillReadSST) {
@@ -1915,7 +1898,7 @@ TEST(DBTest, BloomFilter) {
   int reads = env_->random_read_counter_.Read();
   fprintf(stderr, "%d present => %d reads\n", N, reads);
   ASSERT_GE(reads, N);
-  ASSERT_LE(reads, N + 2*N/100);
+  ASSERT_LE(reads, N + 2 * N / 100);
 
   // Lookup present keys.  Should rarely read from either sstable.
   env_->random_read_counter_.Reset();
@@ -1924,7 +1907,7 @@ TEST(DBTest, BloomFilter) {
   }
   reads = env_->random_read_counter_.Read();
   fprintf(stderr, "%d missing => %d reads\n", N, reads);
-  ASSERT_LE(reads, 3*N/100);
+  ASSERT_LE(reads, 3 * N / 100);
 
   env_->delay_data_sync_.store(false, std::memory_order_release);
   Close();
@@ -1970,8 +1953,8 @@ static void MTThreadBody(void* arg) {
     if (rnd.OneIn(2)) {
       // Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
-      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
-               key, id, static_cast<int>(counter));
+      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+               static_cast<int>(counter));
       ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
     } else {
       // Read a value and verify that it matches the pattern written above.
@@ -2033,24 +2016,24 @@ namespace {
 typedef std::map<std::string, std::string> KVMap;
 }
 
-class ModelDB: public DB {
+class ModelDB : public DB {
  public:
   class ModelSnapshot : public Snapshot {
    public:
     KVMap map_;
   };
 
-  explicit ModelDB(const Options& options): options_(options) { }
-  ~ModelDB() { }
+  explicit ModelDB(const Options& options) : options_(options) {}
+  ~ModelDB() {}
   virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
     return DB::Put(o, k, v);
   }
   virtual Status Delete(const WriteOptions& o, const Slice& key) {
     return DB::Delete(o, key);
   }
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key, std::string* value) {
-    assert(false);      // Not implemented
+  virtual Status Get(const ReadOptions& options, const Slice& key,
+                     std::string* value) {
+    assert(false);  // Not implemented
     return Status::NotFound(key);
   }
   virtual Iterator* NewIterator(const ReadOptions& options) {
@@ -2080,9 +2063,7 @@ class ModelDB: public DB {
       virtual void Put(const Slice& key, const Slice& value) {
         (*map_)[key.ToString()] = value.ToString();
       }
-      virtual void Delete(const Slice& key) {
-        map_->erase(key.ToString());
-      }
+      virtual void Delete(const Slice& key) { map_->erase(key.ToString()); }
     };
     Handler handler;
     handler.map_ = &map_;
@@ -2097,15 +2078,13 @@ class ModelDB: public DB {
       sizes[i] = 0;
     }
   }
-  virtual void CompactRange(const Slice* start, const Slice* end) {
-  }
+  virtual void CompactRange(const Slice* start, const Slice* end) {}
 
  private:
-  class ModelIter: public Iterator {
+  class ModelIter : public Iterator {
    public:
     ModelIter(const KVMap* map, bool owned)
-        : map_(map), owned_(owned), iter_(map_->end()) {
-    }
+        : map_(map), owned_(owned), iter_(map_->end()) {}
     ~ModelIter() {
       if (owned_) delete map_;
     }
@@ -2136,9 +2115,7 @@ class ModelDB: public DB {
   KVMap map_;
 };
 
-static bool CompareIterators(int step,
-                             DB* model,
-                             DB* db,
+static bool CompareIterators(int step, DB* model, DB* db,
                              const Snapshot* model_snap,
                              const Snapshot* db_snap) {
   ReadOptions options;
@@ -2149,12 +2126,10 @@ static bool CompareIterators(int step,
   bool ok = true;
   int count = 0;
   for (miter->SeekToFirst(), dbiter->SeekToFirst();
-       ok && miter->Valid() && dbiter->Valid();
-       miter->Next(), dbiter->Next()) {
+       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
     count++;
     if (miter->key().compare(dbiter->key()) != 0) {
-      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
-              step,
+      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
               EscapeString(miter->key()).c_str(),
               EscapeString(dbiter->key()).c_str());
       ok = false;
@@ -2163,8 +2138,7 @@ static bool CompareIterators(int step,
 
     if (miter->value().compare(dbiter->value()) != 0) {
       fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
-              step,
-              EscapeString(miter->key()).c_str(),
+              step, EscapeString(miter->key()).c_str(),
               EscapeString(miter->value()).c_str(),
              EscapeString(dbiter->value()).c_str());
       ok = false;
@@ -2198,22 +2172,19 @@ TEST(DBTest, Randomized) {
       }
       // TODO(sanjay): Test Get() works
       int p = rnd.Uniform(100);
-      if (p < 45) {                               // Put
+      if (p < 45) {  // Put
         k = RandomKey(&rnd);
-        v = RandomString(&rnd,
-                         rnd.OneIn(20)
-                         ? 100 + rnd.Uniform(100)
-                         : rnd.Uniform(8));
+        v = RandomString(
+            &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
         ASSERT_OK(model.Put(WriteOptions(), k, v));
         ASSERT_OK(db_->Put(WriteOptions(), k, v));
 
-      } else if (p < 90) {                        // Delete
+      } else if (p < 90) {  // Delete
         k = RandomKey(&rnd);
         ASSERT_OK(model.Delete(WriteOptions(), k));
         ASSERT_OK(db_->Delete(WriteOptions(), k));
 
-
-      } else {                                    // Multi-element batch
+      } else {  // Multi-element batch
         WriteBatch b;
         const int num = rnd.Uniform(8);
         for (int i = 0; i < num; i++) {
@@ -2288,8 +2259,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   VersionEdit vbase;
   uint64_t fnum = 1;
   for (int i = 0; i < num_base_files; i++) {
-    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
   }
   ASSERT_OK(vset.LogAndApply(&vbase, &mu));
@@ -2299,8 +2270,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   for (int i = 0; i < iters; i++) {
     VersionEdit vedit;
     vedit.DeleteFile(2, fnum);
-    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
     vset.LogAndApply(&vedit, &mu);
   }
@@ -2309,8 +2280,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   char buf[16];
   snprintf(buf, sizeof(buf), "%d", num_base_files);
   fprintf(stderr,
-          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
-          buf, iters, us, ((float)us) / iters);
+          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n", buf,
+          iters, us, ((float)us) / iters);
 }
 
 }  // namespace leveldb
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 20a7ca4..69e8dc6 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
 #include "db/dbformat.h"
+
+#include <stdio.h>
+
 #include "port/port.h"
 #include "util/coding.h"
 
@@ -22,8 +24,7 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
 
 std::string ParsedInternalKey::DebugString() const {
   char buf[50];
-  snprintf(buf, sizeof(buf), "' @ %llu : %d",
-           (unsigned long long) sequence,
+  snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
            int(type));
   std::string result = "'";
   result += EscapeString(user_key.ToString());
@@ -65,9 +66,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
   return r;
 }
 
-void InternalKeyComparator::FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+                                                  const Slice& limit) const {
   // Attempt to shorten the user portion of the key
   Slice user_start = ExtractUserKey(*start);
   Slice user_limit = ExtractUserKey(limit);
@@ -77,7 +77,8 @@ void InternalKeyComparator::FindShortestSeparator(
       user_comparator_->Compare(user_start, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*start, tmp) < 0);
     assert(this->Compare(tmp, limit) < 0);
     start->swap(tmp);
@@ -92,15 +93,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
       user_comparator_->Compare(user_key, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*key, tmp) < 0);
     key->swap(tmp);
   }
 }
 
-const char* InternalFilterPolicy::Name() const {
-  return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
 
 void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
                                         std::string* dst) const {
diff --git a/db/dbformat.h b/db/dbformat.h
index c4d9575..bdc23b8 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_DB_DBFORMAT_H_
 
 #include <stdio.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
 #include "leveldb/filter_policy.h"
@@ -48,10 +49,7 @@ class InternalKey;
 // Value types encoded as the last component of internal keys.
 // DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
 // data structures.
-enum ValueType {
-  kTypeDeletion = 0x0,
-  kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
 // kValueTypeForSeek defines the ValueType that should be passed when
 // constructing a ParsedInternalKey object for seeking to a particular
 // sequence number (since we sort sequence numbers in decreasing order
@@ -64,17 +62,16 @@ typedef uint64_t SequenceNumber;
 
 // We leave eight bits empty at the bottom so a type and sequence#
 // can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
-    ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
 
 struct ParsedInternalKey {
   Slice user_key;
   SequenceNumber sequence;
   ValueType type;
 
-  ParsedInternalKey() { }  // Intentionally left uninitialized (for speed)
+  ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
   ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
-      : user_key(u), sequence(seq), type(t) { }
+      : user_key(u), sequence(seq), type(t) {}
   std::string DebugString() const;
 };
 
@@ -103,13 +100,13 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
 class InternalKeyComparator : public Comparator {
  private:
   const Comparator* user_comparator_;
+
  public:
-  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
+  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
   virtual const char* Name() const;
   virtual int Compare(const Slice& a, const Slice& b) const;
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const;
   virtual void FindShortSuccessor(std::string* key) const;
 
   const Comparator* user_comparator() const { return user_comparator_; }
@@ -121,8 +118,9 @@ class InternalKeyComparator : public Comparator {
 class InternalFilterPolicy : public FilterPolicy {
  private:
   const FilterPolicy* const user_policy_;
+
  public:
-  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
+  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
   virtual const char* Name() const;
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
@@ -134,8 +132,9 @@ class InternalFilterPolicy : public FilterPolicy {
 class InternalKey {
  private:
   std::string rep_;
+
  public:
-  InternalKey() { }   // Leave rep_ as empty to indicate it is invalid
+  InternalKey() {}  // Leave rep_ as empty to indicate it is invalid
   InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
     AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
   }
@@ -158,8 +157,8 @@ class InternalKey {
   std::string DebugString() const;
 };
 
-inline int InternalKeyComparator::Compare(
-    const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+                                          const InternalKey& b) const {
   return Compare(a.Encode(), b.Encode());
 }
 
@@ -204,7 +203,7 @@ class LookupKey {
   const char* start_;
   const char* kstart_;
   const char* end_;
-  char space_[200];      // Avoid allocation for short keys
+  char space_[200];  // Avoid allocation for short keys
 
   // No copying allowed
   LookupKey(const LookupKey&);
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 5d82f5d..87e6aae 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -8,8 +8,7 @@
 
 namespace leveldb {
 
-static std::string IKey(const std::string& user_key,
-                        uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
                         ValueType vt) {
   std::string encoded;
   AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
   return result;
 }
 
-static void TestKey(const std::string& key,
-                    uint64_t seq,
-                    ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
   std::string encoded = IKey(key, seq, vt);
 
   Slice in(encoded);
@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
   ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
 }
 
-class FormatTest { };
+class FormatTest {};
 
 TEST(FormatTest, InternalKey_EncodeDecode) {
-  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
-  const uint64_t seq[] = {
-    1, 2, 3,
-    (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
-    (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
-    (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
-  };
+  const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+  const uint64_t seq[] = {1,
+                          2,
+                          3,
+                          (1ull << 8) - 1,
+                          1ull << 8,
+                          (1ull << 8) + 1,
+                          (1ull << 16) - 1,
+                          1ull << 16,
+                          (1ull << 16) + 1,
+                          (1ull << 32) - 1,
+                          1ull << 32,
+                          (1ull << 32) + 1};
   for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
     for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
       TestKey(keys[k], seq[s], kTypeValue);
@@ -65,37 +68,35 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
 TEST(FormatTest, InternalKeyShortSeparator) {
   // When user keys are same
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 99, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 101, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeDeletion)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
 
   // When user keys are misordered
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("bar", 99, kTypeValue)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
 
   // When user keys are different, but correctly ordered
-  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("hello", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
 
   // When start user key is prefix of limit user key
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foobar", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
 
   // When limit user key is prefix of start user key
-  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
-            Shorten(IKey("foobar", 100, kTypeValue),
-                    IKey("foo", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foobar", 100, kTypeValue),
+      Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
 }
 
 TEST(FormatTest, InternalKeyShortestSuccessor) {
@@ -107,6 +108,4 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
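For reference while reading the IKey() and TestKey() helpers above: an internal key is the user key followed by an 8-byte trailer that packs the sequence number and value type, which is why the seq[] table probes values straddling each byte boundary. A minimal standalone sketch of that trailer, assuming the usual (sequence << 8) | type packing written as a little-endian fixed64 (type 1 standing in for kTypeValue; names are illustrative, not from the patched sources):

#include <cstdint>
#include <string>

// Sketch only: mirrors the assumed AppendInternalKey() trailer layout.
static std::string MakeInternalKeySketch(const std::string& user_key,
                                         uint64_t sequence, uint8_t type) {
  const uint64_t packed = (sequence << 8) | type;  // type lives in the low byte
  std::string result = user_key;
  for (int i = 0; i < 8; i++) {
    // fixed64, little-endian byte order
    result.push_back(static_cast<char>((packed >> (8 * i)) & 0xff));
  }
  return result;
}
// MakeInternalKeySketch("foo", 100, 1) yields "foo" plus 8 trailer bytes,
// an 11-byte internal key.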
diff --git a/db/dumpfile.cc b/db/dumpfile.cc
index 941988b..1dbff5e 100644
--- a/db/dumpfile.cc
+++ b/db/dumpfile.cc
@@ -90,7 +90,6 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
   }
 };
 
-
 // Called on every log record (each one of which is a WriteBatch)
 // found in a kLogFile.
 static void WriteBatchPrinter(uint64_t pos, Slice record, WritableFile* dst) {
@@ -216,9 +215,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
     return Status::InvalidArgument(fname + ": unknown file type");
   }
   switch (ftype) {
-    case kLogFile:         return DumpLog(env, fname, dst);
-    case kDescriptorFile:  return DumpDescriptor(env, fname, dst);
-    case kTableFile:       return DumpTable(env, fname, dst);
+    case kLogFile:
+      return DumpLog(env, fname, dst);
+    case kDescriptorFile:
+      return DumpDescriptor(env, fname, dst);
+    case kTableFile:
+      return DumpTable(env, fname, dst);
     default:
       break;
   }
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 1f72984..7088ea7 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -9,12 +9,12 @@
 #include <map>
 #include <set>
 
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "leveldb/table.h"
 #include "leveldb/write_batch.h"
@@ -56,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
 
   SequentialFile* orig_file;
   Status s = env->NewSequentialFile(filename, &orig_file);
-  if (!s.ok())
-    return s;
+  if (!s.ok()) return s;
 
   char* scratch = new char[length];
   leveldb::Slice result;
@@ -93,7 +92,7 @@ struct FileState {
       : filename_(filename),
         pos_(-1),
         pos_at_last_sync_(-1),
-        pos_at_last_flush_(-1) { }
+        pos_at_last_flush_(-1) {}
 
   FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
 
@@ -108,8 +107,7 @@ struct FileState {
 // is written to or sync'ed.
 class TestWritableFile : public WritableFile {
  public:
-  TestWritableFile(const FileState& state,
-                   WritableFile* f,
+  TestWritableFile(const FileState& state, WritableFile* f,
                    FaultInjectionTestEnv* env);
   virtual ~TestWritableFile();
   virtual Status Append(const Slice& data);
@@ -130,7 +128,7 @@ class FaultInjectionTestEnv : public EnvWrapper {
  public:
   FaultInjectionTestEnv()
       : EnvWrapper(Env::Default()), filesystem_active_(true) {}
-  virtual ~FaultInjectionTestEnv() { }
+  virtual ~FaultInjectionTestEnv() {}
   virtual Status NewWritableFile(const std::string& fname,
                                  WritableFile** result);
   virtual Status NewAppendableFile(const std::string& fname,
@@ -165,13 +163,9 @@ class FaultInjectionTestEnv : public EnvWrapper {
   bool filesystem_active_ GUARDED_BY(mutex_);  // Record flushes, syncs, writes
 };
 
-TestWritableFile::TestWritableFile(const FileState& state,
-                                   WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
                                    FaultInjectionTestEnv* env)
-    : state_(state),
-      target_(f),
-      writable_file_opened_(true),
-      env_(env) {
+    : state_(state), target_(f), writable_file_opened_(true), env_(env) {
   assert(f != nullptr);
 }
 
@@ -395,9 +389,7 @@ class FaultInjectionTest {
     delete env_;
   }
 
-  void ReuseLogs(bool reuse) {
-    options_.reuse_logs = reuse;
-  }
+  void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }
 
   void Build(int start_idx, int num_vals) {
     std::string key_space, value_space;
@@ -497,18 +489,17 @@ class FaultInjectionTest {
   }
 
   void PartialCompactTestReopenWithFault(ResetMethod reset_method,
-                                         int num_pre_sync,
-                                         int num_post_sync) {
+                                         int num_pre_sync, int num_post_sync) {
     env_->SetFilesystemActive(false);
     CloseDB();
     ResetDBState(reset_method);
     ASSERT_OK(OpenDB());
     ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+                     FaultInjectionTest::VAL_EXPECT_ERROR));
   }
 
-  void NoWriteTestPreFault() {
-  }
+  void NoWriteTestPreFault() {}
 
   void NoWriteTestReopenWithFault(ResetMethod reset_method) {
     CloseDB();
@@ -524,8 +515,7 @@ class FaultInjectionTest {
       int num_post_sync = rnd.Uniform(kMaxNumValues);
 
       PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
-                                        num_pre_sync,
+      PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
                                         num_post_sync);
 
       NoWriteTestPreFault();
@@ -535,8 +525,7 @@ class FaultInjectionTest {
       // No new files created so we expect all values since no files will be
       // dropped.
       PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
-                                        num_pre_sync + num_post_sync,
-                                        0);
+                                        num_pre_sync + num_post_sync, 0);
 
       NoWriteTestPreFault();
       NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
@@ -556,6 +545,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/filename.cc b/db/filename.cc
index 6539bbe..85de45c 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "db/filename.h"
+
 #include <ctype.h>
 #include <stdio.h>
-#include "db/filename.h"
+
 #include "db/dbformat.h"
 #include "leveldb/env.h"
 #include "util/logging.h"
@@ -19,8 +21,7 @@ static std::string MakeFileName(const std::string& dbname, uint64_t number,
                                 const char* suffix) {
   char buf[100];
   snprintf(buf, sizeof(buf), "/%06llu.%s",
-           static_cast<unsigned long long>(number),
-           suffix);
+           static_cast<unsigned long long>(number), suffix);
   return dbname + buf;
 }
 
@@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
   return dbname + "/CURRENT";
 }
 
-std::string LockFileName(const std::string& dbname) {
-  return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
 
 std::string TempFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
@@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
   return dbname + "/LOG.old";
 }
 
-
 // Owned filenames have the form:
 //    dbname/CURRENT
 //    dbname/LOCK
@@ -77,8 +75,7 @@ std::string OldInfoLogFileName(const std::string& dbname) {
 //    dbname/LOG.old
 //    dbname/MANIFEST-[0-9]+
 //    dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& filename,
-                   uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
                    FileType* type) {
   Slice rest(filename);
   if (rest == "CURRENT") {
diff --git a/db/filename.h b/db/filename.h
index 62cb3ef..524e813 100644
--- a/db/filename.h
+++ b/db/filename.h
@@ -8,7 +8,9 @@
 #define STORAGE_LEVELDB_DB_FILENAME_H_
 
 #include <stdint.h>
+
 #include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "port/port.h"
@@ -69,8 +71,7 @@ std::string OldInfoLogFileName(const std::string& dbname);
 // If filename is a leveldb file, store the type of the file in *type.
 // The number encoded in the filename is stored in *number.  If the
 // filename was successfully parsed, returns true.  Else return false.
-bool ParseFileName(const std::string& filename,
-                   uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
                    FileType* type);
 
 // Make the CURRENT file point to the descriptor file with the
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 0bde538..952f320 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -11,7 +11,7 @@
 
 namespace leveldb {
 
-class FileNameTest { };
+class FileNameTest {};
 
 TEST(FileNameTest, Parse) {
   Slice db;
@@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
     uint64_t number;
     FileType type;
   } cases[] = {
-    { "100.log",            100,   kLogFile },
-    { "0.log",              0,     kLogFile },
-    { "0.sst",              0,     kTableFile },
-    { "0.ldb",              0,     kTableFile },
-    { "CURRENT",            0,     kCurrentFile },
-    { "LOCK",               0,     kDBLockFile },
-    { "MANIFEST-2",         2,     kDescriptorFile },
-    { "MANIFEST-7",         7,     kDescriptorFile },
-    { "LOG",                0,     kInfoLogFile },
-    { "LOG.old",            0,     kInfoLogFile },
-    { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+      {"100.log", 100, kLogFile},
+      {"0.log", 0, kLogFile},
+      {"0.sst", 0, kTableFile},
+      {"0.ldb", 0, kTableFile},
+      {"CURRENT", 0, kCurrentFile},
+      {"LOCK", 0, kDBLockFile},
+      {"MANIFEST-2", 2, kDescriptorFile},
+      {"MANIFEST-7", 7, kDescriptorFile},
+      {"LOG", 0, kInfoLogFile},
+      {"LOG.old", 0, kInfoLogFile},
+      {"18446744073709551615.log", 18446744073709551615ull, kLogFile},
   };
   for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
     std::string f = cases[i].fname;
@@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
   }
 
   // Errors
-  static const char* errors[] = {
-    "",
-    "foo",
-    "foo-dx-100.log",
-    ".log",
-    "",
-    "manifest",
-    "CURREN",
-    "CURRENTX",
-    "MANIFES",
-    "MANIFEST",
-    "MANIFEST-",
-    "XMANIFEST-3",
-    "MANIFEST-3x",
-    "LOC",
-    "LOCKx",
-    "LO",
-    "LOGx",
-    "18446744073709551616.log",
-    "184467440737095516150.log",
-    "100",
-    "100.",
-    "100.lop"
-  };
+  static const char* errors[] = {"",
+                                 "foo",
+                                 "foo-dx-100.log",
+                                 ".log",
+                                 "",
+                                 "manifest",
+                                 "CURREN",
+                                 "CURRENTX",
+                                 "MANIFES",
+                                 "MANIFEST",
+                                 "MANIFEST-",
+                                 "XMANIFEST-3",
+                                 "MANIFEST-3x",
+                                 "LOC",
+                                 "LOCKx",
+                                 "LO",
+                                 "LOGx",
+                                 "18446744073709551616.log",
+                                 "184467440737095516150.log",
+                                 "100",
+                                 "100.",
+                                 "100.lop"};
   for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
     std::string f = errors[i];
     ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
@@ -130,6 +128,4 @@ TEST(FileNameTest, Construction) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
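The Parse() cases above are the inverse of the construction side touched in filename.cc: MakeFileName() formats numbers with "/%06llu.%s", so the table entry "100.log" corresponds to a file the library would have created as dbname + "/000100.log". A self-contained sketch of that construction (same pattern as the hunk above; the function name is illustrative):

#include <cstdint>
#include <cstdio>
#include <string>

// Sketch only: the zero-padded name pattern from MakeFileName() above.
static std::string MakeFileNameSketch(const std::string& dbname,
                                      uint64_t number, const char* suffix) {
  char buf[100];
  std::snprintf(buf, sizeof(buf), "/%06llu.%s",
                static_cast<unsigned long long>(number), suffix);
  return dbname + buf;
}
// MakeFileNameSketch("/db", 7, "ldb") == "/db/000007.ldb"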
diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc
index 9f4b7dd..b21cf8e 100644
--- a/db/leveldbutil.cc
+++ b/db/leveldbutil.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <stdio.h>
+
 #include "leveldb/dumpfile.h"
 #include "leveldb/env.h"
 #include "leveldb/status.h"
@@ -38,11 +39,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
 }  // namespace leveldb
 
 static void Usage() {
-  fprintf(
-      stderr,
-      "Usage: leveldbutil command...\n"
-      "   dump files...         -- dump contents of specified files\n"
-      );
+  fprintf(stderr,
+          "Usage: leveldbutil command...\n"
+          "   dump files...         -- dump contents of specified files\n");
 }
 
 int main(int argc, char** argv) {
@@ -54,7 +53,7 @@ int main(int argc, char** argv) {
   } else {
     std::string command = argv[1];
     if (command == "dump") {
-      ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+      ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
     } else {
       Usage();
       ok = false;
diff --git a/db/log_reader.cc b/db/log_reader.cc
index 19c4df6..f472723 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -5,6 +5,7 @@
 #include "db/log_reader.h"
 
 #include <stdio.h>
+
 #include "leveldb/env.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
@@ -12,8 +13,7 @@
 namespace leveldb {
 namespace log {
 
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() {}
 
 Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
                uint64_t initial_offset)
@@ -26,12 +26,9 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
       last_record_offset_(0),
       end_of_buffer_offset_(0),
       initial_offset_(initial_offset),
-      resyncing_(initial_offset > 0) {
-}
+      resyncing_(initial_offset > 0) {}
 
-Reader::~Reader() {
-  delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }
 
 bool Reader::SkipToInitialBlock() {
   const size_t offset_in_block = initial_offset_ % kBlockSize;
@@ -176,9 +173,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
   return false;
 }
 
-uint64_t Reader::LastRecordOffset() {
-  return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
 
 void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
   ReportDrop(bytes, Status::Corruption(reason));
diff --git a/db/log_reader.h b/db/log_reader.h
index 7dcce8e..b27c164 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -63,7 +63,7 @@ class Reader {
   bool const checksum_;
   char* const backing_store_;
   Slice buffer_;
-  bool eof_;   // Last Read() indicated EOF by returning < kBlockSize
+  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
 
   // Offset of the last record returned by ReadRecord.
   uint64_t last_record_offset_;
diff --git a/db/log_test.cc b/db/log_test.cc
index 48a5928..3acaa33 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -56,7 +56,7 @@ class LogTest {
     Slice contents_;
     bool force_error_;
     bool returned_partial_;
-    StringSource() : force_error_(false), returned_partial_(false) { }
+    StringSource() : force_error_(false), returned_partial_(false) {}
 
     virtual Status Read(size_t n, Slice* result, char* scratch) {
       ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
@@ -93,7 +93,7 @@ class LogTest {
     size_t dropped_bytes_;
     std::string message_;
 
-    ReportCollector() : dropped_bytes_(0) { }
+    ReportCollector() : dropped_bytes_(0) {}
     virtual void Corruption(size_t bytes, const Status& status) {
       dropped_bytes_ += bytes;
       message_.append(status.ToString());
@@ -113,11 +113,11 @@ class LogTest {
   static int num_initial_offset_records_;
 
  public:
-  LogTest() : reading_(false),
-              writer_(new Writer(&dest_)),
-              reader_(new Reader(&source_, &report_, true/*checksum*/,
-                      0/*initial_offset*/)) {
-  }
+  LogTest()
+      : reading_(false),
+        writer_(new Writer(&dest_)),
+        reader_(new Reader(&source_, &report_, true /*checksum*/,
+                           0 /*initial_offset*/)) {}
 
   ~LogTest() {
     delete writer_;
@@ -134,9 +134,7 @@ class LogTest {
     writer_->AddRecord(Slice(msg));
   }
 
-  size_t WrittenBytes() const {
-    return dest_.contents_.size();
-  }
+  size_t WrittenBytes() const { return dest_.contents_.size(); }
 
   std::string Read() {
     if (!reading_) {
@@ -166,22 +164,16 @@ class LogTest {
 
   void FixChecksum(int header_offset, int len) {
     // Compute crc of type/len/data
-    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
     crc = crc32c::Mask(crc);
     EncodeFixed32(&dest_.contents_[header_offset], crc);
   }
 
-  void ForceError() {
-    source_.force_error_ = true;
-  }
+  void ForceError() { source_.force_error_ = true; }
 
-  size_t DroppedBytes() const {
-    return report_.dropped_bytes_;
-  }
+  size_t DroppedBytes() const { return report_.dropped_bytes_; }
 
-  std::string ReportMessage() const {
-    return report_.message_;
-  }
+  std::string ReportMessage() const { return report_.message_; }
 
   // Returns OK iff recorded error message contains "msg"
   std::string MatchError(const std::string& msg) const {
@@ -202,14 +194,14 @@ class LogTest {
 
   void StartReadingAt(uint64_t initial_offset) {
     delete reader_;
-    reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+    reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
   }
 
   void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
     WriteInitialOffsetLog();
     reading_ = true;
     source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
+    Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
                                        WrittenBytes() + offset_past_end);
     Slice record;
     std::string scratch;
@@ -222,8 +214,8 @@ class LogTest {
     WriteInitialOffsetLog();
     reading_ = true;
     source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
-                                       initial_offset);
+    Reader* offset_reader =
+        new Reader(&source_, &report_, true /*checksum*/, initial_offset);
 
     // Read all records from expected_record_offset through the last one.
     ASSERT_LT(expected_record_offset, num_initial_offset_records_);
@@ -242,34 +234,30 @@ class LogTest {
   }
 };
 
-size_t LogTest::initial_offset_record_sizes_[] =
-    {10000,  // Two sizable records in first block
-     10000,
-     2 * log::kBlockSize - 1000,  // Span three blocks
-     1,
-     13716,  // Consume all but two bytes of block 3.
-     log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
-    };
+size_t LogTest::initial_offset_record_sizes_[] = {
+    10000,  // Two sizable records in first block
+    10000,
+    2 * log::kBlockSize - 1000,  // Span three blocks
+    1,
+    13716,                          // Consume all but two bytes of block 3.
+    log::kBlockSize - kHeaderSize,  // Consume the entirety of block 4.
+};
 
-uint64_t LogTest::initial_offset_last_record_offsets_[] =
-    {0,
-     kHeaderSize + 10000,
-     2 * (kHeaderSize + 10000),
-     2 * (kHeaderSize + 10000) +
-         (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
-     2 * (kHeaderSize + 10000) +
-         (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
-         + kHeaderSize + 1,
-     3 * log::kBlockSize,
-    };
+uint64_t LogTest::initial_offset_last_record_offsets_[] = {
+    0,
+    kHeaderSize + 10000,
+    2 * (kHeaderSize + 10000),
+    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
+        kHeaderSize + 1,
+    3 * log::kBlockSize,
+};
 
 // LogTest::initial_offset_last_record_offsets_ must be defined before this.
 int LogTest::num_initial_offset_records_ =
-    sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
+    sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
 
-TEST(LogTest, Empty) {
-  ASSERT_EQ("EOF", Read());
-}
+TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
 
 TEST(LogTest, ReadWrite) {
   Write("foo");
@@ -306,7 +294,7 @@ TEST(LogTest, Fragmentation) {
 
 TEST(LogTest, MarginalTrailer) {
   // Make a trailer that is exactly the same length as an empty record.
-  const int n = kBlockSize - 2*kHeaderSize;
+  const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
   Write("");
@@ -319,7 +307,7 @@ TEST(LogTest, MarginalTrailer) {
 
 TEST(LogTest, MarginalTrailer2) {
   // Make a trailer that is exactly the same length as an empty record.
-  const int n = kBlockSize - 2*kHeaderSize;
+  const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
   Write("bar");
@@ -331,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
 }
 
 TEST(LogTest, ShortTrailer) {
-  const int n = kBlockSize - 2*kHeaderSize + 4;
+  const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
   Write("");
@@ -343,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
 }
 
 TEST(LogTest, AlignedEof) {
-  const int n = kBlockSize - 2*kHeaderSize + 4;
+  const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
   ASSERT_EQ(BigString("foo", n), Read());
@@ -394,7 +382,7 @@ TEST(LogTest, BadRecordType) {
 
 TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
   Write("foo");
-  ShrinkSize(4);   // Drop all payload as well as a header byte
+  ShrinkSize(4);  // Drop all payload as well as a header byte
   ASSERT_EQ("EOF", Read());
   // Truncated last record is ignored, not treated as an error.
   ASSERT_EQ(0, DroppedBytes());
@@ -492,7 +480,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
   // If initial_offset points to a record after first(R1) but before first(R2)
   // incomplete fragment errors are not actual errors, and must be suppressed
   // until a new first or full record is encountered.
-  Write(BigString("foo", 3*kBlockSize));
+  Write(BigString("foo", 3 * kBlockSize));
   Write("correct");
   StartReadingAt(kBlockSize);
 
@@ -514,44 +502,30 @@ TEST(LogTest, ErrorJoinsRecords) {
   Write("correct");
 
   // Wipe the middle block
-  for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+  for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
     SetByte(offset, 'x');
   }
 
   ASSERT_EQ("correct", Read());
   ASSERT_EQ("EOF", Read());
   const size_t dropped = DroppedBytes();
-  ASSERT_LE(dropped, 2*kBlockSize + 100);
-  ASSERT_GE(dropped, 2*kBlockSize);
+  ASSERT_LE(dropped, 2 * kBlockSize + 100);
+  ASSERT_GE(dropped, 2 * kBlockSize);
 }
 
-TEST(LogTest, ReadStart) {
-  CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
 
-TEST(LogTest, ReadSecondOneOff) {
-  CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
 
-TEST(LogTest, ReadSecondTenThousand) {
-  CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
 
-TEST(LogTest, ReadSecondStart) {
-  CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
 
-TEST(LogTest, ReadThirdOneOff) {
-  CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
 
-TEST(LogTest, ReadThirdStart) {
-  CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
 
-TEST(LogTest, ReadFourthOneOff) {
-  CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
 
 TEST(LogTest, ReadFourthFirstBlockTrailer) {
   CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
@@ -575,17 +549,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
   CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
 }
 
-TEST(LogTest, ReadEnd) {
-  CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
 
-TEST(LogTest, ReadPastEnd) {
-  CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
 
 }  // namespace log
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
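The MarginalTrailer and ShortTrailer tests above rest on a small piece of arithmetic. Assuming the usual log_format.h constants (kBlockSize = 32768 and kHeaderSize = 4 + 2 + 1 = 7; neither constant appears in this patch), a payload of kBlockSize - 2*kHeaderSize fills the first block up to exactly one header's worth of trailing space, i.e. a trailer the size of an empty record:

// Sketch only; the constants are assumed values, not taken from this patch.
constexpr int kBlockSizeSketch = 32768;       // 32 KiB log blocks
constexpr int kHeaderSizeSketch = 4 + 2 + 1;  // crc32c + length + type

constexpr int kPayloadSketch = kBlockSizeSketch - 2 * kHeaderSizeSketch;
constexpr int kWrittenSketch = kHeaderSizeSketch + kPayloadSketch;

static_assert(kWrittenSketch == kBlockSizeSketch - kHeaderSizeSketch,
              "matches ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes())");
static_assert(kBlockSizeSketch - kWrittenSketch == kHeaderSizeSketch,
              "exactly room for one empty record's 7-byte header");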
diff --git a/db/log_writer.cc b/db/log_writer.cc
index 74a0327..5e83f6a 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -5,6 +5,7 @@
 #include "db/log_writer.h"
 
 #include <stdint.h>
+
 #include "leveldb/env.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
@@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
   }
 }
 
-Writer::Writer(WritableFile* dest)
-    : dest_(dest),
-      block_offset_(0) {
+Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
   InitTypeCrc(type_crc_);
 }
 
@@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
   InitTypeCrc(type_crc_);
 }
 
-Writer::~Writer() {
-}
+Writer::~Writer() {}
 
 Status Writer::AddRecord(const Slice& slice) {
   const char* ptr = slice.data();
@@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
       // Switch to a new block
       if (leftover > 0) {
         // Fill the trailer (literal below relies on kHeaderSize being 7)
-        assert(kHeaderSize == 7);
+        static_assert(kHeaderSize == 7, "");
         dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
       }
       block_offset_ = 0;
@@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
   return s;
 }
 
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
-  assert(n <= 0xffff);  // Must fit in two bytes
-  assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
+                                  size_t length) {
+  assert(length <= 0xffff);  // Must fit in two bytes
+  assert(block_offset_ + kHeaderSize + length <= kBlockSize);
 
   // Format the header
   char buf[kHeaderSize];
-  buf[4] = static_cast<char>(n & 0xff);
-  buf[5] = static_cast<char>(n >> 8);
+  buf[4] = static_cast<char>(length & 0xff);
+  buf[5] = static_cast<char>(length >> 8);
   buf[6] = static_cast<char>(t);
 
   // Compute the crc of the record type and the payload.
-  uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
-  crc = crc32c::Mask(crc);                 // Adjust for storage
+  uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
+  crc = crc32c::Mask(crc);  // Adjust for storage
   EncodeFixed32(buf, crc);
 
   // Write the header and the payload
   Status s = dest_->Append(Slice(buf, kHeaderSize));
   if (s.ok()) {
-    s = dest_->Append(Slice(ptr, n));
+    s = dest_->Append(Slice(ptr, length));
     if (s.ok()) {
       s = dest_->Flush();
     }
   }
-  block_offset_ += kHeaderSize + n;
+  block_offset_ += kHeaderSize + length;
   return s;
 }
 
diff --git a/db/log_writer.h b/db/log_writer.h
index 9e7cc47..840809d 100644
--- a/db/log_writer.h
+++ b/db/log_writer.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_DB_LOG_WRITER_H_
 
 #include <stdint.h>
+
 #include "db/log_format.h"
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
@@ -34,7 +35,7 @@ class Writer {
 
  private:
   WritableFile* dest_;
-  int block_offset_;       // Current offset in block
+  int block_offset_;  // Current offset in block
 
   // crc32c values for all supported record types.  These are
   // pre-computed to reduce the overhead of computing the crc of the
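The EmitPhysicalRecord() hunk in log_writer.cc above writes the 7-byte header by hand: EncodeFixed32() fills bytes 0-3 with the masked crc32c, bytes 4-5 carry the payload length, and byte 6 the record type. A standalone sketch of just that byte layout, assuming EncodeFixed32's little-endian encoding and eliding the crc computation (function name is illustrative):

#include <cstdint>

// Sketch only: byte layout of the record header; crc value supplied by caller.
static void FillRecordHeaderSketch(char buf[7], uint32_t masked_crc,
                                   uint16_t length, uint8_t type) {
  buf[0] = static_cast<char>(masked_crc & 0xff);  // crc, little-endian
  buf[1] = static_cast<char>((masked_crc >> 8) & 0xff);
  buf[2] = static_cast<char>((masked_crc >> 16) & 0xff);
  buf[3] = static_cast<char>((masked_crc >> 24) & 0xff);
  buf[4] = static_cast<char>(length & 0xff);  // payload length
  buf[5] = static_cast<char>(length >> 8);
  buf[6] = static_cast<char>(type);  // record type
}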
diff --git a/db/memtable.cc b/db/memtable.cc
index 287afdb..c91405c 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
   return Slice(p, len);
 }
 
-MemTable::MemTable(const InternalKeyComparator& cmp)
-    : comparator_(cmp),
-      refs_(0),
-      table_(comparator_, &arena_) {
-}
+MemTable::MemTable(const InternalKeyComparator& comparator)
+    : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
 
-MemTable::~MemTable() {
-  assert(refs_ == 0);
-}
+MemTable::~MemTable() { assert(refs_ == 0); }
 
 size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
 
-int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
-    const {
+int MemTable::KeyComparator::operator()(const char* aptr,
+                                        const char* bptr) const {
   // Internal keys are encoded as length-prefixed strings.
   Slice a = GetLengthPrefixedSlice(aptr);
   Slice b = GetLengthPrefixedSlice(bptr);
@@ -48,9 +43,9 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
   return scratch->data();
 }
 
-class MemTableIterator: public Iterator {
+class MemTableIterator : public Iterator {
  public:
-  explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
+  explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
 
   virtual bool Valid() const { return iter_.Valid(); }
   virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
@@ -68,19 +63,16 @@ class MemTableIterator: public Iterator {
 
  private:
   MemTable::Table::Iterator iter_;
-  std::string tmp_;       // For passing to EncodeKey
+  std::string tmp_;  // For passing to EncodeKey
 
   // No copying allowed
   MemTableIterator(const MemTableIterator&);
   void operator=(const MemTableIterator&);
 };
 
-Iterator* MemTable::NewIterator() {
-  return new MemTableIterator(&table_);
-}
+Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
 
-void MemTable::Add(SequenceNumber s, ValueType type,
-                   const Slice& key,
+void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
                    const Slice& value) {
   // Format of an entry is concatenation of:
   //  key_size     : varint32 of internal_key.size()
@@ -90,9 +82,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   size_t key_size = key.size();
   size_t val_size = value.size();
   size_t internal_key_size = key_size + 8;
-  const size_t encoded_len =
-      VarintLength(internal_key_size) + internal_key_size +
-      VarintLength(val_size) + val_size;
+  const size_t encoded_len = VarintLength(internal_key_size) +
+                             internal_key_size + VarintLength(val_size) +
+                             val_size;
   char* buf = arena_.Allocate(encoded_len);
   char* p = EncodeVarint32(buf, internal_key_size);
   memcpy(p, key.data(), key_size);
@@ -121,10 +113,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
     // all entries with overly large sequence numbers.
     const char* entry = iter.key();
     uint32_t key_length;
-    const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
+    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
     if (comparator_.comparator.user_comparator()->Compare(
-            Slice(key_ptr, key_length - 8),
-            key.user_key()) == 0) {
+            Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
       // Correct user key
       const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
       switch (static_cast<ValueType>(tag & 0xff)) {
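The Add() hunk above spells out the memtable entry layout: varint32(klen + 8), user key, 8-byte tag, varint32(vlen), value, with encoded_len computed from exactly those pieces. A small sketch of the size computation, using the standard 7-bits-per-byte varint32 length rule (helper names are illustrative, not from the patched sources):

#include <cstddef>
#include <string>

// Sketch only: how many arena bytes an entry of MemTable::Add() occupies.
static size_t VarintLengthSketch(size_t v) {
  size_t len = 1;
  while (v >= 128) {
    v >>= 7;
    len++;
  }
  return len;
}

static size_t EncodedEntrySizeSketch(const std::string& key,
                                     const std::string& value) {
  const size_t internal_key_size = key.size() + 8;  // user key + sequence/type tag
  return VarintLengthSketch(internal_key_size) + internal_key_size +
         VarintLengthSketch(value.size()) + value.size();
}
// EncodedEntrySizeSketch("foo", "bar") == 1 + 11 + 1 + 3 == 16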
diff --git a/db/memtable.h b/db/memtable.h
index f2a6736..ef18bb5 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -6,9 +6,10 @@
 #define STORAGE_LEVELDB_DB_MEMTABLE_H_
 
 #include <string>
-#include "leveldb/db.h"
+
 #include "db/dbformat.h"
 #include "db/skiplist.h"
+#include "leveldb/db.h"
 #include "util/arena.h"
 
 namespace leveldb {
@@ -49,8 +50,7 @@ class MemTable {
   // Add an entry into memtable that maps key to value at the
   // specified sequence number and with the specified type.
   // Typically value will be empty if type==kTypeDeletion.
-  void Add(SequenceNumber seq, ValueType type,
-           const Slice& key,
+  void Add(SequenceNumber seq, ValueType type, const Slice& key,
            const Slice& value);
 
   // If memtable contains a value for key, store it in *value and return true.
@@ -64,7 +64,7 @@ class MemTable {
 
   struct KeyComparator {
     const InternalKeyComparator comparator;
-    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
     int operator()(const char* a, const char* b) const;
   };
   friend class MemTableIterator;
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 87bd53c..547a959 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -86,15 +86,13 @@ class RecoveryTest {
     std::string current;
     ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
     size_t len = current.size();
-    if (len > 0 && current[len-1] == '\n') {
+    if (len > 0 && current[len - 1] == '\n') {
       current.resize(len - 1);
     }
     return dbname_ + "/" + current;
   }
 
-  std::string LogName(uint64_t number) {
-    return LogFileName(dbname_, number);
-  }
+  std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
 
   size_t DeleteLogFiles() {
     // Linux allows unlinking open files, but Windows does not.
@@ -107,13 +105,9 @@ class RecoveryTest {
     return logs.size();
   }
 
-  void DeleteManifestFile() {
-    ASSERT_OK(env_->DeleteFile(ManifestFileName()));
-  }
+  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
 
-  uint64_t FirstLogFile() {
-    return GetFiles(kLogFile)[0];
-  }
+  uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
 
   std::vector<uint64_t> GetFiles(FileType t) {
     std::vector<std::string> filenames;
@@ -129,13 +123,9 @@ class RecoveryTest {
     return result;
   }
 
-  int NumLogs() {
-    return GetFiles(kLogFile).size();
-  }
+  int NumLogs() { return GetFiles(kLogFile).size(); }
 
-  int NumTables() {
-    return GetFiles(kTableFile).size();
-  }
+  int NumTables() { return GetFiles(kTableFile).size(); }
 
   uint64_t FileSize(const std::string& fname) {
     uint64_t result;
@@ -143,9 +133,7 @@ class RecoveryTest {
     return result;
   }
 
-  void CompactMemTable() {
-    dbfull()->TEST_CompactMemTable();
-  }
+  void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
 
   // Directly construct a log file that sets key to val.
   void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
@@ -197,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
     uint64_t len = FileSize(old_manifest);
     WritableFile* file;
     ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
-    std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+    std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
     ASSERT_OK(file->Append(zeroes));
     ASSERT_OK(file->Flush());
     delete file;
@@ -270,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
   // Force creation of multiple memtables by reducing the write buffer size.
   Options opt;
   opt.reuse_logs = true;
-  opt.write_buffer_size = (kNum*100) / 2;
+  opt.write_buffer_size = (kNum * 100) / 2;
   Open(&opt);
   ASSERT_LE(2, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -289,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {
 
   // Make a bunch of uncompacted log files.
   uint64_t old_log = FirstLogFile();
-  MakeLogFile(old_log+1, 1000, "hello", "world");
-  MakeLogFile(old_log+2, 1001, "hi", "there");
-  MakeLogFile(old_log+3, 1002, "foo", "bar2");
+  MakeLogFile(old_log + 1, 1000, "hello", "world");
+  MakeLogFile(old_log + 2, 1001, "hi", "there");
+  MakeLogFile(old_log + 3, 1002, "foo", "bar2");
 
   // Recover and check that all log files were processed.
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
   uint64_t new_log = FirstLogFile();
-  ASSERT_LE(old_log+3, new_log);
+  ASSERT_LE(old_log + 3, new_log);
   ASSERT_EQ("bar2", Get("foo"));
   ASSERT_EQ("world", Get("hello"));
   ASSERT_EQ("there", Get("hi"));
@@ -316,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {
 
   // Check that introducing an older log file does not cause it to be re-read.
   Close();
-  MakeLogFile(old_log+1, 2000, "hello", "stale write");
+  MakeLogFile(old_log + 1, 2000, "hello", "stale write");
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -339,6 +327,4 @@ TEST(RecoveryTest, ManifestMissing) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/repair.cc b/db/repair.cc
index df8dcd2..d5ecc45 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -84,9 +84,7 @@ class Repairer {
           "recovered %d files; %llu bytes. "
           "Some data may have been lost. "
           "****",
-          dbname_.c_str(),
-          static_cast<int>(tables_.size()),
-          bytes);
+          dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
     }
     return status;
   }
@@ -152,8 +150,7 @@ class Repairer {
       Status status = ConvertLogToTable(logs_[i]);
       if (!status.ok()) {
         Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
-            (unsigned long long) logs_[i],
-            status.ToString().c_str());
+            (unsigned long long)logs_[i], status.ToString().c_str());
       }
       ArchiveFile(logname);
     }
@@ -167,8 +164,7 @@ class Repairer {
       virtual void Corruption(size_t bytes, const Status& s) {
         // We print error messages for corruption, but continue repairing.
         Log(info_log, "Log #%llu: dropping %d bytes; %s",
-            (unsigned long long) lognum,
-            static_cast<int>(bytes),
+            (unsigned long long)lognum, static_cast<int>(bytes),
             s.ToString().c_str());
       }
     };
@@ -190,8 +186,8 @@ class Repairer {
     // corruptions cause entire commits to be skipped instead of
     // propagating bad information (like overly large sequence
     // numbers).
-    log::Reader reader(lfile, &reporter, false/*do not checksum*/,
-                       0/*initial_offset*/);
+    log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+                       0 /*initial_offset*/);
 
     // Read all the records and add to a memtable
     std::string scratch;
@@ -202,8 +198,8 @@ class Repairer {
     int counter = 0;
     while (reader.ReadRecord(&record, &scratch)) {
       if (record.size() < 12) {
-        reporter.Corruption(
-            record.size(), Status::Corruption("log record too small"));
+        reporter.Corruption(record.size(),
+                            Status::Corruption("log record too small"));
         continue;
       }
       WriteBatchInternal::SetContents(&batch, record);
@@ -212,8 +208,7 @@ class Repairer {
         counter += WriteBatchInternal::Count(&batch);
       } else {
         Log(options_.info_log, "Log #%llu: ignoring %s",
-            (unsigned long long) log,
-            status.ToString().c_str());
+            (unsigned long long)log, status.ToString().c_str());
         status = Status::OK();  // Keep going with rest of file
       }
     }
@@ -234,9 +229,7 @@ class Repairer {
       }
     }
     Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
-        (unsigned long long) log,
-        counter,
-        (unsigned long long) meta.number,
+        (unsigned long long)log, counter, (unsigned long long)meta.number,
         status.ToString().c_str());
     return status;
   }
@@ -272,8 +265,7 @@ class Repairer {
       ArchiveFile(TableFileName(dbname_, number));
       ArchiveFile(SSTTableFileName(dbname_, number));
       Log(options_.info_log, "Table #%llu: dropped: %s",
-          (unsigned long long) t.meta.number,
-          status.ToString().c_str());
+          (unsigned long long)t.meta.number, status.ToString().c_str());
       return;
     }
 
@@ -287,8 +279,7 @@ class Repairer {
       Slice key = iter->key();
       if (!ParseInternalKey(key, &parsed)) {
         Log(options_.info_log, "Table #%llu: unparsable key %s",
-            (unsigned long long) t.meta.number,
-            EscapeString(key).c_str());
+            (unsigned long long)t.meta.number, EscapeString(key).c_str());
         continue;
       }
 
@@ -307,9 +298,7 @@ class Repairer {
     }
     delete iter;
     Log(options_.info_log, "Table #%llu: %d entries %s",
-        (unsigned long long) t.meta.number,
-        counter,
-        status.ToString().c_str());
+        (unsigned long long)t.meta.number, counter, status.ToString().c_str());
 
     if (status.ok()) {
       tables_.push_back(t);
@@ -363,7 +352,7 @@ class Repairer {
       s = env_->RenameFile(copy, orig);
       if (s.ok()) {
         Log(options_.info_log, "Table #%llu: %d entries repaired",
-            (unsigned long long) t.meta.number, counter);
+            (unsigned long long)t.meta.number, counter);
         tables_.push_back(t);
       }
     }
@@ -395,11 +384,11 @@ class Repairer {
     for (size_t i = 0; i < tables_.size(); i++) {
       // TODO(opt): separate out into multiple levels
       const TableInfo& t = tables_[i];
-      edit_.AddFile(0, t.meta.number, t.meta.file_size,
-                    t.meta.smallest, t.meta.largest);
+      edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+                    t.meta.largest);
     }
 
-    //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+    // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
     {
       log::Writer log(file);
       std::string record;
@@ -447,8 +436,8 @@ class Repairer {
     new_file.append("/");
     new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
     Status s = env_->RenameFile(fname, new_file);
-    Log(options_.info_log, "Archiving %s: %s\n",
-        fname.c_str(), s.ToString().c_str());
+    Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+        s.ToString().c_str());
   }
 };
 }  // namespace
diff --git a/db/skiplist.h b/db/skiplist.h
index 7ac914b..05e5733 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -38,7 +38,7 @@ namespace leveldb {
 
 class Arena;
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 class SkipList {
  private:
   struct Node;
@@ -100,13 +100,13 @@ class SkipList {
 
   // Immutable after construction
   Comparator const compare_;
-  Arena* const arena_;    // Arena used for allocations of nodes
+  Arena* const arena_;  // Arena used for allocations of nodes
 
   Node* const head_;
 
   // Modified only by Insert().  Read racily by readers, but stale
   // values are ok.
-  std::atomic<int> max_height_;   // Height of the entire list
+  std::atomic<int> max_height_;  // Height of the entire list
 
   inline int GetMaxHeight() const {
     return max_height_.load(std::memory_order_relaxed);
@@ -143,9 +143,9 @@ class SkipList {
 };
 
 // Implementation details follow
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 struct SkipList<Key, Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
+  explicit Node(const Key& k) : key(k) {}
 
   Key const key;
 
@@ -179,38 +179,38 @@ struct SkipList<Key, Comparator>::Node {
   std::atomic<Node*> next_[1];
 };
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+    const Key& key, int height) {
   char* const node_memory = arena_->AllocateAligned(
       sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
   return new (node_memory) Node(key);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   list_ = list;
   node_ = nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
   return node_ != nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
@@ -221,17 +221,17 @@ inline void SkipList<Key, Comparator>::Iterator::Prev() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
   node_ = list_->FindGreaterOrEqual(target, nullptr);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
@@ -239,7 +239,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 int SkipList<Key, Comparator>::RandomHeight() {
   // Increase height with probability 1 in kBranching
   static const unsigned int kBranching = 4;
@@ -252,13 +252,13 @@ int SkipList<Key, Comparator>::RandomHeight() {
   return height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
   // null n is considered infinite
   return (n != nullptr) && (compare_(n->key, key) < 0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
                                               Node** prev) const {
@@ -281,7 +281,7 @@ SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   Node* x = head_;
@@ -302,7 +302,7 @@ SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
@@ -322,7 +322,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
     : compare_(cmp),
       arena_(arena),
@@ -334,7 +334,7 @@ SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 void SkipList<Key, Comparator>::Insert(const Key& key) {
   // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
   // here since Insert() is externally synchronized.
@@ -368,7 +368,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::Contains(const Key& key) const {
   Node* x = FindGreaterOrEqual(key, nullptr);
   if (x != nullptr && Equal(key, x->key)) {
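RandomHeight(), touched only for template spacing above, keeps growing a node while a 1-in-kBranching trial succeeds, so with kBranching = 4 the expected height is 1/(1 - 1/4) = 4/3 forward pointers per node (ignoring the height cap). A standalone sketch of the same policy, with kMaxHeight = 12 assumed and std::rand() standing in for the list's Random member:

#include <cstdlib>

// Sketch only: same coin-flip policy as SkipList::RandomHeight().
static int RandomHeightSketch() {
  static const int kMaxHeightSketch = 12;  // assumed value
  static const unsigned int kBranchingSketch = 4;
  int height = 1;
  while (height < kMaxHeightSketch && (std::rand() % kBranchingSketch) == 0) {
    height++;
  }
  return height;
}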
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 38c1941..9fa2d96 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -31,7 +31,7 @@ struct Comparator {
   }
 };
 
-class SkipTest { };
+class SkipTest {};
 
 TEST(SkipTest, Empty) {
   Arena arena;
@@ -117,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {
 
     // Compare against model iterator
     for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
-         model_iter != keys.rend();
-         ++model_iter) {
+         model_iter != keys.rend(); ++model_iter) {
       ASSERT_TRUE(iter.Valid());
       ASSERT_EQ(*model_iter, iter.key());
       iter.Prev();
@@ -160,12 +159,12 @@ class ConcurrentTest {
   static uint64_t hash(Key key) { return key & 0xff; }
 
   static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
+    uint64_t data[2] = {k, g};
     return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
   }
 
   static Key MakeKey(uint64_t k, uint64_t g) {
-    assert(sizeof(Key) == sizeof(uint64_t));
+    static_assert(sizeof(Key) == sizeof(uint64_t), "");
     assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
     assert(g <= 0xffffffffu);
     return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
@@ -195,9 +194,7 @@ class ConcurrentTest {
     void Set(int k, int v) {
       generation[k].store(v, std::memory_order_release);
     }
-    int Get(int k) {
-      return generation[k].load(std::memory_order_acquire);
-    }
+    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
 
     State() {
       for (int k = 0; k < K; k++) {
@@ -216,7 +213,7 @@ class ConcurrentTest {
   SkipList<Key, Comparator> list_;
 
  public:
-  ConcurrentTest() : list_(Comparator(), &arena_) { }
+  ConcurrentTest() : list_(Comparator(), &arena_) {}
 
   // REQUIRES: External synchronization
   void WriteStep(Random* rnd) {
@@ -255,11 +252,9 @@ class ConcurrentTest {
         // Note that generation 0 is never inserted, so it is ok if
         // <*,0,*> is missing.
         ASSERT_TRUE((gen(pos) == 0) ||
-                    (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
-                    ) << "key: " << key(pos)
-                      << "; gen: " << gen(pos)
-                      << "; initgen: "
-                      << initial_state.Get(key(pos));
+                    (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+            << "key: " << key(pos) << "; gen: " << gen(pos)
+            << "; initgen: " << initial_state.Get(key(pos));
 
         // Advance to next key in the valid key space
         if (key(pos) < key(current)) {
@@ -305,17 +300,10 @@ class TestState {
   int seed_;
   std::atomic<bool> quit_flag_;
 
-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
+  enum ReaderState { STARTING, RUNNING, DONE };
 
   explicit TestState(int s)
-      : seed_(s),
-        quit_flag_(false),
-        state_(STARTING),
-        state_cv_(&mu_) {}
+      : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
 
   void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
     mu_.Lock();
@@ -378,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/snapshot.h b/db/snapshot.h
index c43d9f9..9f1d664 100644
--- a/db/snapshot.h
+++ b/db/snapshot.h
@@ -44,8 +44,14 @@ class SnapshotList {
   }
 
   bool empty() const { return head_.next_ == &head_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
+  SnapshotImpl* oldest() const {
+    assert(!empty());
+    return head_.next_;
+  }
+  SnapshotImpl* newest() const {
+    assert(!empty());
+    return head_.prev_;
+  }
 
   // Creates a SnapshotImpl and appends it to the end of the list.
   SnapshotImpl* New(SequenceNumber sequence_number) {
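oldest() and newest() above rely on SnapshotList's circular layout around a sentinel head: an empty list points at itself, and each append goes in just before head_, so head_.next_ is always the oldest snapshot and head_.prev_ the newest. A minimal sketch of that kind of insertion (struct and names illustrative; the real New() is not shown in this patch):

// Sketch only: sentinel-based circular doubly-linked list append.
struct ListNodeSketch {
  ListNodeSketch* prev_;
  ListNodeSketch* next_;
};

static void AppendNewestSketch(ListNodeSketch* head, ListNodeSketch* n) {
  n->next_ = head;         // the new node sits just before the sentinel...
  n->prev_ = head->prev_;  // ...after the previously newest node
  n->prev_->next_ = n;
  n->next_->prev_ = n;
}
// With head->next_ == head->prev_ == head initially, the first append makes
// head->next_ == head->prev_ == n; later appends keep head->next_ the oldest.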
diff --git a/db/table_cache.cc b/db/table_cache.cc
index 7226d3b..73f05fd 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
   cache->Release(h);
 }
 
-TableCache::TableCache(const std::string& dbname,
-                       const Options& options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
                        int entries)
     : env_(options.env),
       dbname_(dbname),
       options_(options),
-      cache_(NewLRUCache(entries)) {
-}
+      cache_(NewLRUCache(entries)) {}
 
-TableCache::~TableCache() {
-  delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }
 
 Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
                              Cache::Handle** handle) {
@@ -80,8 +76,7 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
 }
 
 Iterator* TableCache::NewIterator(const ReadOptions& options,
-                                  uint64_t file_number,
-                                  uint64_t file_size,
+                                  uint64_t file_number, uint64_t file_size,
                                   Table** tableptr) {
   if (tableptr != nullptr) {
     *tableptr = nullptr;
@@ -102,17 +97,15 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
   return result;
 }
 
-Status TableCache::Get(const ReadOptions& options,
-                       uint64_t file_number,
-                       uint64_t file_size,
-                       const Slice& k,
-                       void* arg,
-                       void (*saver)(void*, const Slice&, const Slice&)) {
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+                       uint64_t file_size, const Slice& k, void* arg,
+                       void (*handle_result)(void*, const Slice&,
+                                             const Slice&)) {
   Cache::Handle* handle = nullptr;
   Status s = FindTable(file_number, file_size, &handle);
   if (s.ok()) {
     Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
-    s = t->InternalGet(options, k, arg, saver);
+    s = t->InternalGet(options, k, arg, handle_result);
     cache_->Release(handle);
   }
   return s;
diff --git a/db/table_cache.h b/db/table_cache.h
index ae8bee5..21ae92d 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -7,8 +7,10 @@
 #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "db/dbformat.h"
 #include "leveldb/cache.h"
 #include "leveldb/table.h"
@@ -30,18 +32,13 @@ class TableCache {
   // underlies the returned iterator.  The returned "*tableptr" object is owned
   // by the cache and should not be deleted, and is valid for as long as the
   // returned iterator is live.
-  Iterator* NewIterator(const ReadOptions& options,
-                        uint64_t file_number,
-                        uint64_t file_size,
-                        Table** tableptr = nullptr);
+  Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+                        uint64_t file_size, Table** tableptr = nullptr);
 
   // If a seek to internal key "k" in specified file finds an entry,
   // call (*handle_result)(arg, found_key, found_value).
-  Status Get(const ReadOptions& options,
-             uint64_t file_number,
-             uint64_t file_size,
-             const Slice& k,
-             void* arg,
+  Status Get(const ReadOptions& options, uint64_t file_number,
+             uint64_t file_size, const Slice& k, void* arg,
              void (*handle_result)(void*, const Slice&, const Slice&));
 
   // Evict any entry for the specified file number
diff --git a/db/version_edit.cc b/db/version_edit.cc
index b7a366d..44a4d02 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -12,15 +12,15 @@ namespace leveldb {
 // Tag numbers for serialized VersionEdit.  These numbers are written to
 // disk and should not be changed.
 enum Tag {
-  kComparator           = 1,
-  kLogNumber            = 2,
-  kNextFileNumber       = 3,
-  kLastSequence         = 4,
-  kCompactPointer       = 5,
-  kDeletedFile          = 6,
-  kNewFile              = 7,
+  kComparator = 1,
+  kLogNumber = 2,
+  kNextFileNumber = 3,
+  kLastSequence = 4,
+  kCompactPointer = 5,
+  kDeletedFile = 6,
+  kNewFile = 7,
   // 8 was used for large value refs
-  kPrevLogNumber        = 9
+  kPrevLogNumber = 9
 };
 
 void VersionEdit::Clear() {
@@ -67,8 +67,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
   }
 
   for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+       iter != deleted_files_.end(); ++iter) {
     PutVarint32(dst, kDeletedFile);
     PutVarint32(dst, iter->first);   // level
     PutVarint64(dst, iter->second);  // file number
@@ -97,8 +96,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
 
 static bool GetLevel(Slice* input, int* level) {
   uint32_t v;
-  if (GetVarint32(input, &v) &&
-      v < config::kNumLevels) {
+  if (GetVarint32(input, &v) && v < config::kNumLevels) {
     *level = v;
     return true;
   } else {
@@ -163,8 +161,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kCompactPointer:
-        if (GetLevel(&input, &level) &&
-            GetInternalKey(&input, &key)) {
+        if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
           compact_pointers_.push_back(std::make_pair(level, key));
         } else {
           msg = "compaction pointer";
@@ -172,8 +169,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kDeletedFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &number)) {
+        if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
           deleted_files_.insert(std::make_pair(level, number));
         } else {
           msg = "deleted file";
@@ -181,8 +177,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kNewFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &f.number) &&
+        if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
             GetVarint64(&input, &f.file_size) &&
             GetInternalKey(&input, &f.smallest) &&
             GetInternalKey(&input, &f.largest)) {
@@ -239,8 +234,7 @@ std::string VersionEdit::DebugString() const {
     r.append(compact_pointers_[i].second.DebugString());
   }
   for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+       iter != deleted_files_.end(); ++iter) {
     r.append("\n  DeleteFile: ");
     AppendNumberTo(&r, iter->first);
     r.append(" ");
diff --git a/db/version_edit.h b/db/version_edit.h
index eaef77b..3daf4ef 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -8,6 +8,7 @@
 #include <set>
 #include <utility>
 #include <vector>
+
 #include "db/dbformat.h"
 
 namespace leveldb {
@@ -16,19 +17,19 @@ class VersionSet;
 
 struct FileMetaData {
   int refs;
-  int allowed_seeks;          // Seeks allowed until compaction
+  int allowed_seeks;  // Seeks allowed until compaction
   uint64_t number;
-  uint64_t file_size;         // File size in bytes
-  InternalKey smallest;       // Smallest internal key served by table
-  InternalKey largest;        // Largest internal key served by table
+  uint64_t file_size;    // File size in bytes
+  InternalKey smallest;  // Smallest internal key served by table
+  InternalKey largest;   // Largest internal key served by table
 
-  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
 };
 
 class VersionEdit {
  public:
   VersionEdit() { Clear(); }
-  ~VersionEdit() { }
+  ~VersionEdit() {}
 
   void Clear();
 
@@ -59,10 +60,8 @@ class VersionEdit {
   // Add the specified file at the specified number.
   // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
   // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
-  void AddFile(int level, uint64_t file,
-               uint64_t file_size,
-               const InternalKey& smallest,
-               const InternalKey& largest) {
+  void AddFile(int level, uint64_t file, uint64_t file_size,
+               const InternalKey& smallest, const InternalKey& largest) {
     FileMetaData f;
     f.number = file;
     f.file_size = file_size;
@@ -84,7 +83,7 @@ class VersionEdit {
  private:
   friend class VersionSet;
 
-  typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+  typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;
 
   std::string comparator_;
   uint64_t log_number_;
@@ -97,9 +96,9 @@ class VersionEdit {
   bool has_next_file_number_;
   bool has_last_sequence_;
 
-  std::vector< std::pair<int, InternalKey> > compact_pointers_;
+  std::vector<std::pair<int, InternalKey> > compact_pointers_;
   DeletedFileSet deleted_files_;
-  std::vector< std::pair<int, FileMetaData> > new_files_;
+  std::vector<std::pair<int, FileMetaData> > new_files_;
 };
 
 }  // namespace leveldb
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 280310b..0b7cda8 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
   ASSERT_EQ(encoded, encoded2);
 }
 
-class VersionEditTest { };
+class VersionEditTest {};
 
 TEST(VersionEditTest, EncodeDecode) {
   static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/version_set.cc b/db/version_set.cc
index 56493ac..96a92cc 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -4,8 +4,10 @@
 
 #include "db/version_set.h"
 
-#include <algorithm>
 #include <stdio.h>
+
+#include <algorithm>
+
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -84,8 +86,7 @@ Version::~Version() {
 }
 
 int FindFile(const InternalKeyComparator& icmp,
-             const std::vector<FileMetaData*>& files,
-             const Slice& key) {
+             const std::vector<FileMetaData*>& files, const Slice& key) {
   uint32_t left = 0;
   uint32_t right = files.size();
   while (left < right) {
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
   return right;
 }
 
-static bool AfterFile(const Comparator* ucmp,
-                      const Slice* user_key, const FileMetaData* f) {
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+                      const FileMetaData* f) {
   // null user_key occurs before all keys and is therefore never after *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->largest.user_key()) > 0);
 }
 
-static bool BeforeFile(const Comparator* ucmp,
-                       const Slice* user_key, const FileMetaData* f) {
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+                       const FileMetaData* f) {
   // null user_key occurs after all keys and is therefore never before *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
 }
 
-bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const std::vector<FileMetaData*>& files,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                           bool disjoint_sorted_files,
+                           const std::vector<FileMetaData*>& files,
+                           const Slice* smallest_user_key,
+                           const Slice* largest_user_key) {
   const Comparator* ucmp = icmp.user_comparator();
   if (!disjoint_sorted_files) {
     // Need to check against all files
@@ -143,7 +143,8 @@ bool SomeFileOverlapsRange(
   uint32_t index = 0;
   if (smallest_user_key != nullptr) {
     // Find the earliest possible internal key for smallest_user_key
-    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+                          kValueTypeForSeek);
     index = FindFile(icmp, files, small_key.Encode());
   }
 
@@ -164,13 +165,9 @@ class Version::LevelFileNumIterator : public Iterator {
  public:
   LevelFileNumIterator(const InternalKeyComparator& icmp,
                        const std::vector<FileMetaData*>* flist)
-      : icmp_(icmp),
-        flist_(flist),
-        index_(flist->size()) {        // Marks as invalid
-  }
-  virtual bool Valid() const {
-    return index_ < flist_->size();
+      : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
   }
+  virtual bool Valid() const { return index_ < flist_->size(); }
   virtual void Seek(const Slice& target) {
     index_ = FindFile(icmp_, *flist_, target);
   }
@@ -197,10 +194,11 @@ class Version::LevelFileNumIterator : public Iterator {
   Slice value() const {
     assert(Valid());
     EncodeFixed64(value_buf_, (*flist_)[index_]->number);
-    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+    EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
     return Slice(value_buf_, sizeof(value_buf_));
   }
   virtual Status status() const { return Status::OK(); }
+
  private:
   const InternalKeyComparator icmp_;
   const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
   mutable char value_buf_[16];
 };
 
-static Iterator* GetFileIterator(void* arg,
-                                 const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
                                  const Slice& file_value) {
   TableCache* cache = reinterpret_cast<TableCache*>(arg);
   if (file_value.size() != 16) {
     return NewErrorIterator(
         Status::Corruption("FileReader invoked with unexpected value"));
   } else {
-    return cache->NewIterator(options,
-                              DecodeFixed64(file_value.data()),
+    return cache->NewIterator(options, DecodeFixed64(file_value.data()),
                               DecodeFixed64(file_value.data() + 8));
   }
 }
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg,
 Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                             int level) const {
   return NewTwoLevelIterator(
-      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
-      &GetFileIterator, vset_->table_cache_, options);
+      new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+      vset_->table_cache_, options);
 }
 
 void Version::AddIterators(const ReadOptions& options,
                            std::vector<Iterator*>* iters) {
   // Merge all level zero files together since they may overlap
   for (size_t i = 0; i < files_[0].size(); i++) {
-    iters->push_back(
-        vset_->table_cache_->NewIterator(
-            options, files_[0][i]->number, files_[0][i]->file_size));
+    iters->push_back(vset_->table_cache_->NewIterator(
+        options, files_[0][i]->number, files_[0][i]->file_size));
   }
 
   // For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
   Slice user_key;
   std::string* value;
 };
-}
+}  // namespace
 static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
   Saver* s = reinterpret_cast<Saver*>(arg);
   ParsedInternalKey parsed_key;
@@ -284,8 +279,7 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
   return a->number > b->number;
 }
 
-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
-                                 void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                  bool (*func)(void*, int, FileMetaData*)) {
   // TODO(sanjay): Change Version::Get() to use this function.
   const Comparator* ucmp = vset_->icmp_.user_comparator();
@@ -329,10 +323,8 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
   }
 }
 
-Status Version::Get(const ReadOptions& options,
-                    const LookupKey& k,
-                    std::string* value,
-                    GetStats* stats) {
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+                    std::string* value, GetStats* stats) {
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
@@ -405,14 +397,14 @@ Status Version::Get(const ReadOptions& options,
       saver.ucmp = ucmp;
       saver.user_key = user_key;
       saver.value = value;
-      s = vset_->table_cache_->Get(options, f->number, f->file_size,
-                                   ikey, &saver, SaveValue);
+      s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
+                                   &saver, SaveValue);
       if (!s.ok()) {
         return s;
       }
       switch (saver.state) {
         case kNotFound:
-          break;      // Keep searching in other files
+          break;  // Keep searching in other files
         case kFound:
           return s;
         case kDeleted:
@@ -479,9 +471,7 @@ bool Version::RecordReadSample(Slice internal_key) {
   return false;
 }
 
-void Version::Ref() {
-  ++refs_;
-}
+void Version::Ref() { ++refs_; }
 
 void Version::Unref() {
   assert(this != &vset_->dummy_versions_);
@@ -492,16 +482,14 @@ void Version::Unref() {
   }
 }
 
-bool Version::OverlapInLevel(int level,
-                             const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
                              const Slice* largest_user_key) {
   return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                                smallest_user_key, largest_user_key);
 }
 
-int Version::PickLevelForMemTableOutput(
-    const Slice& smallest_user_key,
-    const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+                                        const Slice& largest_user_key) {
   int level = 0;
   if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
     // Push to next level if there is no overlap in next level,
@@ -528,11 +516,9 @@ int Version::PickLevelForMemTableOutput(
 }
 
 // Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
-    int level,
-    const InternalKey* begin,
-    const InternalKey* end,
-    std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+                                   const InternalKey* end,
+                                   std::vector<FileMetaData*>* inputs) {
   assert(level >= 0);
   assert(level < config::kNumLevels);
   inputs->clear();
@@ -544,7 +530,7 @@ void Version::GetOverlappingInputs(
     user_end = end->user_key();
   }
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
-  for (size_t i = 0; i < files_[level].size(); ) {
+  for (size_t i = 0; i < files_[level].size();) {
     FileMetaData* f = files_[level][i++];
     const Slice file_start = f->smallest.user_key();
     const Slice file_limit = f->largest.user_key();
@@ -561,8 +547,8 @@ void Version::GetOverlappingInputs(
           user_begin = file_start;
           inputs->clear();
           i = 0;
-        } else if (end != nullptr && user_cmp->Compare(file_limit,
-                                                       user_end) > 0) {
+        } else if (end != nullptr &&
+                   user_cmp->Compare(file_limit, user_end) > 0) {
           user_end = file_limit;
           inputs->clear();
           i = 0;
@@ -630,9 +616,7 @@ class VersionSet::Builder {
 
  public:
   // Initialize a builder with the files from *base and other info from *vset
-  Builder(VersionSet* vset, Version* base)
-      : vset_(vset),
-        base_(base) {
+  Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
     base_->Ref();
     BySmallestKey cmp;
     cmp.internal_comparator = &vset_->icmp_;
@@ -646,8 +630,8 @@ class VersionSet::Builder {
       const FileSet* added = levels_[level].added_files;
       std::vector<FileMetaData*> to_unref;
       to_unref.reserve(added->size());
-      for (FileSet::const_iterator it = added->begin();
-          it != added->end(); ++it) {
+      for (FileSet::const_iterator it = added->begin(); it != added->end();
+           ++it) {
         to_unref.push_back(*it);
       }
       delete added;
@@ -674,8 +658,7 @@ class VersionSet::Builder {
     // Delete files
     const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
     for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
-         iter != del.end();
-         ++iter) {
+         iter != del.end(); ++iter) {
       const int level = iter->first;
       const uint64_t number = iter->second;
       levels_[level].deleted_files.insert(number);
@@ -721,13 +704,11 @@ class VersionSet::Builder {
       const FileSet* added = levels_[level].added_files;
       v->files_[level].reserve(base_files.size() + added->size());
       for (FileSet::const_iterator added_iter = added->begin();
-           added_iter != added->end();
-           ++added_iter) {
+           added_iter != added->end(); ++added_iter) {
         // Add all smaller files listed in base_
-        for (std::vector<FileMetaData*>::const_iterator bpos
-                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
-             base_iter != bpos;
-             ++base_iter) {
+        for (std::vector<FileMetaData*>::const_iterator bpos =
+                 std::upper_bound(base_iter, base_end, *added_iter, cmp);
+             base_iter != bpos; ++base_iter) {
           MaybeAddFile(v, level, *base_iter);
         }
 
@@ -743,7 +724,7 @@ class VersionSet::Builder {
       // Make sure there is no overlap in levels > 0
       if (level > 0) {
         for (uint32_t i = 1; i < v->files_[level].size(); i++) {
-          const InternalKey& prev_end = v->files_[level][i-1]->largest;
+          const InternalKey& prev_end = v->files_[level][i - 1]->largest;
           const InternalKey& this_begin = v->files_[level][i]->smallest;
           if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
             fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
@@ -764,7 +745,7 @@ class VersionSet::Builder {
       std::vector<FileMetaData*>* files = &v->files_[level];
       if (level > 0 && !files->empty()) {
         // Must not overlap
-        assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+        assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
                                     f->smallest) < 0);
       }
       f->refs++;
@@ -773,8 +754,7 @@ class VersionSet::Builder {
   }
 };
 
-VersionSet::VersionSet(const std::string& dbname,
-                       const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
                        TableCache* table_cache,
                        const InternalKeyComparator* cmp)
     : env_(options->env),
@@ -903,7 +883,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
   return s;
 }
 
-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
   struct LogReporter : public log::Reader::Reporter {
     Status* status;
     virtual void Corruption(size_t bytes, const Status& s) {
@@ -917,7 +897,7 @@ Status VersionSet::Recover(bool *save_manifest) {
   if (!s.ok()) {
     return s;
   }
-  if (current.empty() || current[current.size()-1] != '\n') {
+  if (current.empty() || current[current.size() - 1] != '\n') {
     return Status::Corruption("CURRENT file does not end with newline");
   }
   current.resize(current.size() - 1);
@@ -927,8 +907,8 @@ Status VersionSet::Recover(bool *save_manifest) {
   s = env_->NewSequentialFile(dscname, &file);
   if (!s.ok()) {
     if (s.IsNotFound()) {
-      return Status::Corruption(
-            "CURRENT points to a non-existent file", s.ToString());
+      return Status::Corruption("CURRENT points to a non-existent file",
+                                s.ToString());
     }
     return s;
   }
@@ -946,7 +926,8 @@ Status VersionSet::Recover(bool *save_manifest) {
   {
     LogReporter reporter;
     reporter.status = &s;
-    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+    log::Reader reader(file, &reporter, true /*checksum*/,
+                       0 /*initial_offset*/);
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -1071,7 +1052,7 @@ void VersionSet::Finalize(Version* v) {
   int best_level = -1;
   double best_score = -1;
 
-  for (int level = 0; level < config::kNumLevels-1; level++) {
+  for (int level = 0; level < config::kNumLevels - 1; level++) {
     double score;
     if (level == 0) {
       // We treat level-0 specially by bounding the number of files
@@ -1086,7 +1067,7 @@ void VersionSet::Finalize(Version* v) {
       // setting, or very high compression ratios, or lots of
       // overwrites/deletions).
       score = v->files_[level].size() /
-          static_cast<double>(config::kL0_CompactionTrigger);
+              static_cast<double>(config::kL0_CompactionTrigger);
     } else {
       // Compute the ratio of current size to size limit.
       const uint64_t level_bytes = TotalFileSize(v->files_[level]);
@@ -1142,16 +1123,12 @@ int VersionSet::NumLevelFiles(int level) const {
 
 const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
   // Update code if kNumLevels changes
-  assert(config::kNumLevels == 7);
+  static_assert(config::kNumLevels == 7, "");
   snprintf(scratch->buffer, sizeof(scratch->buffer),
-           "files[ %d %d %d %d %d %d %d ]",
-           int(current_->files_[0].size()),
-           int(current_->files_[1].size()),
-           int(current_->files_[2].size()),
-           int(current_->files_[3].size()),
-           int(current_->files_[4].size()),
-           int(current_->files_[5].size()),
-           int(current_->files_[6].size()));
+           "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+           int(current_->files_[1].size()), int(current_->files_[2].size()),
+           int(current_->files_[3].size()), int(current_->files_[4].size()),
+           int(current_->files_[5].size()), int(current_->files_[6].size()));
   return scratch->buffer;
 }
 
@@ -1188,8 +1165,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
 }
 
 void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
-  for (Version* v = dummy_versions_.next_;
-       v != &dummy_versions_;
+  for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
        v = v->next_) {
     for (int level = 0; level < config::kNumLevels; level++) {
       const std::vector<FileMetaData*>& files = v->files_[level];
@@ -1212,7 +1188,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
   for (int level = 1; level < config::kNumLevels - 1; level++) {
     for (size_t i = 0; i < current_->files_[level].size(); i++) {
       const FileMetaData* f = current_->files_[level][i];
-      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+      current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
                                      &overlaps);
       const int64_t sum = TotalFileSize(overlaps);
       if (sum > result) {
@@ -1227,8 +1203,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
 // *smallest, *largest.
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
-                          InternalKey* smallest,
-                          InternalKey* largest) {
+                          InternalKey* smallest, InternalKey* largest) {
   assert(!inputs.empty());
   smallest->Clear();
   largest->Clear();
@@ -1253,8 +1228,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                            const std::vector<FileMetaData*>& inputs2,
-                           InternalKey* smallest,
-                           InternalKey* largest) {
+                           InternalKey* smallest, InternalKey* largest) {
   std::vector<FileMetaData*> all = inputs1;
   all.insert(all.end(), inputs2.begin(), inputs2.end());
   GetRange(all, smallest, largest);
@@ -1276,8 +1250,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
       if (c->level() + which == 0) {
         const std::vector<FileMetaData*>& files = c->inputs_[which];
         for (size_t i = 0; i < files.size(); i++) {
-          list[num++] = table_cache_->NewIterator(
-              options, files[i]->number, files[i]->file_size);
+          list[num++] = table_cache_->NewIterator(options, files[i]->number,
+                                                  files[i]->file_size);
         }
       } else {
         // Create concatenating iterator for the files from this level
@@ -1304,7 +1278,7 @@ Compaction* VersionSet::PickCompaction() {
   if (size_compaction) {
     level = current_->compaction_level_;
     assert(level >= 0);
-    assert(level+1 < config::kNumLevels);
+    assert(level + 1 < config::kNumLevels);
     c = new Compaction(options_, level);
 
     // Pick the first file that comes after compact_pointer_[level]
@@ -1433,7 +1407,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
   GetRange(c->inputs_[0], &smallest, &largest);
 
-  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+  current_->GetOverlappingInputs(level + 1, &smallest, &largest,
+                                 &c->inputs_[1]);
 
   // Get entire range covered by compaction
   InternalKey all_start, all_limit;
@@ -1454,18 +1429,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
       InternalKey new_start, new_limit;
       GetRange(expanded0, &new_start, &new_limit);
       std::vector<FileMetaData*> expanded1;
-      current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
+      current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
                                      &expanded1);
       if (expanded1.size() == c->inputs_[1].size()) {
         Log(options_->info_log,
             "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
-            level,
-            int(c->inputs_[0].size()),
-            int(c->inputs_[1].size()),
-            long(inputs0_size), long(inputs1_size),
-            int(expanded0.size()),
-            int(expanded1.size()),
-            long(expanded0_size), long(inputs1_size));
+            level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
+            long(inputs0_size), long(inputs1_size), int(expanded0.size()),
+            int(expanded1.size()), long(expanded0_size), long(inputs1_size));
         smallest = new_start;
         largest = new_limit;
         c->inputs_[0] = expanded0;
@@ -1490,10 +1461,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   c->edit_.SetCompactPointer(level, largest);
 }
 
-Compaction* VersionSet::CompactRange(
-    int level,
-    const InternalKey* begin,
-    const InternalKey* end) {
+Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
+                                     const InternalKey* end) {
   std::vector<FileMetaData*> inputs;
   current_->GetOverlappingInputs(level, begin, end, &inputs);
   if (inputs.empty()) {
@@ -1566,7 +1535,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
   const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
   for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
     const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
-    for (; level_ptrs_[lvl] < files.size(); ) {
+    for (; level_ptrs_[lvl] < files.size();) {
       FileMetaData* f = files[level_ptrs_[lvl]];
       if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
         // We've advanced far enough
@@ -1587,8 +1556,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
   // Scan to find earliest grandparent file that contains key.
   const InternalKeyComparator* icmp = &vset->icmp_;
   while (grandparent_index_ < grandparents_.size() &&
-      icmp->Compare(internal_key,
-                    grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+         icmp->Compare(internal_key,
+                       grandparents_[grandparent_index_]->largest.Encode()) >
+             0) {
     if (seen_key_) {
       overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
     }
diff --git a/db/version_set.h b/db/version_set.h
index 0beae4d..334ebd9 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -18,6 +18,7 @@
 #include <map>
 #include <set>
 #include <vector>
+
 #include "db/dbformat.h"
 #include "db/version_edit.h"
 #include "port/port.h"
@@ -25,7 +26,9 @@
 
 namespace leveldb {
 
-namespace log { class Writer; }
+namespace log {
+class Writer;
+}
 
 class Compaction;
 class Iterator;
@@ -40,8 +43,7 @@ class WritableFile;
 // Return files.size() if there is no such file.
 // REQUIRES: "files" contains a sorted list of non-overlapping files.
 int FindFile(const InternalKeyComparator& icmp,
-             const std::vector<FileMetaData*>& files,
-             const Slice& key);
+             const std::vector<FileMetaData*>& files, const Slice& key);
 
 // Returns true iff some file in "files" overlaps the user key range
 // [*smallest,*largest].
@@ -90,16 +92,15 @@ class Version {
 
   void GetOverlappingInputs(
       int level,
-      const InternalKey* begin,         // nullptr means before all keys
-      const InternalKey* end,           // nullptr means after all keys
+      const InternalKey* begin,  // nullptr means before all keys
+      const InternalKey* end,    // nullptr means after all keys
       std::vector<FileMetaData*>* inputs);
 
   // Returns true iff some file in the specified level overlaps
   // some part of [*smallest_user_key,*largest_user_key].
   // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
   // largest_user_key==nullptr represents a key larger than all the DB's keys.
-  bool OverlapInLevel(int level,
-                      const Slice* smallest_user_key,
+  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                       const Slice* largest_user_key);
 
   // Return the level at which we should place a new memtable compaction
@@ -124,14 +125,13 @@ class Version {
   // false, makes no more calls.
   //
   // REQUIRES: user portion of internal_key == user_key.
-  void ForEachOverlapping(Slice user_key, Slice internal_key,
-                          void* arg,
+  void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                           bool (*func)(void*, int, FileMetaData*));
 
-  VersionSet* vset_;            // VersionSet to which this Version belongs
-  Version* next_;               // Next version in linked list
-  Version* prev_;               // Previous version in linked list
-  int refs_;                    // Number of live refs to this version
+  VersionSet* vset_;  // VersionSet to which this Version belongs
+  Version* next_;     // Next version in linked list
+  Version* prev_;     // Previous version in linked list
+  int refs_;          // Number of live refs to this version
 
   // List of files per level
   std::vector<FileMetaData*> files_[config::kNumLevels];
@@ -147,12 +147,14 @@ class Version {
   int compaction_level_;
 
   explicit Version(VersionSet* vset)
-      : vset_(vset), next_(this), prev_(this), refs_(0),
+      : vset_(vset),
+        next_(this),
+        prev_(this),
+        refs_(0),
         file_to_compact_(nullptr),
         file_to_compact_level_(-1),
         compaction_score_(-1),
-        compaction_level_(-1) {
-  }
+        compaction_level_(-1) {}
 
   ~Version();
 
@@ -163,10 +165,8 @@ class Version {
 
 class VersionSet {
  public:
-  VersionSet(const std::string& dbname,
-             const Options* options,
-             TableCache* table_cache,
-             const InternalKeyComparator*);
+  VersionSet(const std::string& dbname, const Options* options,
+             TableCache* table_cache, const InternalKeyComparator*);
   ~VersionSet();
 
   // Apply *edit to the current version to form a new descriptor that
@@ -178,7 +178,7 @@ class VersionSet {
       EXCLUSIVE_LOCKS_REQUIRED(mu);
 
   // Recover the last saved descriptor from persistent storage.
-  Status Recover(bool *save_manifest);
+  Status Recover(bool* save_manifest);
 
   // Return the current version.
   Version* current() const { return current_; }
@@ -233,10 +233,8 @@ class VersionSet {
   // the specified level.  Returns nullptr if there is nothing in that
   // level that overlaps the specified range.  Caller should delete
   // the result.
-  Compaction* CompactRange(
-      int level,
-      const InternalKey* begin,
-      const InternalKey* end);
+  Compaction* CompactRange(int level, const InternalKey* begin,
+                           const InternalKey* end);
 
   // Return the maximum overlapping data (in bytes) at next level for any
   // file at a level >= 1.
@@ -277,14 +275,12 @@ class VersionSet {
 
   void Finalize(Version* v);
 
-  void GetRange(const std::vector<FileMetaData*>& inputs,
-                InternalKey* smallest,
+  void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
                 InternalKey* largest);
 
   void GetRange2(const std::vector<FileMetaData*>& inputs1,
                  const std::vector<FileMetaData*>& inputs2,
-                 InternalKey* smallest,
-                 InternalKey* largest);
+                 InternalKey* smallest, InternalKey* largest);
 
   void SetupOtherInputs(Compaction* c);
 
@@ -373,7 +369,7 @@ class Compaction {
   VersionEdit edit_;
 
   // Each compaction reads inputs from "level_" and "level_+1"
-  std::vector<FileMetaData*> inputs_[2];      // The two sets of inputs
+  std::vector<FileMetaData*> inputs_[2];  // The two sets of inputs
 
   // State used to check for number of overlapping grandparent files
   // (parent == level_ + 1, grandparent == level_ + 2)
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index b32e2e5..43b51d8 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -14,7 +14,7 @@ class FindFileTest {
   std::vector<FileMetaData*> files_;
   bool disjoint_sorted_files_;
 
-  FindFileTest() : disjoint_sorted_files_(true) { }
+  FindFileTest() : disjoint_sorted_files_(true) {}
 
   ~FindFileTest() {
     for (int i = 0; i < files_.size(); i++) {
@@ -50,10 +50,10 @@ class FindFileTest {
 
 TEST(FindFileTest, Empty) {
   ASSERT_EQ(0, Find("foo"));
-  ASSERT_TRUE(! Overlaps("a", "z"));
-  ASSERT_TRUE(! Overlaps(nullptr, "z"));
-  ASSERT_TRUE(! Overlaps("a", nullptr));
-  ASSERT_TRUE(! Overlaps(nullptr, nullptr));
+  ASSERT_TRUE(!Overlaps("a", "z"));
+  ASSERT_TRUE(!Overlaps(nullptr, "z"));
+  ASSERT_TRUE(!Overlaps("a", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, nullptr));
 }
 
 TEST(FindFileTest, Single) {
@@ -65,8 +65,8 @@ TEST(FindFileTest, Single) {
   ASSERT_EQ(1, Find("q1"));
   ASSERT_EQ(1, Find("z"));
 
-  ASSERT_TRUE(! Overlaps("a", "b"));
-  ASSERT_TRUE(! Overlaps("z1", "z2"));
+  ASSERT_TRUE(!Overlaps("a", "b"));
+  ASSERT_TRUE(!Overlaps("z1", "z2"));
   ASSERT_TRUE(Overlaps("a", "p"));
   ASSERT_TRUE(Overlaps("a", "q"));
   ASSERT_TRUE(Overlaps("a", "z"));
@@ -78,15 +78,14 @@ TEST(FindFileTest, Single) {
   ASSERT_TRUE(Overlaps("q", "q"));
   ASSERT_TRUE(Overlaps("q", "q1"));
 
-  ASSERT_TRUE(! Overlaps(nullptr, "j"));
-  ASSERT_TRUE(! Overlaps("r", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, "j"));
+  ASSERT_TRUE(!Overlaps("r", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, "p"));
   ASSERT_TRUE(Overlaps(nullptr, "p1"));
   ASSERT_TRUE(Overlaps("q", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, nullptr));
 }
 
-
 TEST(FindFileTest, Multiple) {
   Add("150", "200");
   Add("200", "250");
@@ -110,10 +109,10 @@ TEST(FindFileTest, Multiple) {
   ASSERT_EQ(3, Find("450"));
   ASSERT_EQ(4, Find("451"));
 
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("251", "299"));
-  ASSERT_TRUE(! Overlaps("451", "500"));
-  ASSERT_TRUE(! Overlaps("351", "399"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("251", "299"));
+  ASSERT_TRUE(!Overlaps("451", "500"));
+  ASSERT_TRUE(!Overlaps("351", "399"));
 
   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
@@ -130,8 +129,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
   Add("200", "250");
   Add("300", "350");
   Add("400", "450");
-  ASSERT_TRUE(! Overlaps(nullptr, "149"));
-  ASSERT_TRUE(! Overlaps("451", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, "149"));
+  ASSERT_TRUE(!Overlaps("451", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, nullptr));
   ASSERT_TRUE(Overlaps(nullptr, "150"));
   ASSERT_TRUE(Overlaps(nullptr, "199"));
@@ -147,8 +146,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
 
 TEST(FindFileTest, OverlapSequenceChecks) {
   Add("200", "200", 5000, 3000);
-  ASSERT_TRUE(! Overlaps("199", "199"));
-  ASSERT_TRUE(! Overlaps("201", "300"));
+  ASSERT_TRUE(!Overlaps("199", "199"));
+  ASSERT_TRUE(!Overlaps("201", "300"));
   ASSERT_TRUE(Overlaps("200", "200"));
   ASSERT_TRUE(Overlaps("190", "200"));
   ASSERT_TRUE(Overlaps("200", "210"));
@@ -158,8 +157,8 @@ TEST(FindFileTest, OverlappingFiles) {
   Add("150", "600");
   Add("400", "500");
   disjoint_sorted_files_ = false;
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("601", "700"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("601", "700"));
   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
   ASSERT_TRUE(Overlaps("100", "300"));
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 23eb00f..2dec642 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -15,10 +15,10 @@
 
 #include "leveldb/write_batch.h"
 
-#include "leveldb/db.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/db.h"
 #include "util/coding.h"
 
 namespace leveldb {
@@ -26,22 +26,18 @@ namespace leveldb {
 // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
 static const size_t kHeader = 12;
 
-WriteBatch::WriteBatch() {
-  Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }
 
-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() {}
 
-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() {}
 
 void WriteBatch::Clear() {
   rep_.clear();
   rep_.resize(kHeader);
 }
 
-size_t WriteBatch::ApproximateSize() const {
-  return rep_.size();
-}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
 
 Status WriteBatch::Iterate(Handler* handler) const {
   Slice input(rep_);
@@ -112,7 +108,7 @@ void WriteBatch::Delete(const Slice& key) {
   PutLengthPrefixedSlice(&rep_, key);
 }
 
-void WriteBatch::Append(const WriteBatch &source) {
+void WriteBatch::Append(const WriteBatch& source) {
   WriteBatchInternal::Append(this, &source);
 }
 
@@ -133,8 +129,7 @@ class MemTableInserter : public WriteBatch::Handler {
 };
 }  // namespace
 
-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
-                                      MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
   MemTableInserter inserter;
   inserter.sequence_ = WriteBatchInternal::Sequence(b);
   inserter.mem_ = memtable;
diff --git a/db/write_batch_internal.h b/db/write_batch_internal.h
index 9448ef7..fce86e3 100644
--- a/db/write_batch_internal.h
+++ b/db/write_batch_internal.h
@@ -29,13 +29,9 @@ class WriteBatchInternal {
   // this batch.
   static void SetSequence(WriteBatch* batch, SequenceNumber seq);
 
-  static Slice Contents(const WriteBatch* batch) {
-    return Slice(batch->rep_);
-  }
+  static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
 
-  static size_t ByteSize(const WriteBatch* batch) {
-    return batch->rep_.size();
-  }
+  static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
 
   static void SetContents(WriteBatch* batch, const Slice& contents);
 
@@ -46,5 +42,4 @@ class WriteBatchInternal {
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 49c178d..c32317f 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
   return state;
 }
 
-class WriteBatchTest { };
+class WriteBatchTest {};
 
 TEST(WriteBatchTest, Empty) {
   WriteBatch batch;
@@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
   WriteBatchInternal::SetSequence(&batch, 100);
   ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
   ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
-  ASSERT_EQ("Put(baz, boo)@102"
-            "Delete(box)@101"
-            "Put(foo, bar)@100",
-            PrintContents(&batch));
+  ASSERT_EQ(
+      "Put(baz, boo)@102"
+      "Delete(box)@101"
+      "Put(foo, bar)@100",
+      PrintContents(&batch));
 }
 
 TEST(WriteBatchTest, Corruption) {
@@ -81,10 +82,11 @@ TEST(WriteBatchTest, Corruption) {
   WriteBatchInternal::SetSequence(&batch, 200);
   Slice contents = WriteBatchInternal::Contents(&batch);
   WriteBatchInternal::SetContents(&batch,
-                                  Slice(contents.data(),contents.size()-1));
-  ASSERT_EQ("Put(foo, bar)@200"
-            "ParseError()",
-            PrintContents(&batch));
+                                  Slice(contents.data(), contents.size() - 1));
+  ASSERT_EQ(
+      "Put(foo, bar)@200"
+      "ParseError()",
+      PrintContents(&batch));
 }
 
 TEST(WriteBatchTest, Append) {
@@ -92,25 +94,25 @@ TEST(WriteBatchTest, Append) {
   WriteBatchInternal::SetSequence(&b1, 200);
   WriteBatchInternal::SetSequence(&b2, 300);
   b1.Append(b2);
-  ASSERT_EQ("",
-            PrintContents(&b1));
+  ASSERT_EQ("", PrintContents(&b1));
   b2.Put("a", "va");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200",
-            PrintContents(&b1));
+  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
   b2.Clear();
   b2.Put("b", "vb");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@201",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@201",
+      PrintContents(&b1));
   b2.Delete("foo");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@202"
-            "Put(b, vb)@201"
-            "Delete(foo)@203",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@202"
+      "Put(b, vb)@201"
+      "Delete(foo)@203",
+      PrintContents(&b1));
 }
 
 TEST(WriteBatchTest, ApproximateSize) {
@@ -132,6 +134,4 @@ TEST(WriteBatchTest, ApproximateSize) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/doc/bench/db_bench_sqlite3.cc b/doc/bench/db_bench_sqlite3.cc
index 7e05de2..f183f4f 100644
--- a/doc/bench/db_bench_sqlite3.cc
+++ b/doc/bench/db_bench_sqlite3.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <sqlite3.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sqlite3.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
     "fillrand100K,"
     "fillseq100K,"
     "readseq,"
-    "readrand100K,"
-    ;
+    "readrand100K,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -78,8 +78,7 @@ static bool FLAGS_WAL_enabled = true;
 // Use the db with the following name.
 static const char* FLAGS_db = nullptr;
 
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
+inline static void ExecErrorCheck(int status, char* err_msg) {
   if (status != SQLITE_OK) {
     fprintf(stderr, "SQL error: %s\n", err_msg);
     sqlite3_free(err_msg);
@@ -87,24 +86,21 @@ static void ExecErrorCheck(int status, char *err_msg) {
   }
 }
 
-inline
-static void StepErrorCheck(int status) {
+inline static void StepErrorCheck(int status) {
   if (status != SQLITE_DONE) {
     fprintf(stderr, "SQL step error: status = %d\n", status);
     exit(1);
   }
 }
 
-inline
-static void ErrorCheck(int status) {
+inline static void ErrorCheck(int status) {
   if (status != SQLITE_OK) {
     fprintf(stderr, "sqlite3 error: status = %d\n", status);
     exit(1);
   }
 }
 
-inline
-static void WalCheckpoint(sqlite3* db_) {
+inline static void WalCheckpoint(sqlite3* db_) {
   // Flush all writes to disk
   if (FLAGS_WAL_enabled) {
     sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
@@ -153,7 +149,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   int limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -177,7 +173,7 @@ class Benchmark {
 
   // State kept for progress messages
   int done_;
-  int next_report_;     // When to report next
+  int next_report_;  // When to report next
 
   void PrintHeader() {
     const int kKeySize = 16;
@@ -186,17 +182,17 @@ class Benchmark {
     fprintf(stdout, "Values:     %d bytes each\n", FLAGS_value_size);
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -262,13 +258,20 @@ class Benchmark {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
@@ -286,16 +289,14 @@ class Benchmark {
       snprintf(rate, sizeof(rate), "%6.1f MB/s",
                (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
-        message_  = std::string(rate) + " " + message_;
+        message_ = std::string(rate) + " " + message_;
       } else {
         message_ = rate;
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_,
-            (message_.empty() ? "" : " "),
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
             message_.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -304,22 +305,16 @@ class Benchmark {
   }
 
  public:
-  enum Order {
-    SEQUENTIAL,
-    RANDOM
-  };
-  enum DBState {
-    FRESH,
-    EXISTING
-  };
+  enum Order { SEQUENTIAL, RANDOM };
+  enum DBState { FRESH, EXISTING };
 
   Benchmark()
-  : db_(nullptr),
-    db_num_(0),
-    num_(FLAGS_num),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    bytes_(0),
-    rand_(301) {
+      : db_(nullptr),
+        db_num_(0),
+        num_(FLAGS_num),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        bytes_(0),
+        rand_(301) {
     std::vector<std::string> files;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
@@ -426,10 +421,8 @@ class Benchmark {
     // Open database
     std::string tmp_dir;
     Env::Default()->GetTestDirectory(&tmp_dir);
-    snprintf(file_name, sizeof(file_name),
-             "%s/dbbench_sqlite3-%d.db",
-             tmp_dir.c_str(),
-             db_num_);
+    snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+             tmp_dir.c_str(), db_num_);
     status = sqlite3_open(file_name, &db_);
     if (status) {
       fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
@@ -460,26 +453,26 @@ class Benchmark {
       std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
       status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
-      status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr,
-                            &err_msg);
+      status =
+          sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
     }
 
     // Change locking mode to exclusive and create tables/index for database
     std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
     std::string create_stmt =
-          "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
-    std::string stmt_array[] = { locking_stmt, create_stmt };
+        "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+    std::string stmt_array[] = {locking_stmt, create_stmt};
     int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
     for (int i = 0; i < stmt_array_length; i++) {
-      status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr,
-                            &err_msg);
+      status =
+          sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
     }
   }
 
-  void Write(bool write_sync, Order order, DBState state,
-             int num_entries, int value_size, int entries_per_batch) {
+  void Write(bool write_sync, Order order, DBState state, int num_entries,
+             int value_size, int entries_per_batch) {
     // Create new database if state == FRESH
     if (state == FRESH) {
       if (FLAGS_use_existing_db) {
@@ -507,20 +500,20 @@ class Benchmark {
     std::string end_trans_str = "END TRANSACTION;";
 
     // Check for synchronous flag in options
-    std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
-                                           "PRAGMA synchronous = OFF";
+    std::string sync_stmt =
+        (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
     status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
     ExecErrorCheck(status, err_msg);
 
     // Preparing sqlite3 statements
-    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
-                                &replace_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+                                nullptr);
     ErrorCheck(status);
     status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                 &begin_trans_stmt, nullptr);
     ErrorCheck(status);
-    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
-                                &end_trans_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+                                nullptr);
     ErrorCheck(status);
 
     bool transaction = (entries_per_batch > 1);
@@ -538,16 +531,16 @@ class Benchmark {
         const char* value = gen_.Generate(value_size).data();
 
         // Create values for key-value pair
-        const int k = (order == SEQUENTIAL) ? i + j :
-                      (rand_.Next() % num_entries);
+        const int k =
+            (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
 
         // Bind KV values into replace_stmt
         status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
         ErrorCheck(status);
-        status = sqlite3_bind_blob(replace_stmt, 2, value,
-                                   value_size, SQLITE_STATIC);
+        status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+                                   SQLITE_STATIC);
         ErrorCheck(status);
 
         // Execute replace_stmt
@@ -593,8 +586,8 @@ class Benchmark {
     status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                 &begin_trans_stmt, nullptr);
     ErrorCheck(status);
-    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
-                                &end_trans_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+                                nullptr);
     ErrorCheck(status);
     status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
     ErrorCheck(status);
@@ -621,7 +614,8 @@ class Benchmark {
         ErrorCheck(status);
 
         // Execute read statement
-        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+        }
         StepErrorCheck(status);
 
         // Reset SQLite statement for another use
@@ -651,7 +645,7 @@ class Benchmark {
 
   void ReadSequential() {
     int status;
-    sqlite3_stmt *pStmt;
+    sqlite3_stmt* pStmt;
     std::string read_str = "SELECT * FROM test ORDER BY key";
 
     status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
@@ -664,7 +658,6 @@ class Benchmark {
     status = sqlite3_finalize(pStmt);
     ErrorCheck(status);
   }
-
 };
 
 }  // namespace leveldb
@@ -710,9 +703,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/doc/bench/db_bench_tree_db.cc b/doc/bench/db_bench_tree_db.cc
index 9f8fb90..b2f6646 100644
--- a/doc/bench/db_bench_tree_db.cc
+++ b/doc/bench/db_bench_tree_db.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <kcpolydb.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <kcpolydb.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
     "fillrand100K,"
     "fillseq100K,"
     "readseq100K,"
-    "readrand100K,"
-    ;
+    "readrand100K,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -71,9 +71,7 @@ static bool FLAGS_compression = true;
 // Use the db with the following name.
 static const char* FLAGS_db = nullptr;
 
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
   // Synchronize will flush writes to disk
   if (!db_->synchronize()) {
     fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   int limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
 
   // State kept for progress messages
   int done_;
-  int next_report_;     // When to report next
+  int next_report_;  // When to report next
 
   void PrintHeader() {
     const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
             static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-             / 1048576.0));
+            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -237,13 +235,20 @@ class Benchmark {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
@@ -261,16 +266,14 @@ class Benchmark {
       snprintf(rate, sizeof(rate), "%6.1f MB/s",
                (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
-        message_  = std::string(rate) + " " + message_;
+        message_ = std::string(rate) + " " + message_;
       } else {
         message_ = rate;
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_,
-            (message_.empty() ? "" : " "),
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
             message_.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
   }
 
  public:
-  enum Order {
-    SEQUENTIAL,
-    RANDOM
-  };
-  enum DBState {
-    FRESH,
-    EXISTING
-  };
+  enum Order { SEQUENTIAL, RANDOM };
+  enum DBState { FRESH, EXISTING };
 
   Benchmark()
-  : db_(nullptr),
-    num_(FLAGS_num),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    bytes_(0),
-    rand_(301) {
+      : db_(nullptr),
+        num_(FLAGS_num),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        bytes_(0),
+        rand_(301) {
     std::vector<std::string> files;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
@@ -386,7 +383,7 @@ class Benchmark {
   }
 
  private:
-    void Open(bool sync) {
+  void Open(bool sync) {
     assert(db_ == nullptr);
 
     // Initialize db_
@@ -395,16 +392,14 @@ class Benchmark {
     db_num_++;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
-    snprintf(file_name, sizeof(file_name),
-             "%s/dbbench_polyDB-%d.kct",
-             test_dir.c_str(),
-             db_num_);
+    snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+             test_dir.c_str(), db_num_);
 
     // Create tuning options and open the database
-    int open_options = kyotocabinet::PolyDB::OWRITER |
-                       kyotocabinet::PolyDB::OCREATE;
-    int tune_options = kyotocabinet::TreeDB::TSMALL |
-        kyotocabinet::TreeDB::TLINEAR;
+    int open_options =
+        kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+    int tune_options =
+        kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
     if (FLAGS_compression) {
       tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
       db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
     db_->tune_options(tune_options);
     db_->tune_page_cache(FLAGS_cache_size);
     db_->tune_page(FLAGS_page_size);
-    db_->tune_map(256LL<<20);
+    db_->tune_map(256LL << 20);
     if (sync) {
       open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
     }
@@ -421,8 +416,8 @@ class Benchmark {
     }
   }
 
-  void Write(bool sync, Order order, DBState state,
-             int num_entries, int value_size, int entries_per_batch) {
+  void Write(bool sync, Order order, DBState state, int num_entries,
+             int value_size, int entries_per_batch) {
     // Create new database if state == FRESH
     if (state == FRESH) {
       if (FLAGS_use_existing_db) {
@@ -442,8 +437,7 @@ class Benchmark {
     }
 
     // Write to database
-    for (int i = 0; i < num_entries; i++)
-    {
+    for (int i = 0; i < num_entries; i++) {
       const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
       char key[100];
       snprintf(key, sizeof(key), "%016d", k);
@@ -517,9 +511,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index ff384e4..58dc538 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -134,9 +134,7 @@ class FileState {
 
  private:
   // Private since only Unref() should be used to delete it.
-  ~FileState() {
-    Truncate();
-  }
+  ~FileState() { Truncate(); }
 
   // No copying allowed.
   FileState(const FileState&);
@@ -158,9 +156,7 @@ class SequentialFileImpl : public SequentialFile {
     file_->Ref();
   }
 
-  ~SequentialFileImpl() {
-    file_->Unref();
-  }
+  ~SequentialFileImpl() { file_->Unref(); }
 
   virtual Status Read(size_t n, Slice* result, char* scratch) {
     Status s = file_->Read(pos_, n, result, scratch);
@@ -189,13 +185,9 @@ class SequentialFileImpl : public SequentialFile {
 
 class RandomAccessFileImpl : public RandomAccessFile {
  public:
-  explicit RandomAccessFileImpl(FileState* file) : file_(file) {
-    file_->Ref();
-  }
+  explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~RandomAccessFileImpl() {
-    file_->Unref();
-  }
+  ~RandomAccessFileImpl() { file_->Unref(); }
 
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const {
@@ -208,17 +200,11 @@ class RandomAccessFileImpl : public RandomAccessFile {
 
 class WritableFileImpl : public WritableFile {
  public:
-  WritableFileImpl(FileState* file) : file_(file) {
-    file_->Ref();
-  }
+  WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~WritableFileImpl() {
-    file_->Unref();
-  }
+  ~WritableFileImpl() { file_->Unref(); }
 
-  virtual Status Append(const Slice& data) {
-    return file_->Append(data);
-  }
+  virtual Status Append(const Slice& data) { return file_->Append(data); }
 
   virtual Status Close() { return Status::OK(); }
   virtual Status Flush() { return Status::OK(); }
@@ -230,15 +216,16 @@ class WritableFileImpl : public WritableFile {
 
 class NoOpLogger : public Logger {
  public:
-  virtual void Logv(const char* format, va_list ap) { }
+  virtual void Logv(const char* format, va_list ap) {}
 };
 
 class InMemoryEnv : public EnvWrapper {
  public:
-  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
 
   virtual ~InMemoryEnv() {
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+         ++i) {
       i->second->Unref();
     }
   }
@@ -311,7 +298,8 @@ class InMemoryEnv : public EnvWrapper {
     MutexLock lock(&mutex_);
     result->clear();
 
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+         ++i) {
       const std::string& filename = i->first;
 
       if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
@@ -343,13 +331,9 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status CreateDir(const std::string& dirname) {
-    return Status::OK();
-  }
+  virtual Status CreateDir(const std::string& dirname) { return Status::OK(); }
 
-  virtual Status DeleteDir(const std::string& dirname) {
-    return Status::OK();
-  }
+  virtual Status DeleteDir(const std::string& dirname) { return Status::OK(); }
 
   virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
     MutexLock lock(&mutex_);
@@ -361,8 +345,7 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) {
+  virtual Status RenameFile(const std::string& src, const std::string& target) {
     MutexLock lock(&mutex_);
     if (file_map_.find(src) == file_map_.end()) {
       return Status::IOError(src, "File not found");
@@ -403,8 +386,6 @@ class InMemoryEnv : public EnvWrapper {
 
 }  // namespace
 
-Env* NewMemEnv(Env* base_env) {
-  return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
 
 }  // namespace leveldb
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 4664795..a0a9469 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -4,12 +4,13 @@
 
 #include "helpers/memenv/memenv.h"
 
+#include <string>
+#include <vector>
+
 #include "db/db_impl.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "util/testharness.h"
-#include <string>
-#include <vector>
 
 namespace leveldb {
 
@@ -17,12 +18,8 @@ class MemEnvTest {
  public:
   Env* env_;
 
-  MemEnvTest()
-      : env_(NewMemEnv(Env::Default())) {
-  }
-  ~MemEnvTest() {
-    delete env_;
-  }
+  MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+  ~MemEnvTest() { delete env_; }
 };
 
 TEST(MemEnvTest, Basics) {
@@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
 
   // Read sequentially.
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
+  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
   ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
+  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
+  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
   ASSERT_EQ(0, result.size());
-  ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
+  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
   ASSERT_OK(seq_file->Read(1000, &result, scratch));
   ASSERT_EQ(0, result.size());
   delete seq_file;
 
   // Random reads.
   ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
+  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
+  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
+  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
   ASSERT_EQ(0, result.compare("d"));
 
   // Too high offset.
@@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
   SequentialFile* seq_file;
   Slice result;
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
+  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
   ASSERT_EQ(0, result.compare("foo"));
 
   size_t read = 0;
@@ -188,7 +185,7 @@ TEST(MemEnvTest, LargeWrite) {
   }
   ASSERT_TRUE(write_data == read_data);
   delete seq_file;
-  delete [] scratch;
+  delete[] scratch;
 }
 
 TEST(MemEnvTest, OverwriteOpenFile) {
@@ -259,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index d8aab5b..8e0d592 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -47,26 +47,27 @@ extern "C" {
 #include <stdarg.h>
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/export.h"
 
 /* Exported types */
 
-typedef struct leveldb_t               leveldb_t;
-typedef struct leveldb_cache_t         leveldb_cache_t;
-typedef struct leveldb_comparator_t    leveldb_comparator_t;
-typedef struct leveldb_env_t           leveldb_env_t;
-typedef struct leveldb_filelock_t      leveldb_filelock_t;
-typedef struct leveldb_filterpolicy_t  leveldb_filterpolicy_t;
-typedef struct leveldb_iterator_t      leveldb_iterator_t;
-typedef struct leveldb_logger_t        leveldb_logger_t;
-typedef struct leveldb_options_t       leveldb_options_t;
-typedef struct leveldb_randomfile_t    leveldb_randomfile_t;
-typedef struct leveldb_readoptions_t   leveldb_readoptions_t;
-typedef struct leveldb_seqfile_t       leveldb_seqfile_t;
-typedef struct leveldb_snapshot_t      leveldb_snapshot_t;
-typedef struct leveldb_writablefile_t  leveldb_writablefile_t;
-typedef struct leveldb_writebatch_t    leveldb_writebatch_t;
-typedef struct leveldb_writeoptions_t  leveldb_writeoptions_t;
+typedef struct leveldb_t leveldb_t;
+typedef struct leveldb_cache_t leveldb_cache_t;
+typedef struct leveldb_comparator_t leveldb_comparator_t;
+typedef struct leveldb_env_t leveldb_env_t;
+typedef struct leveldb_filelock_t leveldb_filelock_t;
+typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
+typedef struct leveldb_iterator_t leveldb_iterator_t;
+typedef struct leveldb_logger_t leveldb_logger_t;
+typedef struct leveldb_options_t leveldb_options_t;
+typedef struct leveldb_randomfile_t leveldb_randomfile_t;
+typedef struct leveldb_readoptions_t leveldb_readoptions_t;
+typedef struct leveldb_seqfile_t leveldb_seqfile_t;
+typedef struct leveldb_snapshot_t leveldb_snapshot_t;
+typedef struct leveldb_writablefile_t leveldb_writablefile_t;
+typedef struct leveldb_writebatch_t leveldb_writebatch_t;
+typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
 
 /* DB operations */
 
@@ -189,10 +190,7 @@ LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
 LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
                                                       size_t);
 
-enum {
-  leveldb_no_compression = 0,
-  leveldb_snappy_compression = 1
-};
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
 LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
 
 /* Comparator */
@@ -266,7 +264,7 @@ LEVELDB_EXPORT int leveldb_major_version();
 LEVELDB_EXPORT int leveldb_minor_version();
 
 #ifdef __cplusplus
-}  /* end extern "C" */
+} /* end extern "C" */
 #endif
 
-#endif  /* STORAGE_LEVELDB_INCLUDE_C_H_ */
+#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h
index e416ea5..7d1a221 100644
--- a/include/leveldb/cache.h
+++ b/include/leveldb/cache.h
@@ -19,6 +19,7 @@
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -42,7 +43,7 @@ class LEVELDB_EXPORT Cache {
   virtual ~Cache();
 
   // Opaque handle to an entry stored in the cache.
-  struct Handle { };
+  struct Handle {};
 
   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
diff --git a/include/leveldb/comparator.h b/include/leveldb/comparator.h
index 9b09684..a85b51e 100644
--- a/include/leveldb/comparator.h
+++ b/include/leveldb/comparator.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -44,9 +45,8 @@ class LEVELDB_EXPORT Comparator {
   // If *start < limit, changes *start to a short string in [start,limit).
   // Simple comparator implementations may return with *start unchanged,
   // i.e., an implementation of this method that does nothing is correct.
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const = 0;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const = 0;
 
   // Changes *key to a short string >= *key.
   // Simple comparator implementations may return with *key unchanged,
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 0239593..0b8dc24 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 #include <stdio.h>
+
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
 #include "leveldb/options.h"
@@ -32,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
 
 // A range of keys
 struct LEVELDB_EXPORT Range {
-  Slice start;          // Included in the range
-  Slice limit;          // Not included in the range
+  Slice start;  // Included in the range
+  Slice limit;  // Not included in the range
 
-  Range() { }
-  Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+  Range() {}
+  Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
 };
 
 // A DB is a persistent ordered map from keys to values.
@@ -49,8 +50,7 @@ class LEVELDB_EXPORT DB {
   // OK on success.
   // Stores nullptr in *dbptr and returns a non-OK status on error.
   // Caller should delete *dbptr when it is no longer needed.
-  static Status Open(const Options& options,
-                     const std::string& name,
+  static Status Open(const Options& options, const std::string& name,
                      DB** dbptr);
 
   DB() = default;
@@ -63,8 +63,7 @@ class LEVELDB_EXPORT DB {
   // Set the database entry for "key" to "value".  Returns OK on success,
   // and a non-OK status on error.
   // Note: consider setting options.sync = true.
-  virtual Status Put(const WriteOptions& options,
-                     const Slice& key,
+  virtual Status Put(const WriteOptions& options, const Slice& key,
                      const Slice& value) = 0;
 
   // Remove the database entry (if any) for "key".  Returns OK on
@@ -85,8 +84,8 @@ class LEVELDB_EXPORT DB {
   // a status for which Status::IsNotFound() returns true.
   //
   // May return some other Status on an error.
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key, std::string* value) = 0;
+  virtual Status Get(const ReadOptions& options, const Slice& key,
+                     std::string* value) = 0;
 
   // Return a heap-allocated iterator over the contents of the database.
   // The result of NewIterator() is initially invalid (caller must
diff --git a/include/leveldb/dumpfile.h b/include/leveldb/dumpfile.h
index 6597741..a58bc6b 100644
--- a/include/leveldb/dumpfile.h
+++ b/include/leveldb/dumpfile.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
 
 #include <string>
+
 #include "leveldb/env.h"
 #include "leveldb/export.h"
 #include "leveldb/status.h"
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index ea728c9..112fe96 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -15,8 +15,10 @@
 
 #include <stdarg.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -164,9 +166,7 @@ class LEVELDB_EXPORT Env {
   // added to the same Env may run concurrently in different threads.
   // I.e., the caller may not assume that background work items are
   // serialized.
-  virtual void Schedule(
-      void (*function)(void* arg),
-      void* arg) = 0;
+  virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
 
   // Start a new thread, invoking "function(arg)" within the new thread.
   // When "function(arg)" returns, the thread will be destroyed.
@@ -287,9 +287,9 @@ class LEVELDB_EXPORT FileLock {
 
 // Log the specified data to *info_log if info_log is non-null.
 void Log(Logger* info_log, const char* format, ...)
-#   if defined(__GNUC__) || defined(__clang__)
-    __attribute__((__format__ (__printf__, 2, 3)))
-#   endif
+#if defined(__GNUC__) || defined(__clang__)
+    __attribute__((__format__(__printf__, 2, 3)))
+#endif
     ;
 
 // A utility routine: write "data" to the named file.
@@ -306,7 +306,7 @@ LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
 class LEVELDB_EXPORT EnvWrapper : public Env {
  public:
   // Initialize an EnvWrapper that delegates all calls to *t.
-  explicit EnvWrapper(Env* t) : target_(t) { }
+  explicit EnvWrapper(Env* t) : target_(t) {}
   virtual ~EnvWrapper();
 
   // Return the target to which this Env forwards all calls.
@@ -364,9 +364,7 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
   Status NewLogger(const std::string& fname, Logger** result) override {
     return target_->NewLogger(fname, result);
   }
-  uint64_t NowMicros() override {
-    return target_->NowMicros();
-  }
+  uint64_t NowMicros() override { return target_->NowMicros(); }
   void SleepForMicroseconds(int micros) override {
     target_->SleepForMicroseconds(micros);
   }
diff --git a/include/leveldb/filter_policy.h b/include/leveldb/filter_policy.h
index ba02720..49c8eda 100644
--- a/include/leveldb/filter_policy.h
+++ b/include/leveldb/filter_policy.h
@@ -17,6 +17,7 @@
 #define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -39,8 +40,8 @@ class LEVELDB_EXPORT FilterPolicy {
   //
   // Warning: do not change the initial contents of *dst.  Instead,
   // append the newly constructed filter to *dst.
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
-      const = 0;
+  virtual void CreateFilter(const Slice* keys, int n,
+                            std::string* dst) const = 0;
 
   // "filter" contains the data appended by a preceding call to
   // CreateFilter() on this class.  This method must return true if
diff --git a/include/leveldb/iterator.h b/include/leveldb/iterator.h
index 6c1d91b..447e950 100644
--- a/include/leveldb/iterator.h
+++ b/include/leveldb/iterator.h
@@ -93,7 +93,10 @@ class LEVELDB_EXPORT Iterator {
     // True if the node is not used. Only head nodes might be unused.
     bool IsEmpty() const { return function == nullptr; }
     // Invokes the cleanup function.
-    void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
+    void Run() {
+      assert(function != nullptr);
+      (*function)(arg1, arg2);
+    }
   };
   CleanupNode cleanup_head_;
 };
diff --git a/include/leveldb/options.h b/include/leveldb/options.h
index 90aa19e..7e26dc6 100644
--- a/include/leveldb/options.h
+++ b/include/leveldb/options.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
 
 #include <stddef.h>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -24,7 +25,7 @@ class Snapshot;
 enum CompressionType {
   // NOTE: do not change the values of existing entries, as these are
   // part of the persistent format on disk.
-  kNoCompression     = 0x0,
+  kNoCompression = 0x0,
   kSnappyCompression = 0x1
 };
 
diff --git a/include/leveldb/slice.h b/include/leveldb/slice.h
index a86e8a6..2df417d 100644
--- a/include/leveldb/slice.h
+++ b/include/leveldb/slice.h
@@ -18,7 +18,9 @@
 #include <assert.h>
 #include <stddef.h>
 #include <string.h>
+
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -26,16 +28,16 @@ namespace leveldb {
 class LEVELDB_EXPORT Slice {
  public:
   // Create an empty slice.
-  Slice() : data_(""), size_(0) { }
+  Slice() : data_(""), size_(0) {}
 
   // Create a slice that refers to d[0,n-1].
-  Slice(const char* d, size_t n) : data_(d), size_(n) { }
+  Slice(const char* d, size_t n) : data_(d), size_(n) {}
 
   // Create a slice that refers to the contents of "s"
-  Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+  Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
 
   // Create a slice that refers to s[0,strlen(s)-1]
-  Slice(const char* s) : data_(s), size_(strlen(s)) { }
+  Slice(const char* s) : data_(s), size_(strlen(s)) {}
 
   // Intentionally copyable.
   Slice(const Slice&) = default;
@@ -58,7 +60,10 @@ class LEVELDB_EXPORT Slice {
   }
 
   // Change this slice to refer to an empty array
-  void clear() { data_ = ""; size_ = 0; }
+  void clear() {
+    data_ = "";
+    size_ = 0;
+  }
 
   // Drop the first "n" bytes from this slice.
   void remove_prefix(size_t n) {
@@ -78,8 +83,7 @@ class LEVELDB_EXPORT Slice {
 
   // Return true iff "x" is a prefix of "*this"
   bool starts_with(const Slice& x) const {
-    return ((size_ >= x.size_) &&
-            (memcmp(data_, x.data_, x.size_) == 0));
+    return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
   }
 
  private:
@@ -92,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
           (memcmp(x.data(), y.data(), x.size()) == 0));
 }
 
-inline bool operator!=(const Slice& x, const Slice& y) {
-  return !(x == y);
-}
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
 
 inline int Slice::compare(const Slice& b) const {
   const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
   int r = memcmp(data_, b.data_, min_len);
   if (r == 0) {
-    if (size_ < b.size_) r = -1;
-    else if (size_ > b.size_) r = +1;
+    if (size_ < b.size_)
+      r = -1;
+    else if (size_ > b.size_)
+      r = +1;
   }
   return r;
 }
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_INCLUDE_SLICE_H_
diff --git a/include/leveldb/status.h b/include/leveldb/status.h
index ee9fac2..54cf377 100644
--- a/include/leveldb/status.h
+++ b/include/leveldb/status.h
@@ -15,6 +15,7 @@
 
 #include <algorithm>
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -23,7 +24,7 @@ namespace leveldb {
 class LEVELDB_EXPORT Status {
  public:
   // Create a success status.
-  Status() noexcept : state_(nullptr) { }
+  Status() noexcept : state_(nullptr) {}
   ~Status() { delete[] state_; }
 
   Status(const Status& rhs);
diff --git a/include/leveldb/table.h b/include/leveldb/table.h
index e9f6641..14a6a44 100644
--- a/include/leveldb/table.h
+++ b/include/leveldb/table.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_TABLE_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
 
@@ -36,10 +37,8 @@ class LEVELDB_EXPORT Table {
   // for the duration of the returned table's lifetime.
   //
   // *file must remain live while this Table is in use.
-  static Status Open(const Options& options,
-                     RandomAccessFile* file,
-                     uint64_t file_size,
-                     Table** table);
+  static Status Open(const Options& options, RandomAccessFile* file,
+                     uint64_t file_size, Table** table);
 
   Table(const Table&) = delete;
   void operator=(const Table&) = delete;
@@ -70,11 +69,9 @@ class LEVELDB_EXPORT Table {
   // to Seek(key).  May not make such a call if filter policy says
   // that key is not present.
   friend class TableCache;
-  Status InternalGet(
-      const ReadOptions&, const Slice& key,
-      void* arg,
-      void (*handle_result)(void* arg, const Slice& k, const Slice& v));
-
+  Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
+                     void (*handle_result)(void* arg, const Slice& k,
+                                           const Slice& v));
 
   void ReadMeta(const Footer& footer);
   void ReadFilter(const Slice& filter_handle_value);
diff --git a/include/leveldb/table_builder.h b/include/leveldb/table_builder.h
index 8d05d33..f8361fd 100644
--- a/include/leveldb/table_builder.h
+++ b/include/leveldb/table_builder.h
@@ -14,6 +14,7 @@
 #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/options.h"
 #include "leveldb/status.h"
diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index 5380c53..21f7c63 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -22,6 +22,7 @@
 #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -35,7 +36,7 @@ class LEVELDB_EXPORT WriteBatch {
 
   // Intentionally copyable.
   WriteBatch(const WriteBatch&) = default;
-  WriteBatch& operator =(const WriteBatch&) = default;
+  WriteBatch& operator=(const WriteBatch&) = default;
 
   ~WriteBatch();
 
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index 1b1cf8b..d50ffeb 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 // Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
 #include <iostream>
 #include <sstream>
-#include <cstdlib>
 
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
@@ -21,11 +21,9 @@ std::string Key1(int i) {
   return buf;
 }
 
-std::string Key2(int i) {
-  return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
 
-class Issue178 { };
+class Issue178 {};
 
 TEST(Issue178, Test) {
   // Get rid of any state from an old run.
@@ -87,6 +85,4 @@ TEST(Issue178, Test) {
 
 }  // anonymous namespace
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc
index 1cec79f..877b2af 100644
--- a/issues/issue200_test.cc
+++ b/issues/issue200_test.cc
@@ -11,14 +11,14 @@
 
 namespace leveldb {
 
-class Issue200 { };
+class Issue200 {};
 
 TEST(Issue200, Test) {
   // Get rid of any state from an old run.
   std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
   DestroyDB(dbpath, Options());
 
-  DB *db;
+  DB* db;
   Options options;
   options.create_if_missing = true;
   ASSERT_OK(DB::Open(options, dbpath, &db));
@@ -31,7 +31,7 @@ TEST(Issue200, Test) {
   ASSERT_OK(db->Put(write_options, "5", "f"));
 
   ReadOptions read_options;
-  Iterator *iter = db->NewIterator(read_options);
+  Iterator* iter = db->NewIterator(read_options);
 
   // Add an element that should not be reflected in the iterator.
   ASSERT_OK(db->Put(write_options, "25", "cd"));
@@ -54,6 +54,4 @@ TEST(Issue200, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/port/port.h b/port/port.h
index b2210a7..4b247f7 100644
--- a/port/port.h
+++ b/port/port.h
@@ -11,9 +11,9 @@
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
 #if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
-#  include "port/port_stdcxx.h"
+#include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
-#  include "port/port_chromium.h"
+#include "port/port_chromium.h"
 #endif
 
 #endif  // STORAGE_LEVELDB_PORT_PORT_H_
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index 7638ded..e9cb0e5 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -30,10 +30,10 @@
 #endif  // HAVE_SNAPPY
 
 #include <cassert>
+#include <condition_variable>  // NOLINT
 #include <cstddef>
 #include <cstdint>
-#include <condition_variable>  // NOLINT
-#include <mutex>               // NOLINT
+#include <mutex>  // NOLINT
 #include <string>
 
 #include "port/thread_annotations.h"
@@ -56,7 +56,7 @@ class LOCKABLE Mutex {
 
   void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
   void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
-  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
+  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
 
  private:
   friend class CondVar;
@@ -79,6 +79,7 @@ class CondVar {
   }
   void Signal() { cv_.notify_one(); }
   void SignalAll() { cv_.notify_all(); }
+
  private:
   std::condition_variable cv_;
   Mutex* const mu_;
@@ -94,7 +95,9 @@ inline bool Snappy_Compress(const char* input, size_t length,
   return true;
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
 #endif  // HAVE_SNAPPY
 
   return false;
@@ -106,7 +109,9 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
   return snappy::GetUncompressedLength(input, length, result);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)result;
+  (void)input;
+  (void)length;
+  (void)result;
   return false;
 #endif  // HAVE_SNAPPY
 }
@@ -116,14 +121,17 @@ inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
   return snappy::RawUncompress(input, length, output);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
   return false;
 #endif  // HAVE_SNAPPY
 }
 
 inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
   // Silence compiler warnings about unused arguments.
-  (void)func; (void)arg;
+  (void)func;
+  (void)arg;
   return false;
 }
 
@@ -132,7 +140,9 @@ inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
   return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)crc; (void)buf; (void)size;
+  (void)crc;
+  (void)buf;
+  (void)size;
   return 0;
 #endif  // HAVE_CRC32C
 }
diff --git a/port/thread_annotations.h b/port/thread_annotations.h
index b737c69..1547df9 100644
--- a/port/thread_annotations.h
+++ b/port/thread_annotations.h
@@ -13,9 +13,9 @@
 
 #if defined(__clang__)
 
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
 #else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
 #endif
 
 #endif  // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
@@ -54,18 +54,15 @@
 #endif
 
 #ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
 #endif
 
 #ifndef LOCKABLE
-#define LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
 #endif
 
 #ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
 #endif
 
 #ifndef EXCLUSIVE_LOCK_FUNCTION
diff --git a/table/block.cc b/table/block.cc
index 6fdfdea..ad0ee98 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -6,8 +6,9 @@
 
 #include "table/block.h"
 
-#include <vector>
 #include <algorithm>
+#include <vector>
+
 #include "leveldb/comparator.h"
 #include "table/format.h"
 #include "util/coding.h"
@@ -27,7 +28,7 @@ Block::Block(const BlockContents& contents)
   if (size_ < sizeof(uint32_t)) {
     size_ = 0;  // Error marker
   } else {
-    size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+    size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
     if (NumRestarts() > max_restarts_allowed) {
       // The size is too small for NumRestarts()
       size_ = 0;
@@ -51,8 +52,7 @@ Block::~Block() {
 // If any errors are detected, returns nullptr.  Otherwise, returns a
 // pointer to the key delta (just past the three decoded values).
 static inline const char* DecodeEntry(const char* p, const char* limit,
-                                      uint32_t* shared,
-                                      uint32_t* non_shared,
+                                      uint32_t* shared, uint32_t* non_shared,
                                       uint32_t* value_length) {
   if (limit - p < 3) return nullptr;
   *shared = reinterpret_cast<const unsigned char*>(p)[0];
@@ -76,9 +76,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
 class Block::Iter : public Iterator {
  private:
   const Comparator* const comparator_;
-  const char* const data_;      // underlying block contents
-  uint32_t const restarts_;     // Offset of restart array (list of fixed32)
-  uint32_t const num_restarts_; // Number of uint32_t entries in restart array
+  const char* const data_;       // underlying block contents
+  uint32_t const restarts_;      // Offset of restart array (list of fixed32)
+  uint32_t const num_restarts_;  // Number of uint32_t entries in restart array
 
   // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
   uint32_t current_;
@@ -112,9 +112,7 @@ class Block::Iter : public Iterator {
   }
 
  public:
-  Iter(const Comparator* comparator,
-       const char* data,
-       uint32_t restarts,
+  Iter(const Comparator* comparator, const char* data, uint32_t restarts,
        uint32_t num_restarts)
       : comparator_(comparator),
         data_(data),
@@ -171,9 +169,9 @@ class Block::Iter : public Iterator {
       uint32_t mid = (left + right + 1) / 2;
       uint32_t region_offset = GetRestartPoint(mid);
       uint32_t shared, non_shared, value_length;
-      const char* key_ptr = DecodeEntry(data_ + region_offset,
-                                        data_ + restarts_,
-                                        &shared, &non_shared, &value_length);
+      const char* key_ptr =
+          DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+                      &non_shared, &value_length);
       if (key_ptr == nullptr || (shared != 0)) {
         CorruptionError();
         return;
@@ -253,7 +251,7 @@ class Block::Iter : public Iterator {
   }
 };
 
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
   if (size_ < sizeof(uint32_t)) {
     return NewErrorIterator(Status::Corruption("bad block contents"));
   }
@@ -261,7 +259,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
   if (num_restarts == 0) {
     return NewEmptyIterator();
   } else {
-    return new Iter(cmp, data_, restart_offset_, num_restarts);
+    return new Iter(comparator, data_, restart_offset_, num_restarts);
   }
 }
 
diff --git a/table/block.h b/table/block.h
index 2493eb9..3d4b03c 100644
--- a/table/block.h
+++ b/table/block.h
@@ -7,6 +7,7 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/iterator.h"
 
 namespace leveldb {
@@ -29,8 +30,8 @@ class Block {
 
   const char* data_;
   size_t size_;
-  uint32_t restart_offset_;     // Offset in data_ of restart array
-  bool owned_;                  // Block owns data_[]
+  uint32_t restart_offset_;  // Offset in data_ of restart array
+  bool owned_;               // Block owns data_[]
 
   // No copying allowed
   Block(const Block&);
diff --git a/table/block_builder.cc b/table/block_builder.cc
index db660cd..f7cb1b0 100644
--- a/table/block_builder.cc
+++ b/table/block_builder.cc
@@ -28,8 +28,10 @@
 
 #include "table/block_builder.h"
 
-#include <algorithm>
 #include <assert.h>
+
+#include <algorithm>
+
 #include "leveldb/comparator.h"
 #include "leveldb/table_builder.h"
 #include "util/coding.h"
@@ -37,27 +39,24 @@
 namespace leveldb {
 
 BlockBuilder::BlockBuilder(const Options* options)
-    : options_(options),
-      restarts_(),
-      counter_(0),
-      finished_(false) {
+    : options_(options), restarts_(), counter_(0), finished_(false) {
   assert(options->block_restart_interval >= 1);
-  restarts_.push_back(0);       // First restart point is at offset 0
+  restarts_.push_back(0);  // First restart point is at offset 0
 }
 
 void BlockBuilder::Reset() {
   buffer_.clear();
   restarts_.clear();
-  restarts_.push_back(0);       // First restart point is at offset 0
+  restarts_.push_back(0);  // First restart point is at offset 0
   counter_ = 0;
   finished_ = false;
   last_key_.clear();
 }
 
 size_t BlockBuilder::CurrentSizeEstimate() const {
-  return (buffer_.size() +                        // Raw data buffer
-          restarts_.size() * sizeof(uint32_t) +   // Restart array
-          sizeof(uint32_t));                      // Restart array length
+  return (buffer_.size() +                       // Raw data buffer
+          restarts_.size() * sizeof(uint32_t) +  // Restart array
+          sizeof(uint32_t));                     // Restart array length
 }
 
 Slice BlockBuilder::Finish() {
@@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
   Slice last_key_piece(last_key_);
   assert(!finished_);
   assert(counter_ <= options_->block_restart_interval);
-  assert(buffer_.empty() // No values yet?
+  assert(buffer_.empty()  // No values yet?
          || options_->comparator->Compare(key, last_key_piece) > 0);
   size_t shared = 0;
   if (counter_ < options_->block_restart_interval) {
diff --git a/table/block_builder.h b/table/block_builder.h
index 4fbcb33..d0d9b6e 100644
--- a/table/block_builder.h
+++ b/table/block_builder.h
@@ -5,9 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 
+#include <stdint.h>
+
 #include <vector>
 
-#include <stdint.h>
 #include "leveldb/slice.h"
 
 namespace leveldb {
@@ -35,17 +36,15 @@ class BlockBuilder {
   size_t CurrentSizeEstimate() const;
 
   // Return true iff no entries have been added since the last Reset()
-  bool empty() const {
-    return buffer_.empty();
-  }
+  bool empty() const { return buffer_.empty(); }
 
  private:
-  const Options*        options_;
-  std::string           buffer_;      // Destination buffer
-  std::vector<uint32_t> restarts_;    // Restart points
-  int                   counter_;     // Number of entries emitted since restart
-  bool                  finished_;    // Has Finish() been called?
-  std::string           last_key_;
+  const Options* options_;
+  std::string buffer_;              // Destination buffer
+  std::vector<uint32_t> restarts_;  // Restart points
+  int counter_;                     // Number of entries emitted since restart
+  bool finished_;                   // Has Finish() been called?
+  std::string last_key_;
 
   // No copying allowed
   BlockBuilder(const BlockBuilder&);
diff --git a/table/filter_block.cc b/table/filter_block.cc
index ce0aa04..09ec009 100644
--- a/table/filter_block.cc
+++ b/table/filter_block.cc
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
 static const size_t kFilterBase = 1 << kFilterBaseLg;
 
 FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
-    : policy_(policy) {
-}
+    : policy_(policy) {}
 
 void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
   uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
   tmp_keys_.resize(num_keys);
   for (size_t i = 0; i < num_keys; i++) {
     const char* base = keys_.data() + start_[i];
-    size_t length = start_[i+1] - start_[i];
+    size_t length = start_[i + 1] - start_[i];
     tmp_keys_[i] = Slice(base, length);
   }
 
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
 
 FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                      const Slice& contents)
-    : policy_(policy),
-      data_(nullptr),
-      offset_(nullptr),
-      num_(0),
-      base_lg_(0) {
+    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
   size_t n = contents.size();
   if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
-  base_lg_ = contents[n-1];
+  base_lg_ = contents[n - 1];
   uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
   if (last_word > n - 5) return;
   data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
 bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   uint64_t index = block_offset >> base_lg_;
   if (index < num_) {
-    uint32_t start = DecodeFixed32(offset_ + index*4);
-    uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+    uint32_t start = DecodeFixed32(offset_ + index * 4);
+    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
     if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
       Slice filter = Slice(data_ + start, limit - start);
       return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   return true;  // Errors are treated as potential matches
 }
 
-}
+}  // namespace leveldb
diff --git a/table/filter_block.h b/table/filter_block.h
index c67d010..1b034dc 100644
--- a/table/filter_block.h
+++ b/table/filter_block.h
@@ -11,8 +11,10 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/slice.h"
 #include "util/hash.h"
 
@@ -38,10 +40,10 @@ class FilterBlockBuilder {
   void GenerateFilter();
 
   const FilterPolicy* policy_;
-  std::string keys_;              // Flattened key contents
-  std::vector<size_t> start_;     // Starting index in keys_ of each key
-  std::string result_;            // Filter data computed so far
-  std::vector<Slice> tmp_keys_;   // policy_->CreateFilter() argument
+  std::string keys_;             // Flattened key contents
+  std::vector<size_t> start_;    // Starting index in keys_ of each key
+  std::string result_;           // Filter data computed so far
+  std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
 
   // No copying allowed
@@ -51,7 +53,7 @@ class FilterBlockBuilder {
 
 class FilterBlockReader {
  public:
- // REQUIRES: "contents" and *policy must stay live while *this is live.
+  // REQUIRES: "contents" and *policy must stay live while *this is live.
   FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
   bool KeyMayMatch(uint64_t block_offset, const Slice& key);
 
@@ -63,6 +65,6 @@ class FilterBlockReader {
   size_t base_lg_;      // Encoding parameter (see kFilterBaseLg in .cc file)
 };
 
-}
+}  // namespace leveldb
 
 #endif  // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 8c4a474..6cdd435 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -16,9 +16,7 @@ namespace leveldb {
 // For testing: emit an array with one hash value per key
 class TestHashFilter : public FilterPolicy {
  public:
-  virtual const char* Name() const {
-    return "TestHashFilter";
-  }
+  virtual const char* Name() const { return "TestHashFilter"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     for (int i = 0; i < n; i++) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
   ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
 }
 
 TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
   // Check first filter
   ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
   ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
 
   // Check second filter
   ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
 
   // Check third filter (empty)
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
 
   // Check last filter
   ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/table/format.cc b/table/format.cc
index 24e4e02..e183977 100644
--- a/table/format.cc
+++ b/table/format.cc
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
 }
 
 Status BlockHandle::DecodeFrom(Slice* input) {
-  if (GetVarint64(input, &offset_) &&
-      GetVarint64(input, &size_)) {
+  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
     return Status::OK();
   } else {
     return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
   return result;
 }
 
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result) {
   result->data = Slice();
   result->cachable = false;
   result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
   }
 
   // Check the crc of the type and the block contents
-  const char* data = contents.data();    // Pointer to where Read put the data
+  const char* data = contents.data();  // Pointer to where Read put the data
   if (options.verify_checksums) {
     const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
     const uint32_t actual = crc32c::Value(data, n + 1);
diff --git a/table/format.h b/table/format.h
index 144ff55..dacaa9f 100644
--- a/table/format.h
+++ b/table/format.h
@@ -5,8 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
 #define STORAGE_LEVELDB_TABLE_FORMAT_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "leveldb/table_builder.h"
@@ -46,19 +48,15 @@ class BlockHandle {
 // end of every table file.
 class Footer {
  public:
-  Footer() { }
+  Footer() {}
 
   // The block handle for the metaindex block of the table
   const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
   void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
 
   // The block handle for the index block of the table
-  const BlockHandle& index_handle() const {
-    return index_handle_;
-  }
-  void set_index_handle(const BlockHandle& h) {
-    index_handle_ = h;
-  }
+  const BlockHandle& index_handle() const { return index_handle_; }
+  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
 
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
@@ -66,9 +64,7 @@ class Footer {
   // Encoded length of a Footer.  Note that the serialization of a
   // Footer will always occupy exactly this many bytes.  It consists
   // of two block handles and a magic number.
-  enum {
-    kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
-  };
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
 
  private:
   BlockHandle metaindex_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
 
 // Read the block identified by "handle" from "file".  On failure
 // return non-OK.  On success fill *result and return OK.
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result);
 
 // Implementation details follow.  Clients should ignore,
 
 inline BlockHandle::BlockHandle()
-    : offset_(~static_cast<uint64_t>(0)),
-      size_(~static_cast<uint64_t>(0)) {
-}
+    : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
 
 }  // namespace leveldb
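
A note on the Footer hunk above: kEncodedLength is fixed so the footer can always be read from the last bytes of a table file. The breakdown below assumes BlockHandle::kMaxEncodedLength is 20 (two varint64 fields of at most 10 bytes each); that constant is defined outside the lines shown here.

// Fixed footer size, under the assumption above: two handles padded to their
// maximum encoded length, followed by an 8-byte magic number.
enum { kAssumedMaxHandleLength = 10 + 10 };
static_assert(2 * kAssumedMaxHandleLength + 8 == 48,
              "footer would be 48 bytes: two padded handles plus the magic");
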
 
diff --git a/table/iterator.cc b/table/iterator.cc
index 41ec1aa..dfef083 100644
--- a/table/iterator.cc
+++ b/table/iterator.cc
@@ -14,7 +14,7 @@ Iterator::Iterator() {
 Iterator::~Iterator() {
   if (!cleanup_head_.IsEmpty()) {
     cleanup_head_.Run();
-    for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+    for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
       node->Run();
       CleanupNode* next_node = node->next;
       delete node;
@@ -42,17 +42,23 @@ namespace {
 
 class EmptyIterator : public Iterator {
  public:
-  EmptyIterator(const Status& s) : status_(s) { }
+  EmptyIterator(const Status& s) : status_(s) {}
   ~EmptyIterator() override = default;
 
   bool Valid() const override { return false; }
-  void Seek(const Slice& target) override { }
-  void SeekToFirst() override { }
-  void SeekToLast() override { }
+  void Seek(const Slice& target) override {}
+  void SeekToFirst() override {}
+  void SeekToLast() override {}
   void Next() override { assert(false); }
   void Prev() override { assert(false); }
-  Slice key() const override { assert(false); return Slice(); }
-  Slice value() const override { assert(false); return Slice(); }
+  Slice key() const override {
+    assert(false);
+    return Slice();
+  }
+  Slice value() const override {
+    assert(false);
+    return Slice();
+  }
   Status status() const override { return status_; }
 
  private:
@@ -61,9 +67,7 @@ class EmptyIterator : public Iterator {
 
 }  // anonymous namespace
 
-Iterator* NewEmptyIterator() {
-  return new EmptyIterator(Status::OK());
-}
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
 
 Iterator* NewErrorIterator(const Status& status) {
   return new EmptyIterator(status);
diff --git a/table/iterator_wrapper.h b/table/iterator_wrapper.h
index f1814ca..c230572 100644
--- a/table/iterator_wrapper.h
+++ b/table/iterator_wrapper.h
@@ -16,10 +16,8 @@ namespace leveldb {
 // cache locality.
 class IteratorWrapper {
  public:
-  IteratorWrapper(): iter_(nullptr), valid_(false) { }
-  explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
-    Set(iter);
-  }
+  IteratorWrapper() : iter_(nullptr), valid_(false) {}
+  explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
   ~IteratorWrapper() { delete iter_; }
   Iterator* iter() const { return iter_; }
 
@@ -35,18 +33,46 @@ class IteratorWrapper {
     }
   }
 
-
   // Iterator interface methods
-  bool Valid() const        { return valid_; }
-  Slice key() const         { assert(Valid()); return key_; }
-  Slice value() const       { assert(Valid()); return iter_->value(); }
+  bool Valid() const { return valid_; }
+  Slice key() const {
+    assert(Valid());
+    return key_;
+  }
+  Slice value() const {
+    assert(Valid());
+    return iter_->value();
+  }
   // Methods below require iter() != nullptr
-  Status status() const     { assert(iter_); return iter_->status(); }
-  void Next()               { assert(iter_); iter_->Next();        Update(); }
-  void Prev()               { assert(iter_); iter_->Prev();        Update(); }
-  void Seek(const Slice& k) { assert(iter_); iter_->Seek(k);       Update(); }
-  void SeekToFirst()        { assert(iter_); iter_->SeekToFirst(); Update(); }
-  void SeekToLast()         { assert(iter_); iter_->SeekToLast();  Update(); }
+  Status status() const {
+    assert(iter_);
+    return iter_->status();
+  }
+  void Next() {
+    assert(iter_);
+    iter_->Next();
+    Update();
+  }
+  void Prev() {
+    assert(iter_);
+    iter_->Prev();
+    Update();
+  }
+  void Seek(const Slice& k) {
+    assert(iter_);
+    iter_->Seek(k);
+    Update();
+  }
+  void SeekToFirst() {
+    assert(iter_);
+    iter_->SeekToFirst();
+    Update();
+  }
+  void SeekToLast() {
+    assert(iter_);
+    iter_->SeekToLast();
+    Update();
+  }
 
  private:
   void Update() {
diff --git a/table/merger.cc b/table/merger.cc
index e079680..3a5c3e4 100644
--- a/table/merger.cc
+++ b/table/merger.cc
@@ -24,13 +24,9 @@ class MergingIterator : public Iterator {
     }
   }
 
-  virtual ~MergingIterator() {
-    delete[] children_;
-  }
+  virtual ~MergingIterator() { delete[] children_; }
 
-  virtual bool Valid() const {
-    return (current_ != nullptr);
-  }
+  virtual bool Valid() const { return (current_ != nullptr); }
 
   virtual void SeekToFirst() {
     for (int i = 0; i < n_; i++) {
@@ -145,10 +141,7 @@ class MergingIterator : public Iterator {
   IteratorWrapper* current_;
 
   // Which direction is the iterator moving?
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
   Direction direction_;
 };
 
@@ -169,7 +162,7 @@ void MergingIterator::FindSmallest() {
 
 void MergingIterator::FindLargest() {
   IteratorWrapper* largest = nullptr;
-  for (int i = n_-1; i >= 0; i--) {
+  for (int i = n_ - 1; i >= 0; i--) {
     IteratorWrapper* child = &children_[i];
     if (child->Valid()) {
       if (largest == nullptr) {
@@ -183,14 +176,15 @@ void MergingIterator::FindLargest() {
 }
 }  // namespace
 
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n) {
   assert(n >= 0);
   if (n == 0) {
     return NewEmptyIterator();
   } else if (n == 1) {
-    return list[0];
+    return children[0];
   } else {
-    return new MergingIterator(cmp, list, n);
+    return new MergingIterator(comparator, children, n);
   }
 }
 
diff --git a/table/merger.h b/table/merger.h
index bafdf5a..41cedc5 100644
--- a/table/merger.h
+++ b/table/merger.h
@@ -18,8 +18,8 @@ class Iterator;
 // key is present in K child iterators, it will be yielded K times.
 //
 // REQUIRES: n >= 0
-Iterator* NewMergingIterator(
-    const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n);
 
 }  // namespace leveldb
 
diff --git a/table/table.cc b/table/table.cc
index 8e737e1..b07bc88 100644
--- a/table/table.cc
+++ b/table/table.cc
@@ -20,7 +20,7 @@ namespace leveldb {
 struct Table::Rep {
   ~Rep() {
     delete filter;
-    delete [] filter_data;
+    delete[] filter_data;
     delete index_block;
   }
 
@@ -35,10 +35,8 @@ struct Table::Rep {
   Block* index_block;
 };
 
-Status Table::Open(const Options& options,
-                   RandomAccessFile* file,
-                   uint64_t size,
-                   Table** table) {
+Status Table::Open(const Options& options, RandomAccessFile* file,
+                   uint64_t size, Table** table) {
   *table = nullptr;
   if (size < Footer::kEncodedLength) {
     return Status::Corruption("file is too short to be an sstable");
@@ -130,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
     return;
   }
   if (block.heap_allocated) {
-    rep_->filter_data = block.data.data();     // Will need to delete later
+    rep_->filter_data = block.data.data();  // Will need to delete later
   }
   rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
 }
 
-Table::~Table() {
-  delete rep_;
-}
+Table::~Table() { delete rep_; }
 
 static void DeleteBlock(void* arg, void* ignored) {
   delete reinterpret_cast<Block*>(arg);
@@ -156,8 +152,7 @@ static void ReleaseBlock(void* arg, void* h) {
 
 // Convert an index iterator value (i.e., an encoded BlockHandle)
 // into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
-                             const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
                              const Slice& index_value) {
   Table* table = reinterpret_cast<Table*>(arg);
   Cache* block_cache = table->rep_->options.block_cache;
@@ -175,7 +170,7 @@ Iterator* Table::BlockReader(void* arg,
     if (block_cache != nullptr) {
       char cache_key_buffer[16];
       EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
-      EncodeFixed64(cache_key_buffer+8, handle.offset());
+      EncodeFixed64(cache_key_buffer + 8, handle.offset());
       Slice key(cache_key_buffer, sizeof(cache_key_buffer));
       cache_handle = block_cache->Lookup(key);
       if (cache_handle != nullptr) {
@@ -185,8 +180,8 @@ Iterator* Table::BlockReader(void* arg,
         if (s.ok()) {
           block = new Block(contents);
           if (contents.cachable && options.fill_cache) {
-            cache_handle = block_cache->Insert(
-                key, block, block->size(), &DeleteCachedBlock);
+            cache_handle = block_cache->Insert(key, block, block->size(),
+                                               &DeleteCachedBlock);
           }
         }
       }
@@ -218,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const {
       &Table::BlockReader, const_cast<Table*>(this), options);
 }
 
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
-                          void* arg,
-                          void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+                          void (*handle_result)(void*, const Slice&,
+                                                const Slice&)) {
   Status s;
   Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
   iiter->Seek(k);
@@ -228,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
     Slice handle_value = iiter->value();
     FilterBlockReader* filter = rep_->filter;
     BlockHandle handle;
-    if (filter != nullptr &&
-        handle.DecodeFrom(&handle_value).ok() &&
+    if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
         !filter->KeyMayMatch(handle.offset(), k)) {
       // Not found
     } else {
       Iterator* block_iter = BlockReader(this, options, iiter->value());
       block_iter->Seek(k);
       if (block_iter->Valid()) {
-        (*saver)(arg, block_iter->key(), block_iter->value());
+        (*handle_result)(arg, block_iter->key(), block_iter->value());
       }
       s = block_iter->status();
       delete block_iter;
@@ -249,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
   return s;
 }
 
-
 uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
   Iterator* index_iter =
       rep_->index_block->NewIterator(rep_->options.comparator);
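
For context on the BlockReader hunk above: when a block cache is configured, each data block is cached under a fixed 16-byte key built from the table's cache id and the block's offset in the file. A minimal sketch of that key construction, reusing only the EncodeFixed64 helper visible elsewhere in this patch; the function name here is invented, not part of the patch.

#include <stdint.h>

#include "leveldb/slice.h"
#include "util/coding.h"

namespace leveldb {

// Build the 16-byte block-cache key used in Table::BlockReader():
// 8 bytes of per-table cache id followed by 8 bytes of block offset,
// both fixed-width, so blocks from different tables never share a key.
static Slice MakeBlockCacheKey(uint64_t cache_id, uint64_t block_offset,
                               char buffer[16]) {
  EncodeFixed64(buffer, cache_id);
  EncodeFixed64(buffer + 8, block_offset);
  return Slice(buffer, 16);
}

}  // namespace leveldb
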
diff --git a/table/table_builder.cc b/table/table_builder.cc
index 444d4f9..9afff76 100644
--- a/table/table_builder.cc
+++ b/table/table_builder.cc
@@ -5,6 +5,7 @@
 #include "leveldb/table_builder.h"
 
 #include <assert.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/env.h"
 #include "leveldb/filter_policy.h"
@@ -27,7 +28,7 @@ struct TableBuilder::Rep {
   BlockBuilder index_block;
   std::string last_key;
   int64_t num_entries;
-  bool closed;          // Either Finish() or Abandon() has been called.
+  bool closed;  // Either Finish() or Abandon() has been called.
   FilterBlockBuilder* filter_block;
 
   // We do not emit the index entry for a block until we have seen the
@@ -53,8 +54,9 @@ struct TableBuilder::Rep {
         index_block(&index_block_options),
         num_entries(0),
         closed(false),
-        filter_block(opt.filter_policy == nullptr ? nullptr
-                     : new FilterBlockBuilder(opt.filter_policy)),
+        filter_block(opt.filter_policy == nullptr
+                         ? nullptr
+                         : new FilterBlockBuilder(opt.filter_policy)),
         pending_index_entry(false) {
     index_block_options.block_restart_interval = 1;
   }
@@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
 }
 
 void TableBuilder::WriteRawBlock(const Slice& block_contents,
-                                 CompressionType type,
-                                 BlockHandle* handle) {
+                                 CompressionType type, BlockHandle* handle) {
   Rep* r = rep_;
   handle->set_offset(r->offset);
   handle->set_size(block_contents.size());
@@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
     trailer[0] = type;
     uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
     crc = crc32c::Extend(crc, trailer, 1);  // Extend crc to cover block type
-    EncodeFixed32(trailer+1, crc32c::Mask(crc));
+    EncodeFixed32(trailer + 1, crc32c::Mask(crc));
     r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
     if (r->status.ok()) {
       r->offset += block_contents.size() + kBlockTrailerSize;
@@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
   }
 }
 
-Status TableBuilder::status() const {
-  return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
 
 Status TableBuilder::Finish() {
   Rep* r = rep_;
@@ -259,12 +258,8 @@ void TableBuilder::Abandon() {
   r->closed = true;
 }
 
-uint64_t TableBuilder::NumEntries() const {
-  return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
 
-uint64_t TableBuilder::FileSize() const {
-  return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
 
 }  // namespace leveldb
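
For context on the WriteRawBlock hunk above: every block written to the file is followed by a small trailer holding the compression-type byte and a masked crc32c that covers both the block contents and that type byte. The sketch below rebuilds just the trailer, reusing the crc32c and EncodeFixed32 helpers visible in the hunk; the 5-byte size (1 type byte + 4 CRC bytes) and the helper name are assumptions, not shown in these lines.

#include <stdint.h>
#include <string>

#include "leveldb/slice.h"
#include "util/coding.h"
#include "util/crc32c.h"

namespace leveldb {

// Build the per-block trailer appended after block_contents:
// byte 0 is the compression type, bytes 1..4 are the masked crc32c of
// (contents + type byte), in the same order WriteRawBlock() uses.
static std::string MakeBlockTrailer(const Slice& block_contents, char type) {
  char trailer[5];  // assumed value of kBlockTrailerSize
  trailer[0] = type;
  uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
  crc = crc32c::Extend(crc, trailer, 1);  // extend crc to cover the type byte
  EncodeFixed32(trailer + 1, crc32c::Mask(crc));
  return std::string(trailer, sizeof(trailer));
}

}  // namespace leveldb

Masking the CRC before storing it is the same convention the log format uses, so a CRC of a CRC does not look like valid data.
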
diff --git a/table/table_test.cc b/table/table_test.cc
index 5573be6..0974052 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -6,6 +6,7 @@
 
 #include <map>
 #include <string>
+
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
@@ -27,8 +28,8 @@ namespace leveldb {
 static std::string Reverse(const Slice& key) {
   std::string str(key.ToString());
   std::string rev("");
-  for (std::string::reverse_iterator rit = str.rbegin();
-       rit != str.rend(); ++rit) {
+  for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+       ++rit) {
     rev.push_back(*rit);
   }
   return rev;
@@ -45,9 +46,8 @@ class ReverseKeyComparator : public Comparator {
     return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
   }
 
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     std::string s = Reverse(*start);
     std::string l = Reverse(limit);
     BytewiseComparator()->FindShortestSeparator(&s, l);
@@ -79,17 +79,17 @@ namespace {
 struct STLLessThan {
   const Comparator* cmp;
 
-  STLLessThan() : cmp(BytewiseComparator()) { }
-  STLLessThan(const Comparator* c) : cmp(c) { }
+  STLLessThan() : cmp(BytewiseComparator()) {}
+  STLLessThan(const Comparator* c) : cmp(c) {}
   bool operator()(const std::string& a, const std::string& b) const {
     return cmp->Compare(Slice(a), Slice(b)) < 0;
   }
 };
 }  // namespace
 
-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
  public:
-  ~StringSink() { }
+  ~StringSink() {}
 
   const std::string& contents() const { return contents_; }
 
@@ -106,19 +106,17 @@ class StringSink: public WritableFile {
   std::string contents_;
 };
 
-
-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
  public:
   StringSource(const Slice& contents)
-      : contents_(contents.data(), contents.size()) {
-  }
+      : contents_(contents.data(), contents.size()) {}
 
-  virtual ~StringSource() { }
+  virtual ~StringSource() {}
 
   uint64_t Size() const { return contents_.size(); }
 
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                       char* scratch) const {
+                      char* scratch) const {
     if (offset >= contents_.size()) {
       return Status::InvalidArgument("invalid Read offset");
     }
@@ -140,8 +138,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
 // BlockBuilder/TableBuilder and Block/Table.
 class Constructor {
  public:
-  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
-  virtual ~Constructor() { }
+  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+  virtual ~Constructor() {}
 
   void Add(const std::string& key, const Slice& value) {
     data_[key] = value.ToString();
@@ -150,14 +148,11 @@ class Constructor {
   // Finish constructing the data structure with all the keys that have
   // been added so far.  Returns the keys in sorted order in "*keys"
   // and stores the key/value pairs in "*kvmap"
-  void Finish(const Options& options,
-              std::vector<std::string>* keys,
+  void Finish(const Options& options, std::vector<std::string>* keys,
               KVMap* kvmap) {
     *kvmap = data_;
     keys->clear();
-    for (KVMap::const_iterator it = data_.begin();
-         it != data_.end();
-         ++it) {
+    for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
       keys->push_back(it->first);
     }
     data_.clear();
@@ -178,23 +173,17 @@ class Constructor {
   KVMap data_;
 };
 
-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
  public:
   explicit BlockConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp),
-        block_(nullptr) { }
-  ~BlockConstructor() {
-    delete block_;
-  }
+      : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+  ~BlockConstructor() { delete block_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete block_;
     block_ = nullptr;
     BlockBuilder builder(&options);
 
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
     }
     // Open the block
@@ -218,23 +207,17 @@ class BlockConstructor: public Constructor {
   BlockConstructor();
 };
 
-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
  public:
   TableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        source_(nullptr), table_(nullptr) {
-  }
-  ~TableConstructor() {
-    Reset();
-  }
+      : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+  ~TableConstructor() { Reset(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     Reset();
     StringSink sink;
     TableBuilder builder(options, &sink);
 
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
       ASSERT_TRUE(builder.status().ok());
     }
@@ -273,9 +256,9 @@ class TableConstructor: public Constructor {
 };
 
 // A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
  public:
-  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
+  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
   virtual ~KeyConvertingIterator() { delete iter_; }
   virtual bool Valid() const { return iter_->Valid(); }
   virtual void Seek(const Slice& target) {
@@ -313,25 +296,20 @@ class KeyConvertingIterator: public Iterator {
   void operator=(const KeyConvertingIterator&);
 };
 
-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
  public:
   explicit MemTableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        internal_comparator_(cmp) {
+      : Constructor(cmp), internal_comparator_(cmp) {
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
   }
-  ~MemTableConstructor() {
-    memtable_->Unref();
-  }
+  ~MemTableConstructor() { memtable_->Unref(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     memtable_->Unref();
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
     int seq = 1;
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       memtable_->Add(seq, kTypeValue, it->first, it->second);
       seq++;
     }
@@ -346,24 +324,19 @@ class MemTableConstructor: public Constructor {
   MemTable* memtable_;
 };
 
-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
  public:
   explicit DBConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp) {
+      : Constructor(cmp), comparator_(cmp) {
     db_ = nullptr;
     NewDB();
   }
-  ~DBConstructor() {
-    delete db_;
-  }
+  ~DBConstructor() { delete db_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete db_;
     db_ = nullptr;
     NewDB();
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       WriteBatch batch;
       batch.Put(it->first, it->second);
       ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
@@ -396,12 +369,7 @@ class DBConstructor: public Constructor {
   DB* db_;
 };
 
-enum TestType {
-  TABLE_TEST,
-  BLOCK_TEST,
-  MEMTABLE_TEST,
-  DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
 
 struct TestArgs {
   TestType type;
@@ -410,33 +378,33 @@ struct TestArgs {
 };
 
 static const TestArgs kTestArgList[] = {
-  { TABLE_TEST, false, 16 },
-  { TABLE_TEST, false, 1 },
-  { TABLE_TEST, false, 1024 },
-  { TABLE_TEST, true, 16 },
-  { TABLE_TEST, true, 1 },
-  { TABLE_TEST, true, 1024 },
+    {TABLE_TEST, false, 16},
+    {TABLE_TEST, false, 1},
+    {TABLE_TEST, false, 1024},
+    {TABLE_TEST, true, 16},
+    {TABLE_TEST, true, 1},
+    {TABLE_TEST, true, 1024},
 
-  { BLOCK_TEST, false, 16 },
-  { BLOCK_TEST, false, 1 },
-  { BLOCK_TEST, false, 1024 },
-  { BLOCK_TEST, true, 16 },
-  { BLOCK_TEST, true, 1 },
-  { BLOCK_TEST, true, 1024 },
+    {BLOCK_TEST, false, 16},
+    {BLOCK_TEST, false, 1},
+    {BLOCK_TEST, false, 1024},
+    {BLOCK_TEST, true, 16},
+    {BLOCK_TEST, true, 1},
+    {BLOCK_TEST, true, 1024},
 
-  // Restart interval does not matter for memtables
-  { MEMTABLE_TEST, false, 16 },
-  { MEMTABLE_TEST, true, 16 },
+    // Restart interval does not matter for memtables
+    {MEMTABLE_TEST, false, 16},
+    {MEMTABLE_TEST, true, 16},
 
-  // Do not bother with restart interval variations for DB
-  { DB_TEST, false, 16 },
-  { DB_TEST, true, 16 },
+    // Do not bother with restart interval variations for DB
+    {DB_TEST, false, 16},
+    {DB_TEST, true, 16},
 };
 static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
 
 class Harness {
  public:
-  Harness() : constructor_(nullptr) { }
+  Harness() : constructor_(nullptr) {}
 
   void Init(const TestArgs& args) {
     delete constructor_;
@@ -466,9 +434,7 @@ class Harness {
     }
   }
 
-  ~Harness() {
-    delete constructor_;
-  }
+  ~Harness() { delete constructor_; }
 
   void Add(const std::string& key, const std::string& value) {
     constructor_->Add(key, value);
@@ -490,8 +456,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToFirst();
     for (KVMap::const_iterator model_iter = data.begin();
-         model_iter != data.end();
-         ++model_iter) {
+         model_iter != data.end(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Next();
     }
@@ -505,8 +470,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToLast();
     for (KVMap::const_reverse_iterator model_iter = data.rbegin();
-         model_iter != data.rend();
-         ++model_iter) {
+         model_iter != data.rend(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Prev();
     }
@@ -514,8 +478,7 @@ class Harness {
     delete iter;
   }
 
-  void TestRandomAccess(Random* rnd,
-                        const std::vector<std::string>& keys,
+  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                         const KVMap& data) {
     static const bool kVerbose = false;
     Iterator* iter = constructor_->NewIterator();
@@ -546,8 +509,8 @@ class Harness {
         case 2: {
           std::string key = PickRandomKey(rnd, keys);
           model_iter = data.lower_bound(key);
-          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
-                                EscapeString(key).c_str());
+          if (kVerbose)
+            fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
           iter->Seek(Slice(key));
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
           break;
@@ -558,7 +521,7 @@ class Harness {
             if (kVerbose) fprintf(stderr, "Prev\n");
             iter->Prev();
             if (model_iter == data.begin()) {
-              model_iter = data.end();   // Wrap around to invalid value
+              model_iter = data.end();  // Wrap around to invalid value
             } else {
               --model_iter;
             }
@@ -621,8 +584,8 @@ class Harness {
           break;
         case 1: {
           // Attempt to return something smaller than an existing key
-          if (result.size() > 0 && result[result.size()-1] > '\0') {
-            result[result.size()-1]--;
+          if (!result.empty() && result[result.size() - 1] > '\0') {
+            result[result.size() - 1]--;
           }
           break;
         }
@@ -720,8 +683,8 @@ TEST(Harness, Randomized) {
     for (int num_entries = 0; num_entries < 2000;
          num_entries += (num_entries < 50 ? 1 : 200)) {
       if ((num_entries % 10) == 0) {
-        fprintf(stderr, "case %d of %d: num_entries = %d\n",
-                (i + 1), int(kNumTestArgs), num_entries);
+        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+                int(kNumTestArgs), num_entries);
       }
       for (int e = 0; e < num_entries; e++) {
         std::string v;
@@ -735,7 +698,7 @@ TEST(Harness, Randomized) {
 
 TEST(Harness, RandomizedLongDB) {
   Random rnd(test::RandomSeed());
-  TestArgs args = { DB_TEST, false, 16 };
+  TestArgs args = {DB_TEST, false, 16};
   Init(args);
   int num_entries = 100000;
   for (int e = 0; e < num_entries; e++) {
@@ -757,7 +720,7 @@ TEST(Harness, RandomizedLongDB) {
   ASSERT_GT(files, 0);
 }
 
-class MemTableTest { };
+class MemTableTest {};
 
 TEST(MemTableTest, Simple) {
   InternalKeyComparator cmp(BytewiseComparator());
@@ -774,8 +737,7 @@ TEST(MemTableTest, Simple) {
   Iterator* iter = memtable->NewIterator();
   iter->SeekToFirst();
   while (iter->Valid()) {
-    fprintf(stderr, "key: '%s' -> '%s'\n",
-            iter->key().ToString().c_str(),
+    fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
             iter->value().ToString().c_str());
     iter->Next();
   }
@@ -788,14 +750,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
     fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
+            (unsigned long long)(val), (unsigned long long)(low),
             (unsigned long long)(high));
   }
   return result;
 }
 
-class TableTest { };
+class TableTest {};
 
 TEST(TableTest, ApproximateOffsetOfPlain) {
   TableConstructor c(BytewiseComparator());
@@ -813,18 +774,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) {
   options.compression = kNoCompression;
   c.Finish(options, &keys, &kvmap);
 
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"),      0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"),   10000,  11000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"),  210000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"),  610000, 612000));
-
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
 }
 
 static bool SnappyCompressionSupported() {
@@ -855,7 +815,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 
   // Expected upper and lower bounds of space used by compressible strings.
   static const int kSlop = 1000;  // Compressor effectiveness varies.
-  const int expected = 2500;  // 10000 * compression ratio (0.25)
+  const int expected = 2500;      // 10000 * compression ratio (0.25)
   const int min_z = expected - kSlop;
   const int max_z = expected + kSlop;
 
@@ -871,6 +831,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc
index 4e6f420..5340a4d 100644
--- a/table/two_level_iterator.cc
+++ b/table/two_level_iterator.cc
@@ -15,13 +15,10 @@ namespace {
 
 typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);
 
-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
  public:
-  TwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options);
+  TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+                   void* arg, const ReadOptions& options);
 
   virtual ~TwoLevelIterator();
 
@@ -31,9 +28,7 @@ class TwoLevelIterator: public Iterator {
   virtual void Next();
   virtual void Prev();
 
-  virtual bool Valid() const {
-    return data_iter_.Valid();
-  }
+  virtual bool Valid() const { return data_iter_.Valid(); }
   virtual Slice key() const {
     assert(Valid());
     return data_iter_.key();
@@ -67,26 +62,22 @@ class TwoLevelIterator: public Iterator {
   const ReadOptions options_;
   Status status_;
   IteratorWrapper index_iter_;
-  IteratorWrapper data_iter_; // May be nullptr
+  IteratorWrapper data_iter_;  // May be nullptr
   // If data_iter_ is non-null, then "data_block_handle_" holds the
   // "index_value" passed to block_function_ to create the data_iter_.
   std::string data_block_handle_;
 };
 
-TwoLevelIterator::TwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+                                   BlockFunction block_function, void* arg,
+                                   const ReadOptions& options)
     : block_function_(block_function),
       arg_(arg),
       options_(options),
       index_iter_(index_iter),
-      data_iter_(nullptr) {
-}
+      data_iter_(nullptr) {}
 
-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() {}
 
 void TwoLevelIterator::Seek(const Slice& target) {
   index_iter_.Seek(target);
@@ -121,7 +112,6 @@ void TwoLevelIterator::Prev() {
   SkipEmptyDataBlocksBackward();
 }
 
-
 void TwoLevelIterator::SkipEmptyDataBlocksForward() {
   while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
     // Move to next block
@@ -158,7 +148,8 @@ void TwoLevelIterator::InitDataBlock() {
     SetDataIterator(nullptr);
   } else {
     Slice handle = index_iter_.value();
-    if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) {
+    if (data_iter_.iter() != nullptr &&
+        handle.compare(data_block_handle_) == 0) {
       // data_iter_ is already constructed with this iterator, so
       // no need to change anything
     } else {
@@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() {
 
 }  // namespace
 
-Iterator* NewTwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+                              BlockFunction block_function, void* arg,
+                              const ReadOptions& options) {
   return new TwoLevelIterator(index_iter, block_function, arg, options);
 }
 
diff --git a/table/two_level_iterator.h b/table/two_level_iterator.h
index a93ba89..81ffe80 100644
--- a/table/two_level_iterator.h
+++ b/table/two_level_iterator.h
@@ -22,12 +22,9 @@ struct ReadOptions;
 // an iterator over the contents of the corresponding block.
 Iterator* NewTwoLevelIterator(
     Iterator* index_iter,
-    Iterator* (*block_function)(
-        void* arg,
-        const ReadOptions& options,
-        const Slice& index_value),
-    void* arg,
-    const ReadOptions& options);
+    Iterator* (*block_function)(void* arg, const ReadOptions& options,
+                                const Slice& index_value),
+    void* arg, const ReadOptions& options);
 
 }  // namespace leveldb
 
diff --git a/util/arena.cc b/util/arena.cc
index a496ad0..eadec8a 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -39,8 +39,9 @@ char* Arena::AllocateFallback(size_t bytes) {
 
 char* Arena::AllocateAligned(size_t bytes) {
   const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
-  assert((align & (align-1)) == 0);   // Pointer size should be a power of 2
-  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+  static_assert((align & (align - 1)) == 0,
+                "Pointer size should be a power of 2");
+  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
   size_t slop = (current_mod == 0 ? 0 : align - current_mod);
   size_t needed = bytes + slop;
   char* result;
@@ -52,7 +53,7 @@ char* Arena::AllocateAligned(size_t bytes) {
     // AllocateFallback always returned aligned memory
     result = AllocateFallback(bytes);
   }
-  assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+  assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
   return result;
 }
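
A quick restatement of the alignment math in the AllocateAligned hunk above: the allocator pads the bump pointer with a "slop" of align minus (pointer mod align) bytes so the returned block starts on an align-byte boundary, where align is at least 8, or sizeof(void*) if that is larger. A minimal standalone sketch of that computation, with an invented helper name:

#include <stddef.h>
#include <stdint.h>

// Padding needed so an allocation starting at alloc_ptr lands on an
// align-byte boundary; align must be a power of two, as the static_assert
// in the patch enforces.
static size_t AlignmentSlop(const char* alloc_ptr, size_t align) {
  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr) & (align - 1);
  return current_mod == 0 ? 0 : align - current_mod;
}

// Example: with align = 8 and a pointer whose low bits are 0x3, the slop is
// 5, so 5 + bytes are consumed and the returned block is 8-byte aligned.
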
 
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 58e870e..f34095c 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -9,11 +9,9 @@
 
 namespace leveldb {
 
-class ArenaTest { };
+class ArenaTest {};
 
-TEST(ArenaTest, Empty) {
-  Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }
 
 TEST(ArenaTest, Simple) {
   std::vector<std::pair<size_t, char*> > allocated;
@@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) {
     if (i % (N / 10) == 0) {
       s = i;
     } else {
-      s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
-          (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+      s = rnd.OneIn(4000)
+              ? rnd.Uniform(6000)
+              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
     }
     if (s == 0) {
       // Our arena disallows size 0 allocations.
@@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) {
     bytes += s;
     allocated.push_back(std::make_pair(s, r));
     ASSERT_GE(arena.MemoryUsage(), bytes);
-    if (i > N/10) {
+    if (i > N / 10) {
       ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
     }
   }
@@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/bloom.cc b/util/bloom.cc
index bf3e4ca..097ce7a 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -20,17 +20,14 @@ class BloomFilterPolicy : public FilterPolicy {
   size_t k_;
 
  public:
-  explicit BloomFilterPolicy(int bits_per_key)
-      : bits_per_key_(bits_per_key) {
+  explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
     // We intentionally round down to reduce probing cost a little bit
     k_ = static_cast<size_t>(bits_per_key * 0.69);  // 0.69 =~ ln(2)
     if (k_ < 1) k_ = 1;
     if (k_ > 30) k_ = 30;
   }
 
-  virtual const char* Name() const {
-    return "leveldb.BuiltinBloomFilter2";
-  }
+  virtual const char* Name() const { return "leveldb.BuiltinBloomFilter2"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     // Compute bloom filter size (in both bits and bytes)
@@ -54,7 +51,7 @@ class BloomFilterPolicy : public FilterPolicy {
       const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
       for (size_t j = 0; j < k_; j++) {
         const uint32_t bitpos = h % bits;
-        array[bitpos/8] |= (1 << (bitpos % 8));
+        array[bitpos / 8] |= (1 << (bitpos % 8));
         h += delta;
       }
     }
@@ -69,7 +66,7 @@ class BloomFilterPolicy : public FilterPolicy {
 
     // Use the encoded k so that we can read filters generated by
     // bloom filters created using different parameters.
-    const size_t k = array[len-1];
+    const size_t k = array[len - 1];
     if (k > 30) {
       // Reserved for potentially new encodings for short bloom filters.
       // Consider it a match.
@@ -80,13 +77,13 @@ class BloomFilterPolicy : public FilterPolicy {
     const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
     for (size_t j = 0; j < k; j++) {
       const uint32_t bitpos = h % bits;
-      if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+      if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
       h += delta;
     }
     return true;
   }
 };
-}
+}  // namespace
 
 const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
   return new BloomFilterPolicy(bits_per_key);
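
For readers skimming the BloomFilterPolicy hunks above: both CreateFilter and KeyMayMatch drive their k probes from a single 32-bit hash, using a 17-bit rotation of that hash as the step between probe positions, with k chosen as roughly bits_per_key times ln 2 and clamped to [1, 30]. The sketch below re-implements both sides of that scheme with an ad hoc ToyHash in place of leveldb's BloomHash; the Toy* names and the toy hash are illustrative only.

#include <stddef.h>
#include <stdint.h>

#include <string>
#include <vector>

// Stand-in for leveldb's BloomHash(); any reasonable 32-bit string hash
// works for the purpose of this sketch.
static uint32_t ToyHash(const std::string& key) {
  uint32_t h = 0xbc9f1d34;
  for (unsigned char c : key) h = h * 31 + c;
  return h;
}

// Mirror of CreateFilter(): set k bits per key, stepping by a rotated hash.
static std::string ToyCreateFilter(const std::vector<std::string>& keys,
                                   int bits_per_key) {
  size_t k = static_cast<size_t>(bits_per_key * 0.69);  // 0.69 =~ ln(2)
  if (k < 1) k = 1;
  if (k > 30) k = 30;

  size_t bits = keys.size() * bits_per_key;
  if (bits < 64) bits = 64;  // avoid a very high FP rate for tiny key sets
  const size_t bytes = (bits + 7) / 8;
  bits = bytes * 8;

  std::string filter(bytes, '\0');
  filter.push_back(static_cast<char>(k));  // remember k in the last byte
  char* array = &filter[0];
  for (const std::string& key : keys) {
    uint32_t h = ToyHash(key);
    const uint32_t delta = (h >> 17) | (h << 15);  // rotate right 17 bits
    for (size_t j = 0; j < k; j++) {
      const uint32_t bitpos = h % bits;
      array[bitpos / 8] |= (1 << (bitpos % 8));
      h += delta;
    }
  }
  return filter;
}

// Mirror of KeyMayMatch(): every probed bit must be set for a "maybe".
static bool ToyKeyMayMatch(const std::string& key, const std::string& filter) {
  if (filter.size() < 2) return false;
  const char* array = filter.data();
  const size_t bits = (filter.size() - 1) * 8;
  const size_t k = static_cast<unsigned char>(array[filter.size() - 1]);
  if (k > 30) return true;  // reserved encodings: report a match

  uint32_t h = ToyHash(key);
  const uint32_t delta = (h >> 17) | (h << 15);
  for (size_t j = 0; j < k; j++) {
    const uint32_t bitpos = h % bits;
    if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
    h += delta;
  }
  return true;
}

Because the create and match sides derive delta the same way, only one hash per key is ever computed, which is the point of the double-hashing trick.
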
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 1b87a2b..71c4115 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -25,20 +25,16 @@ class BloomTest {
   std::vector<std::string> keys_;
 
  public:
-  BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+  BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
 
-  ~BloomTest() {
-    delete policy_;
-  }
+  ~BloomTest() { delete policy_; }
 
   void Reset() {
     keys_.clear();
     filter_.clear();
   }
 
-  void Add(const Slice& s) {
-    keys_.push_back(s.ToString());
-  }
+  void Add(const Slice& s) { keys_.push_back(s.ToString()); }
 
   void Build() {
     std::vector<Slice> key_slices;
@@ -52,16 +48,14 @@ class BloomTest {
     if (kVerbose >= 2) DumpFilter();
   }
 
-  size_t FilterSize() const {
-    return filter_.size();
-  }
+  size_t FilterSize() const { return filter_.size(); }
 
   void DumpFilter() {
     fprintf(stderr, "F(");
-    for (size_t i = 0; i+1 < filter_.size(); i++) {
+    for (size_t i = 0; i + 1 < filter_.size(); i++) {
       const unsigned int c = static_cast<unsigned int>(filter_[i]);
       for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+        fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
       }
     }
     fprintf(stderr, ")\n");
@@ -87,8 +81,8 @@ class BloomTest {
 };
 
 TEST(BloomTest, EmptyFilter) {
-  ASSERT_TRUE(! Matches("hello"));
-  ASSERT_TRUE(! Matches("world"));
+  ASSERT_TRUE(!Matches("hello"));
+  ASSERT_TRUE(!Matches("world"));
 }
 
 TEST(BloomTest, Small) {
@@ -96,8 +90,8 @@ TEST(BloomTest, Small) {
   Add("world");
   ASSERT_TRUE(Matches("hello"));
   ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(! Matches("x"));
-  ASSERT_TRUE(! Matches("foo"));
+  ASSERT_TRUE(!Matches("x"));
+  ASSERT_TRUE(!Matches("foo"));
 }
 
 static int NextLength(int length) {
@@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) {
     double rate = FalsePositiveRate();
     if (kVerbose >= 1) {
       fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
+              rate * 100.0, length, static_cast<int>(FilterSize()));
     }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
-    else good_filters++;
+    ASSERT_LE(rate, 0.02);  // Must not be over 2%
+    if (rate > 0.0125)
+      mediocre_filters++;  // Allowed, but not too often
+    else
+      good_filters++;
   }
   if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
+    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+            mediocre_filters);
   }
-  ASSERT_LE(mediocre_filters, good_filters/5);
+  ASSERT_LE(mediocre_filters, good_filters / 5);
 }
 
 // Different bits-per-byte
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/cache.cc b/util/cache.cc
index 25b51b5..0f801cc 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -14,8 +14,7 @@
 
 namespace leveldb {
 
-Cache::~Cache() {
-}
+Cache::~Cache() {}
 
 namespace {
 
@@ -46,12 +45,12 @@ struct LRUHandle {
   LRUHandle* next_hash;
   LRUHandle* next;
   LRUHandle* prev;
-  size_t charge;      // TODO(opt): Only allow uint32_t?
+  size_t charge;  // TODO(opt): Only allow uint32_t?
   size_t key_length;
-  bool in_cache;      // Whether entry is in the cache.
-  uint32_t refs;      // References, including cache reference, if present.
-  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
-  char key_data[1];   // Beginning of key
+  bool in_cache;     // Whether entry is in the cache.
+  uint32_t refs;     // References, including cache reference, if present.
+  uint32_t hash;     // Hash of key(); used for fast sharding and comparisons
+  char key_data[1];  // Beginning of key
 
   Slice key() const {
     // next_ is only equal to this if the LRU handle is the list head of an
@@ -114,8 +113,7 @@ class HandleTable {
   // pointer to the trailing slot in the corresponding linked list.
   LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
     LRUHandle** ptr = &list_[hash & (length_ - 1)];
-    while (*ptr != nullptr &&
-           ((*ptr)->hash != hash || key != (*ptr)->key())) {
+    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
       ptr = &(*ptr)->next_hash;
     }
     return ptr;
@@ -158,8 +156,8 @@ class LRUCache {
   void SetCapacity(size_t capacity) { capacity_ = capacity; }
 
   // Like Cache methods, but with an extra "hash" parameter.
-  Cache::Handle* Insert(const Slice& key, uint32_t hash,
-                        void* value, size_t charge,
+  Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+                        size_t charge,
                         void (*deleter)(const Slice& key, void* value));
   Cache::Handle* Lookup(const Slice& key, uint32_t hash);
   void Release(Cache::Handle* handle);
@@ -172,7 +170,7 @@ class LRUCache {
 
  private:
   void LRU_Remove(LRUHandle* e);
-  void LRU_Append(LRUHandle*list, LRUHandle* e);
+  void LRU_Append(LRUHandle* list, LRUHandle* e);
   void Ref(LRUHandle* e);
   void Unref(LRUHandle* e);
   bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -206,7 +204,7 @@ LRUCache::LRUCache() : capacity_(0), usage_(0) {
 
 LRUCache::~LRUCache() {
   assert(in_use_.next == &in_use_);  // Error if caller has an unreleased handle
-  for (LRUHandle* e = lru_.next; e != &lru_; ) {
+  for (LRUHandle* e = lru_.next; e != &lru_;) {
     LRUHandle* next = e->next;
     assert(e->in_cache);
     e->in_cache = false;
@@ -265,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) {
   Unref(reinterpret_cast<LRUHandle*>(handle));
 }
 
-Cache::Handle* LRUCache::Insert(
-    const Slice& key, uint32_t hash, void* value, size_t charge,
-    void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+                                size_t charge,
+                                void (*deleter)(const Slice& key,
+                                                void* value)) {
   MutexLock l(&mutex_);
 
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      malloc(sizeof(LRUHandle)-1 + key.size()));
+  LRUHandle* e =
+      reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
   e->value = value;
   e->deleter = deleter;
   e->charge = charge;
@@ -346,19 +345,16 @@ class ShardedLRUCache : public Cache {
     return Hash(s.data(), s.size(), 0);
   }
 
-  static uint32_t Shard(uint32_t hash) {
-    return hash >> (32 - kNumShardBits);
-  }
+  static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
 
  public:
-  explicit ShardedLRUCache(size_t capacity)
-      : last_id_(0) {
+  explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
     const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
     for (int s = 0; s < kNumShards; s++) {
       shard_[s].SetCapacity(per_shard);
     }
   }
-  virtual ~ShardedLRUCache() { }
+  virtual ~ShardedLRUCache() {}
   virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                          void (*deleter)(const Slice& key, void* value)) {
     const uint32_t hash = HashSlice(key);
@@ -399,8 +395,6 @@ class ShardedLRUCache : public Cache {
 
 }  // end anonymous namespace
 
-Cache* NewLRUCache(size_t capacity) {
-  return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
 
 }  // namespace leveldb
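
The ShardedLRUCache hunks above hint at the overall structure: one hash per key picks a shard via its top bits, and the requested capacity is divided across shards with a round-up so the shards' combined capacity is at least the total. A small sketch of those two computations follows; kNumShardBits = 4 is an assumption, since the constant is defined outside the lines shown here.

#include <stddef.h>
#include <stdint.h>

static const int kNumShardBits = 4;  // assumed; defined elsewhere in cache.cc
static const int kNumShards = 1 << kNumShardBits;

// Top bits of the key hash select the shard, as in ShardedLRUCache::Shard().
static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }

// Split the total capacity across shards, rounding up so the sum of the
// per-shard capacities is never less than the requested total.
static size_t PerShardCapacity(size_t capacity) {
  return (capacity + (kNumShards - 1)) / kNumShards;
}
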
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 8647feb..d5c1a1d 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -37,13 +37,9 @@ class CacheTest {
   std::vector<int> deleted_values_;
   Cache* cache_;
 
-  CacheTest() : cache_(NewLRUCache(kCacheSize)) {
-    current_ = this;
-  }
+  CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
 
-  ~CacheTest() {
-    delete cache_;
-  }
+  ~CacheTest() { delete cache_; }
 
   int Lookup(int key) {
     Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
@@ -64,9 +60,7 @@ class CacheTest {
                           &CacheTest::Deleter);
   }
 
-  void Erase(int key) {
-    cache_->Erase(EncodeKey(key));
-  }
+  void Erase(int key) { cache_->Erase(EncodeKey(key)); }
 };
 CacheTest* CacheTest::current_;
 
@@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) {
 
   Insert(100, 101);
   ASSERT_EQ(101, Lookup(100));
-  ASSERT_EQ(-1,  Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(200));
+  ASSERT_EQ(-1, Lookup(300));
 
   Insert(200, 201);
   ASSERT_EQ(101, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(300));
 
   Insert(100, 102);
   ASSERT_EQ(102, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(300));
 
   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
@@ -100,14 +94,14 @@ TEST(CacheTest, Erase) {
   Insert(100, 101);
   Insert(200, 201);
   Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
+  ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 
   Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
+  ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
 }
@@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) {
   // Frequently used entry must be kept around,
   // as must things that are still in use.
   for (int i = 0; i < kCacheSize + 100; i++) {
-    Insert(1000+i, 2000+i);
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    Insert(1000 + i, 2000 + i);
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
     ASSERT_EQ(101, Lookup(100));
   }
   ASSERT_EQ(101, Lookup(100));
@@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) {
   // Overfill the cache, keeping handles on all inserted entries.
   std::vector<Cache::Handle*> h;
   for (int i = 0; i < kCacheSize + 100; i++) {
-    h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+    h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
   }
 
   // Check that all the entries can be found in the cache.
   for (int i = 0; i < h.size(); i++) {
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
   }
 
   for (int i = 0; i < h.size(); i++) {
@@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) {
   const int kHeavy = 10;
   int added = 0;
   int index = 0;
-  while (added < 2*kCacheSize) {
+  while (added < 2 * kCacheSize) {
     const int weight = (index & 1) ? kLight : kHeavy;
-    Insert(index, 1000+index, weight);
+    Insert(index, 1000 + index, weight);
     added += weight;
     index++;
   }
@@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) {
     int r = Lookup(i);
     if (r >= 0) {
       cached_weight += weight;
-      ASSERT_EQ(1000+i, r);
+      ASSERT_EQ(1000 + i, r);
     }
   }
-  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
 }
 
 TEST(CacheTest, NewId) {
@@ -229,6 +223,4 @@ TEST(CacheTest, ZeroSizeCache) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/coding.cc b/util/coding.cc
index 1a9e333..e2089df 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -6,29 +6,29 @@
 
 namespace leveldb {
 
-void EncodeFixed32(char* buf, uint32_t value) {
+void EncodeFixed32(char* dst, uint32_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
   }
 }
 
-void EncodeFixed64(char* buf, uint64_t value) {
+void EncodeFixed64(char* dst, uint64_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
-    buf[4] = (value >> 32) & 0xff;
-    buf[5] = (value >> 40) & 0xff;
-    buf[6] = (value >> 48) & 0xff;
-    buf[7] = (value >> 56) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
+    dst[4] = (value >> 32) & 0xff;
+    dst[5] = (value >> 40) & 0xff;
+    dst[6] = (value >> 48) & 0xff;
+    dst[7] = (value >> 56) & 0xff;
   }
 }
 
@@ -48,26 +48,26 @@ char* EncodeVarint32(char* dst, uint32_t v) {
   // Operate on characters as unsigneds
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   static const int B = 128;
-  if (v < (1<<7)) {
+  if (v < (1 << 7)) {
     *(ptr++) = v;
-  } else if (v < (1<<14)) {
+  } else if (v < (1 << 14)) {
     *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
+    *(ptr++) = v >> 7;
+  } else if (v < (1 << 21)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = v >> 14;
+  } else if (v < (1 << 28)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = v >> 21;
   } else {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = (v >> 21) | B;
+    *(ptr++) = v >> 28;
   }
   return reinterpret_cast<char*>(ptr);
 }
@@ -109,8 +109,7 @@ int VarintLength(uint64_t v) {
   return len;
 }
 
-const char* GetVarint32PtrFallback(const char* p,
-                                   const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value) {
   uint32_t result = 0;
   for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
@@ -181,8 +180,7 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
 
 bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
   uint32_t len;
-  if (GetVarint32(input, &len) &&
-      input->size() >= len) {
+  if (GetVarint32(input, &len) && input->size() >= len) {
     *result = Slice(input->data(), len);
     input->remove_prefix(len);
     return true;
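
The EncodeVarint32 hunk above spells the 7-bits-per-byte layout out case by case; as a compact cross-check, the loop-based sketch below writes and reads the same format (low 7 bits per byte, 0x80 as the continuation flag), with the decoder shaped like GetVarint32PtrFallback from the same hunk. The Toy* names are placeholders, not part of the patch.

#include <stdint.h>

#include <string>

// Append v in varint form: 7 payload bits per byte, high bit set on every
// byte except the last, so any uint32_t fits in at most five bytes.
static void ToyPutVarint32(std::string* dst, uint32_t v) {
  while (v >= 128) {
    dst->push_back(static_cast<char>((v & 127) | 128));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

// Decode starting at p, stopping before limit; returns the position just
// past the varint, or nullptr on truncation or overflow.
static const char* ToyGetVarint32(const char* p, const char* limit,
                                  uint32_t* value) {
  uint32_t result = 0;
  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
    const uint32_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 127) << shift;
    if ((byte & 128) == 0) {
      *value = result;
      return p;
    }
  }
  return nullptr;
}
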
diff --git a/util/coding.h b/util/coding.h
index f0fa2cb..d9eeaa3 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -64,10 +64,10 @@ inline uint32_t DecodeFixed32(const char* ptr) {
     memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
     return result;
   } else {
-    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
+    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
   }
 }
 
@@ -85,11 +85,9 @@ inline uint64_t DecodeFixed64(const char* ptr) {
 }
 
 // Internal routine for use by fallback path of GetVarint32Ptr
-const char* GetVarint32PtrFallback(const char* p,
-                                   const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
-                                  const char* limit,
+inline const char* GetVarint32Ptr(const char* p, const char* limit,
                                   uint32_t* value) {
   if (p < limit) {
     uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
diff --git a/util/coding_test.cc b/util/coding_test.cc
index d315e19..0d2a0c5 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -9,7 +9,7 @@
 
 namespace leveldb {
 
-class Coding { };
+class Coding {};
 
 TEST(Coding, Fixed32) {
   std::string s;
@@ -39,15 +39,15 @@ TEST(Coding, Fixed64) {
     uint64_t v = static_cast<uint64_t>(1) << power;
     uint64_t actual;
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v-1, actual);
+    ASSERT_EQ(v - 1, actual);
     p += sizeof(uint64_t);
 
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+0, actual);
+    ASSERT_EQ(v + 0, actual);
     p += sizeof(uint64_t);
 
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+1, actual);
+    ASSERT_EQ(v + 1, actual);
     p += sizeof(uint64_t);
   }
 }
@@ -108,8 +108,8 @@ TEST(Coding, Varint64) {
     // Test values near powers of two
     const uint64_t power = 1ull << k;
     values.push_back(power);
-    values.push_back(power-1);
-    values.push_back(power+1);
+    values.push_back(power - 1);
+    values.push_back(power + 1);
   }
 
   std::string s;
@@ -134,8 +134,8 @@ TEST(Coding, Varint64) {
 TEST(Coding, Varint32Overflow) {
   uint32_t result;
   std::string input("\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }
 
 TEST(Coding, Varint32Truncation) {
@@ -146,16 +146,16 @@ TEST(Coding, Varint32Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }
 
 TEST(Coding, Varint64Overflow) {
   uint64_t result;
   std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }
 
 TEST(Coding, Varint64Truncation) {
@@ -166,8 +166,8 @@ TEST(Coding, Varint64Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }
 
@@ -193,6 +193,4 @@ TEST(Coding, Strings) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/comparator.cc b/util/comparator.cc
index e1e2963..26d1eb3 100644
--- a/util/comparator.cc
+++ b/util/comparator.cc
@@ -13,24 +13,21 @@
 
 namespace leveldb {
 
-Comparator::~Comparator() { }
+Comparator::~Comparator() {}
 
 namespace {
 class BytewiseComparatorImpl : public Comparator {
  public:
-  BytewiseComparatorImpl() { }
+  BytewiseComparatorImpl() {}
 
-  virtual const char* Name() const {
-    return "leveldb.BytewiseComparator";
-  }
+  virtual const char* Name() const { return "leveldb.BytewiseComparator"; }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return a.compare(b);
   }
 
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     // Find length of common prefix
     size_t min_length = std::min(start->size(), limit.size());
     size_t diff_index = 0;
@@ -59,7 +56,7 @@ class BytewiseComparatorImpl : public Comparator {
       const uint8_t byte = (*key)[i];
       if (byte != static_cast<uint8_t>(0xff)) {
         (*key)[i] = byte + 1;
-        key->resize(i+1);
+        key->resize(i + 1);
         return;
       }
     }
diff --git a/util/crc32c.cc b/util/crc32c.cc
index 4f1d80f..c2e61f7 100644
--- a/util/crc32c.cc
+++ b/util/crc32c.cc
@@ -256,8 +256,8 @@ inline uint32_t ReadUint32LE(const uint8_t* buffer) {
 template <int N>
 constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
   return reinterpret_cast<uint8_t*>(
-      (reinterpret_cast<uintptr_t>(pointer) + (N - 1))
-      & ~static_cast<uintptr_t>(N - 1));
+      (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+      ~static_cast<uintptr_t>(N - 1));
 }
 
 }  // namespace
@@ -273,14 +273,14 @@ static bool CanAccelerateCRC32C() {
   return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
 }
 
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
   static bool accelerate = CanAccelerateCRC32C();
   if (accelerate) {
-    return port::AcceleratedCRC32C(crc, buf, size);
+    return port::AcceleratedCRC32C(crc, data, n);
   }
 
-  const uint8_t* p = reinterpret_cast<const uint8_t*>(buf);
-  const uint8_t* e = p + size;
+  const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+  const uint8_t* e = p + n;
   uint32_t l = crc ^ kCRC32Xor;
 
 // Process one byte at a time.
diff --git a/util/crc32c.h b/util/crc32c.h
index 7864688..98fabb0 100644
--- a/util/crc32c.h
+++ b/util/crc32c.h
@@ -17,9 +17,7 @@ namespace crc32c {
 uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
 
 // Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
-  return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
 
 static const uint32_t kMaskDelta = 0xa282ead8ul;
 
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 4b957ee..dbd2ba4 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -8,7 +8,7 @@
 namespace leveldb {
 namespace crc32c {
 
-class CRC { };
+class CRC {};
 
 TEST(CRC, StandardResults) {
   // From rfc3720 section B.4.
@@ -31,29 +31,18 @@ TEST(CRC, StandardResults) {
   ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
 
   unsigned char data[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   };
   ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
 }
 
-TEST(CRC, Values) {
-  ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
 
 TEST(CRC, Extend) {
-  ASSERT_EQ(Value("hello world", 11),
-            Extend(Value("hello ", 6), "world", 5));
+  ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
 }
 
 TEST(CRC, Mask) {
@@ -67,6 +56,4 @@ TEST(CRC, Mask) {
 }  // namespace crc32c
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/env.cc b/util/env.cc
index 40a1363..6cd5f2e 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -6,27 +6,21 @@
 
 namespace leveldb {
 
-Env::~Env() {
-}
+Env::~Env() {}
 
 Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
   return Status::NotSupported("NewAppendableFile", fname);
 }
 
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() {}
 
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() {}
 
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() {}
 
-Logger::~Logger() {
-}
+Logger::~Logger() {}
 
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() {}
 
 void Log(Logger* info_log, const char* format, ...) {
   if (info_log != nullptr) {
@@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) {
 }
 
 static Status DoWriteStringToFile(Env* env, const Slice& data,
-                                  const std::string& fname,
-                                  bool should_sync) {
+                                  const std::string& fname, bool should_sync) {
   WritableFile* file;
   Status s = env->NewWritableFile(fname, &file);
   if (!s.ok()) {
@@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
   return s;
 }
 
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
 
 }  // namespace leveldb
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 362adb3..8c74f5a 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -32,8 +32,8 @@
 #include "leveldb/status.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
-#include "util/posix_logger.h"
 #include "util/env_posix_test_helper.h"
+#include "util/posix_logger.h"
 
 namespace leveldb {
 
@@ -76,8 +76,7 @@ class Limiter {
     int old_acquires_allowed =
         acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
 
-    if (old_acquires_allowed > 0)
-      return true;
+    if (old_acquires_allowed > 0) return true;
 
     acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
     return false;
@@ -85,9 +84,7 @@ class Limiter {
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() {
-    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
-  }
+  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
 
  private:
   // The number of available resources.
@@ -193,7 +190,7 @@ class PosixRandomAccessFile final : public RandomAccessFile {
 
  private:
   const bool has_permanent_fd_;  // If false, the file is opened on every read.
-  const int fd_;  // -1 if has_permanent_fd_ is false.
+  const int fd_;                 // -1 if has_permanent_fd_ is false.
   Limiter* const fd_limiter_;
   const std::string filename_;
 };
@@ -214,7 +211,9 @@ class PosixMmapReadableFile final : public RandomAccessFile {
   // instance is destroyed.
   PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
                         Limiter* mmap_limiter)
-      : mmap_base_(mmap_base), length_(length), mmap_limiter_(mmap_limiter),
+      : mmap_base_(mmap_base),
+        length_(length),
+        mmap_limiter_(mmap_limiter),
         filename_(std::move(filename)) {}
 
   ~PosixMmapReadableFile() override {
@@ -243,8 +242,11 @@ class PosixMmapReadableFile final : public RandomAccessFile {
 class PosixWritableFile final : public WritableFile {
  public:
   PosixWritableFile(std::string filename, int fd)
-      : pos_(0), fd_(fd), is_manifest_(IsManifest(filename)),
-        filename_(std::move(filename)), dirname_(Dirname(filename_)) {}
+      : pos_(0),
+        fd_(fd),
+        is_manifest_(IsManifest(filename)),
+        filename_(std::move(filename)),
+        dirname_(Dirname(filename_)) {}
 
   ~PosixWritableFile() override {
     if (fd_ >= 0) {
@@ -292,9 +294,7 @@ class PosixWritableFile final : public WritableFile {
     return status;
   }
 
-  Status Flush() override {
-    return FlushBuffer();
-  }
+  Status Flush() override { return FlushBuffer(); }
 
   Status Sync() override {
     // Ensure new files referred to by the manifest are in the filesystem.
@@ -517,12 +517,12 @@ class PosixEnv : public Env {
     uint64_t file_size;
     Status status = GetFileSize(filename, &file_size);
     if (status.ok()) {
-      void* mmap_base = ::mmap(/*addr=*/nullptr, file_size, PROT_READ,
-                               MAP_SHARED, fd, 0);
+      void* mmap_base =
+          ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
       if (mmap_base != MAP_FAILED) {
-        *result = new PosixMmapReadableFile(
-            filename, reinterpret_cast<char*>(mmap_base), file_size,
-            &mmap_limiter_);
+        *result = new PosixMmapReadableFile(filename,
+                                            reinterpret_cast<char*>(mmap_base),
+                                            file_size, &mmap_limiter_);
       } else {
         status = PosixError(filename, errno);
       }
@@ -691,9 +691,7 @@ class PosixEnv : public Env {
     return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
   }
 
-  void SleepForMicroseconds(int micros) override {
-    ::usleep(micros);
-  }
+  void SleepForMicroseconds(int micros) override { ::usleep(micros); }
 
  private:
   void BackgroundThreadMain();
@@ -712,11 +710,10 @@ class PosixEnv : public Env {
     explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
         : function(function), arg(arg) {}
 
-    void (* const function)(void*);
+    void (*const function)(void*);
     void* const arg;
   };
 
-
   port::Mutex background_work_mutex_;
   port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
   bool started_background_thread_ GUARDED_BY(background_work_mutex_);
@@ -726,13 +723,11 @@ class PosixEnv : public Env {
 
   PosixLockTable locks_;  // Thread-safe.
   Limiter mmap_limiter_;  // Thread-safe.
-  Limiter fd_limiter_;  // Thread-safe.
+  Limiter fd_limiter_;    // Thread-safe.
 };
 
 // Return the maximum number of concurrent mmaps.
-int MaxMmaps() {
-  return g_mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
 
 // Return the maximum number of read-only files to keep open.
 int MaxOpenFiles() {
@@ -758,8 +753,7 @@ PosixEnv::PosixEnv()
     : background_work_cv_(&background_work_mutex_),
       started_background_thread_(false),
       mmap_limiter_(MaxMmaps()),
-      fd_limiter_(MaxOpenFiles()) {
-}
+      fd_limiter_(MaxOpenFiles()) {}
 
 void PosixEnv::Schedule(
     void (*background_work_function)(void* background_work_arg),
@@ -792,8 +786,7 @@ void PosixEnv::BackgroundThreadMain() {
     }
 
     assert(!background_work_queue_.empty());
-    auto background_work_function =
-        background_work_queue_.front().function;
+    auto background_work_function = background_work_queue_.front().function;
     void* background_work_arg = background_work_queue_.front().arg;
     background_work_queue_.pop();
 
@@ -816,7 +809,7 @@ namespace {
 //     static PlatformSingletonEnv default_env;
 //     return default_env.env();
 //   }
-template<typename EnvType>
+template <typename EnvType>
 class SingletonEnv {
  public:
   SingletonEnv() {
@@ -851,7 +844,7 @@ class SingletonEnv {
 };
 
 #if !defined(NDEBUG)
-template<typename EnvType>
+template <typename EnvType>
 std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
 #endif  // !defined(NDEBUG)
 
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index e28df9a..6a2a1fc 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -3,21 +3,19 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "leveldb/env.h"
-
 #include "port/port.h"
-#include "util/testharness.h"
 #include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
 
 namespace leveldb {
 
-static const int kDelayMicros = 100000;
 static const int kReadOnlyFileLimit = 4;
 static const int kMMapLimit = 4;
 
 class EnvPosixTest {
  public:
   Env* env_;
-  EnvPosixTest() : env_(Env::Default()) { }
+  EnvPosixTest() : env_(Env::Default()) {}
 
   static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
     EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
diff --git a/util/env_test.cc b/util/env_test.cc
index b204089..3e81261 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -16,13 +16,11 @@
 namespace leveldb {
 
 static const int kDelayMicros = 100000;
-static const int kReadOnlyFileLimit = 4;
-static const int kMMapLimit = 4;
 
 class EnvTest {
  public:
   Env* env_;
-  EnvTest() : env_(Env::Default()) { }
+  EnvTest() : env_(Env::Default()) {}
 };
 
 namespace {
@@ -97,7 +95,7 @@ TEST(EnvTest, RunMany) {
     const int id_;  // Order# for the execution of this callback.
 
     Callback(std::atomic<int>* last_id_ptr, int id)
-        : last_id_ptr_(last_id_ptr), id_(id) { }
+        : last_id_ptr_(last_id_ptr), id_(id) {}
 
     static void Run(void* arg) {
       Callback* callback = reinterpret_cast<Callback*>(arg);
@@ -125,7 +123,7 @@ struct State {
   int val GUARDED_BY(mu);
   int num_running GUARDED_BY(mu);
 
-  State(int val, int num_running) : val(val), num_running(num_running) { }
+  State(int val, int num_running) : val(val), num_running(num_running) {}
 };
 
 static void ThreadBody(void* arg) {
@@ -164,8 +162,8 @@ TEST(EnvTest, TestOpenNonExistentFile) {
   ASSERT_TRUE(!env_->FileExists(non_existent_file));
 
   RandomAccessFile* random_access_file;
-  Status status = env_->NewRandomAccessFile(
-      non_existent_file, &random_access_file);
+  Status status =
+      env_->NewRandomAccessFile(non_existent_file, &random_access_file);
   ASSERT_TRUE(status.IsNotFound());
 
   SequentialFile* sequential_file;
@@ -223,6 +221,4 @@ TEST(EnvTest, ReopenAppendableFile) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 14e41e9..c537938 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -122,8 +122,7 @@ class Limiter {
     int old_acquires_allowed =
         acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
 
-    if (old_acquires_allowed > 0)
-      return true;
+    if (old_acquires_allowed > 0) return true;
 
     acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
     return false;
@@ -131,9 +130,7 @@ class Limiter {
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() {
-    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
-  }
+  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
 
  private:
   // The number of available resources.
diff --git a/util/filter_policy.cc b/util/filter_policy.cc
index 7b045c8..90fd754 100644
--- a/util/filter_policy.cc
+++ b/util/filter_policy.cc
@@ -6,6 +6,6 @@
 
 namespace leveldb {
 
-FilterPolicy::~FilterPolicy() { }
+FilterPolicy::~FilterPolicy() {}
 
 }  // namespace leveldb
diff --git a/util/hash.cc b/util/hash.cc
index ed439ce..67dc134 100644
--- a/util/hash.cc
+++ b/util/hash.cc
@@ -2,15 +2,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <string.h>
-#include "util/coding.h"
 #include "util/hash.h"
 
+#include <string.h>
+
+#include "util/coding.h"
+
 // The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
 // between switch labels. The real definition should be provided externally.
 // This one is a fallback version for unsupported compilers.
 #ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
+#define FALLTHROUGH_INTENDED \
+  do {                       \
+  } while (0)
 #endif
 
 namespace leveldb {
@@ -48,5 +52,4 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
   return h;
 }
 
-
 }  // namespace leveldb
diff --git a/util/hash_test.cc b/util/hash_test.cc
index eaa1c92..8f579cc 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -7,7 +7,7 @@
 
 namespace leveldb {
 
-class HASH { };
+class HASH {};
 
 TEST(HASH, SignedUnsignedIssue) {
   const unsigned char data1[1] = {0x62};
@@ -15,18 +15,10 @@ TEST(HASH, SignedUnsignedIssue) {
   const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
   const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
   const unsigned char data5[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   };
 
   ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
@@ -49,6 +41,4 @@ TEST(HASH, SignedUnsignedIssue) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/histogram.cc b/util/histogram.cc
index bb95f58..65092c8 100644
--- a/util/histogram.cc
+++ b/util/histogram.cc
@@ -2,36 +2,174 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/histogram.h"
+
 #include <math.h>
 #include <stdio.h>
+
 #include "port/port.h"
-#include "util/histogram.h"
 
 namespace leveldb {
 
 const double Histogram::kBucketLimit[kNumBuckets] = {
-  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
-  50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
-  500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
-  3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
-  16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
-  70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
-  250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
-  900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
-  3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
-  9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
-  25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
-  70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
-  180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
-  450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
-  1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
-  2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
-  5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
-  1e200,
+    1,
+    2,
+    3,
+    4,
+    5,
+    6,
+    7,
+    8,
+    9,
+    10,
+    12,
+    14,
+    16,
+    18,
+    20,
+    25,
+    30,
+    35,
+    40,
+    45,
+    50,
+    60,
+    70,
+    80,
+    90,
+    100,
+    120,
+    140,
+    160,
+    180,
+    200,
+    250,
+    300,
+    350,
+    400,
+    450,
+    500,
+    600,
+    700,
+    800,
+    900,
+    1000,
+    1200,
+    1400,
+    1600,
+    1800,
+    2000,
+    2500,
+    3000,
+    3500,
+    4000,
+    4500,
+    5000,
+    6000,
+    7000,
+    8000,
+    9000,
+    10000,
+    12000,
+    14000,
+    16000,
+    18000,
+    20000,
+    25000,
+    30000,
+    35000,
+    40000,
+    45000,
+    50000,
+    60000,
+    70000,
+    80000,
+    90000,
+    100000,
+    120000,
+    140000,
+    160000,
+    180000,
+    200000,
+    250000,
+    300000,
+    350000,
+    400000,
+    450000,
+    500000,
+    600000,
+    700000,
+    800000,
+    900000,
+    1000000,
+    1200000,
+    1400000,
+    1600000,
+    1800000,
+    2000000,
+    2500000,
+    3000000,
+    3500000,
+    4000000,
+    4500000,
+    5000000,
+    6000000,
+    7000000,
+    8000000,
+    9000000,
+    10000000,
+    12000000,
+    14000000,
+    16000000,
+    18000000,
+    20000000,
+    25000000,
+    30000000,
+    35000000,
+    40000000,
+    45000000,
+    50000000,
+    60000000,
+    70000000,
+    80000000,
+    90000000,
+    100000000,
+    120000000,
+    140000000,
+    160000000,
+    180000000,
+    200000000,
+    250000000,
+    300000000,
+    350000000,
+    400000000,
+    450000000,
+    500000000,
+    600000000,
+    700000000,
+    800000000,
+    900000000,
+    1000000000,
+    1200000000,
+    1400000000,
+    1600000000,
+    1800000000,
+    2000000000,
+    2500000000.0,
+    3000000000.0,
+    3500000000.0,
+    4000000000.0,
+    4500000000.0,
+    5000000000.0,
+    6000000000.0,
+    7000000000.0,
+    8000000000.0,
+    9000000000.0,
+    1e200,
 };
 
 void Histogram::Clear() {
-  min_ = kBucketLimit[kNumBuckets-1];
+  min_ = kBucketLimit[kNumBuckets - 1];
   max_ = 0;
   num_ = 0;
   sum_ = 0;
@@ -66,9 +204,7 @@ void Histogram::Merge(const Histogram& other) {
   }
 }
 
-double Histogram::Median() const {
-  return Percentile(50.0);
-}
+double Histogram::Median() const { return Percentile(50.0); }
 
 double Histogram::Percentile(double p) const {
   double threshold = num_ * (p / 100.0);
@@ -77,7 +213,7 @@ double Histogram::Percentile(double p) const {
     sum += buckets_[b];
     if (sum >= threshold) {
       // Scale linearly within this bucket
-      double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+      double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
       double right_point = kBucketLimit[b];
       double left_sum = sum - buckets_[b];
       double right_sum = sum;
@@ -105,12 +241,10 @@ double Histogram::StandardDeviation() const {
 std::string Histogram::ToString() const {
   std::string r;
   char buf[200];
-  snprintf(buf, sizeof(buf),
-           "Count: %.0f  Average: %.4f  StdDev: %.2f\n",
-           num_, Average(), StandardDeviation());
+  snprintf(buf, sizeof(buf), "Count: %.0f  Average: %.4f  StdDev: %.2f\n", num_,
+           Average(), StandardDeviation());
   r.append(buf);
-  snprintf(buf, sizeof(buf),
-           "Min: %.4f  Median: %.4f  Max: %.4f\n",
+  snprintf(buf, sizeof(buf), "Min: %.4f  Median: %.4f  Max: %.4f\n",
            (num_ == 0.0 ? 0.0 : min_), Median(), max_);
   r.append(buf);
   r.append("------------------------------------------------------\n");
@@ -119,17 +253,16 @@ std::string Histogram::ToString() const {
   for (int b = 0; b < kNumBuckets; b++) {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
-    snprintf(buf, sizeof(buf),
-             "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
-             ((b == 0) ? 0.0 : kBucketLimit[b-1]),      // left
-             kBucketLimit[b],                           // right
-             buckets_[b],                               // count
-             mult * buckets_[b],                        // percentage
-             mult * sum);                               // cumulative percentage
+    snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+             ((b == 0) ? 0.0 : kBucketLimit[b - 1]),  // left
+             kBucketLimit[b],                         // right
+             buckets_[b],                             // count
+             mult * buckets_[b],                      // percentage
+             mult * sum);                             // cumulative percentage
     r.append(buf);
 
     // Add hash marks based on percentage; 20 marks for 100%.
-    int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
+    int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
     r.append(marks, '#');
     r.push_back('\n');
   }
diff --git a/util/histogram.h b/util/histogram.h
index 1ef9f3c..fe281a9 100644
--- a/util/histogram.h
+++ b/util/histogram.h
@@ -11,8 +11,8 @@ namespace leveldb {
 
 class Histogram {
  public:
-  Histogram() { }
-  ~Histogram() { }
+  Histogram() {}
+  ~Histogram() {}
 
   void Clear();
   void Add(double value);
diff --git a/util/logging.cc b/util/logging.cc
index 411a303..1ad8f1c 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -8,7 +8,9 @@
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
+
 #include <limits>
+
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
 
@@ -16,7 +18,7 @@ namespace leveldb {
 
 void AppendNumberTo(std::string* str, uint64_t num) {
   char buf[30];
-  snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
+  snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
   str->append(buf);
 }
 
@@ -62,8 +64,7 @@ bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
   const unsigned char* current = start;
   for (; current != end; ++current) {
     const unsigned char ch = *current;
-    if (ch < '0' || ch > '9')
-      break;
+    if (ch < '0' || ch > '9') break;
 
     // Overflow check.
     // kMaxUint64 / 10 is also constant and will be optimized away.
diff --git a/util/logging.h b/util/logging.h
index 13351a2..8ff2da8 100644
--- a/util/logging.h
+++ b/util/logging.h
@@ -8,9 +8,11 @@
 #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
 #define STORAGE_LEVELDB_UTIL_LOGGING_H_
 
-#include <stdio.h>
 #include <stdint.h>
+#include <stdio.h>
+
 #include <string>
+
 #include "port/port.h"
 
 namespace leveldb {
diff --git a/util/logging_test.cc b/util/logging_test.cc
index 11665fc..389cbeb 100644
--- a/util/logging_test.cc
+++ b/util/logging_test.cc
@@ -11,7 +11,7 @@
 
 namespace leveldb {
 
-class Logging { };
+class Logging {};
 
 TEST(Logging, NumberToString) {
   ASSERT_EQ("0", NumberToString(0));
@@ -140,6 +140,4 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/mutexlock.h b/util/mutexlock.h
index 08d709a..0cb2e25 100644
--- a/util/mutexlock.h
+++ b/util/mutexlock.h
@@ -22,8 +22,7 @@ namespace leveldb {
 
 class SCOPED_LOCKABLE MutexLock {
  public:
-  explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu)  {
+  explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     this->mu_->Lock();
   }
   ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
@@ -32,10 +31,9 @@ class SCOPED_LOCKABLE MutexLock {
   MutexLock& operator=(const MutexLock&) = delete;
 
  private:
-  port::Mutex *const mu_;
+  port::Mutex* const mu_;
 };
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
diff --git a/util/no_destructor.h b/util/no_destructor.h
index 4827e45..a0d3b87 100644
--- a/util/no_destructor.h
+++ b/util/no_destructor.h
@@ -13,7 +13,7 @@ namespace leveldb {
 // Wraps an instance whose destructor is never called.
 //
 // This is intended for use with function-level static variables.
-template<typename InstanceType>
+template <typename InstanceType>
 class NoDestructor {
  public:
   template <typename... ConstructorArgTypes>
@@ -23,8 +23,8 @@ class NoDestructor {
     static_assert(
         alignof(decltype(instance_storage_)) >= alignof(InstanceType),
         "instance_storage_ does not meet the instance's alignment requirement");
-    new (&instance_storage_) InstanceType(
-        std::forward<ConstructorArgTypes>(constructor_args)...);
+    new (&instance_storage_)
+        InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
   }
 
   ~NoDestructor() = default;
@@ -37,9 +37,8 @@ class NoDestructor {
   }
 
  private:
-  typename
-      std::aligned_storage<sizeof(InstanceType), alignof(InstanceType)>::type
-      instance_storage_;
+  typename std::aligned_storage<sizeof(InstanceType),
+                                alignof(InstanceType)>::type instance_storage_;
 };
 
 }  // namespace leveldb
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
index 7ce2631..b41caca 100644
--- a/util/no_destructor_test.cc
+++ b/util/no_destructor_test.cc
@@ -28,7 +28,7 @@ constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
 
 }  // namespace
 
-class NoDestructorTest { };
+class NoDestructorTest {};
 
 TEST(NoDestructorTest, StackInstance) {
   NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
@@ -44,6 +44,4 @@ TEST(NoDestructorTest, StaticInstance) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/options.cc b/util/options.cc
index 63284f8..62de5bf 100644
--- a/util/options.cc
+++ b/util/options.cc
@@ -9,9 +9,6 @@
 
 namespace leveldb {
 
-Options::Options()
-    : comparator(BytewiseComparator()),
-      env(Env::Default()) {
-}
+Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
 
 }  // namespace leveldb
diff --git a/util/posix_logger.h b/util/posix_logger.h
index 5685fa3..28e15d1 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -26,13 +26,9 @@ class PosixLogger final : public Logger {
   // Creates a logger that writes to the given file.
   //
   // The PosixLogger instance takes ownership of the file handle.
-  explicit PosixLogger(std::FILE* fp) : fp_(fp) {
-    assert(fp != nullptr);
-  }
+  explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
 
-  ~PosixLogger() override {
-    std::fclose(fp_);
-  }
+  ~PosixLogger() override { std::fclose(fp_); }
 
   void Logv(const char* format, va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
@@ -67,15 +63,10 @@ class PosixLogger final : public Logger {
 
       // Print the header into the buffer.
       int buffer_offset = snprintf(
-          buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
-          now_components.tm_year + 1900,
-          now_components.tm_mon + 1,
-          now_components.tm_mday,
-          now_components.tm_hour,
-          now_components.tm_min,
-          now_components.tm_sec,
-          static_cast<int>(now_timeval.tv_usec),
+          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.tm_year + 1900, now_components.tm_mon + 1,
+          now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
+          now_components.tm_sec, static_cast<int>(now_timeval.tv_usec),
           thread_id.c_str());
 
       // The header can be at most 28 characters (10 date + 15 time +
@@ -89,9 +80,9 @@ class PosixLogger final : public Logger {
       // Print the message into the buffer.
       std::va_list arguments_copy;
       va_copy(arguments_copy, arguments);
-      buffer_offset += std::vsnprintf(buffer + buffer_offset,
-                                      buffer_size - buffer_offset, format,
-                                      arguments_copy);
+      buffer_offset +=
+          std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+                         format, arguments_copy);
       va_end(arguments_copy);
 
       // The code below may append a newline at the end of the buffer, which
diff --git a/util/random.h b/util/random.h
index ddd51b1..76f7daf 100644
--- a/util/random.h
+++ b/util/random.h
@@ -15,6 +15,7 @@ namespace leveldb {
 class Random {
  private:
   uint32_t seed_;
+
  public:
   explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
     // Avoid bad seeds.
@@ -23,8 +24,8 @@ class Random {
     }
   }
   uint32_t Next() {
-    static const uint32_t M = 2147483647L;   // 2^31-1
-    static const uint64_t A = 16807;  // bits 14, 8, 7, 5, 2, 1, 0
+    static const uint32_t M = 2147483647L;  // 2^31-1
+    static const uint64_t A = 16807;        // bits 14, 8, 7, 5, 2, 1, 0
     // We are computing
     //       seed_ = (seed_ * A) % M,    where M = 2^31-1
     //
@@ -54,9 +55,7 @@ class Random {
   // Skewed: pick "base" uniformly from range [0,max_log] and then
   // return "base" random bits.  The effect is to pick a number in the
   // range [0,2^max_log-1] with exponential bias towards smaller numbers.
-  uint32_t Skewed(int max_log) {
-    return Uniform(1 << Uniform(max_log + 1));
-  }
+  uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
 };
 
 }  // namespace leveldb
diff --git a/util/status.cc b/util/status.cc
index 5591381..6ca8da6 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
-#include "port/port.h"
 #include "leveldb/status.h"
 
+#include <stdio.h>
+
+#include "port/port.h"
+
 namespace leveldb {
 
 const char* Status::CopyState(const char* state) {
@@ -59,8 +61,8 @@ std::string Status::ToString() const {
         type = "IO error: ";
         break;
       default:
-        snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
-                 static_cast<int>(code()));
+        snprintf(tmp, sizeof(tmp),
+                 "Unknown code(%d): ", static_cast<int>(code()));
         type = tmp;
         break;
     }
diff --git a/util/status_test.cc b/util/status_test.cc
index 7ed3b9e..2842319 100644
--- a/util/status_test.cc
+++ b/util/status_test.cc
@@ -37,6 +37,4 @@ TEST(Status, MoveConstructor) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/testharness.cc b/util/testharness.cc
index 37ba410..318ecfa 100644
--- a/util/testharness.cc
+++ b/util/testharness.cc
@@ -23,7 +23,7 @@ struct Test {
   void (*func)();
 };
 std::vector<Test>* tests;
-}
+}  // namespace
 
 bool RegisterTest(const char* base, const char* name, void (*func)()) {
   if (tests == nullptr) {
diff --git a/util/testharness.h b/util/testharness.h
index 8ee7972..72cd162 100644
--- a/util/testharness.h
+++ b/util/testharness.h
@@ -47,9 +47,7 @@ class Tester {
   std::stringstream ss_;
 
  public:
-  Tester(const char* f, int l)
-      : ok_(true), fname_(f), line_(l) {
-  }
+  Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
 
   ~Tester() {
     if (!ok_) {
@@ -74,14 +72,14 @@ class Tester {
     return *this;
   }
 
-#define BINARY_OP(name, op)                             \
-  template <class X, class Y>                           \
-  Tester& name(const X& x, const Y& y) {                \
-    if (!(x op y)) {                                    \
-      ss_ << " failed: " << x << (" " #op " ") << y;    \
-      ok_ = false;                                      \
-    }                                                   \
-    return *this;                                       \
+#define BINARY_OP(name, op)                          \
+  template <class X, class Y>                        \
+  Tester& name(const X& x, const Y& y) {             \
+    if (!(x op y)) {                                 \
+      ss_ << " failed: " << x << (" " #op " ") << y; \
+      ok_ = false;                                   \
+    }                                                \
+    return *this;                                    \
   }
 
   BINARY_OP(IsEq, ==)
@@ -104,28 +102,34 @@ class Tester {
 
 #define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
 #define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b))
-#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b))
-#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b))
-#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b))
-#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
-#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
+#define ASSERT_EQ(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
+#define ASSERT_NE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
+#define ASSERT_GE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
+#define ASSERT_GT(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
+#define ASSERT_LE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
+#define ASSERT_LT(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
 
 #define TCONCAT(a, b) TCONCAT1(a, b)
 #define TCONCAT1(a, b) a##b
 
-#define TEST(base, name)                                                       \
-class TCONCAT(_Test_, name) : public base {                                    \
- public:                                                                       \
-  void _Run();                                                                 \
-  static void _RunIt() {                                                       \
-    TCONCAT(_Test_, name) t;                                                   \
-    t._Run();                                                                  \
-  }                                                                            \
-};                                                                             \
-bool TCONCAT(_Test_ignored_, name) =                                           \
-  ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_, name)::_RunIt); \
-void TCONCAT(_Test_, name)::_Run()
+#define TEST(base, name)                                              \
+  class TCONCAT(_Test_, name) : public base {                         \
+   public:                                                            \
+    void _Run();                                                      \
+    static void _RunIt() {                                            \
+      TCONCAT(_Test_, name) t;                                        \
+      t._Run();                                                       \
+    }                                                                 \
+  };                                                                  \
+  bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
+      #base, #name, &TCONCAT(_Test_, name)::_RunIt);                  \
+  void TCONCAT(_Test_, name)::_Run()
 
 // Register the specified test.  Typically not used directly, but
 // invoked via the macro expansion of TEST.
diff --git a/util/testutil.cc b/util/testutil.cc
index 9d8079c..6b151b9 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -12,7 +12,7 @@ namespace test {
 Slice RandomString(Random* rnd, int len, std::string* dst) {
   dst->resize(len);
   for (int i = 0; i < len; i++) {
-    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));   // ' ' .. '~'
+    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));  // ' ' .. '~'
   }
   return Slice(*dst);
 }
@@ -20,9 +20,8 @@ Slice RandomString(Random* rnd, int len, std::string* dst) {
 std::string RandomKey(Random* rnd, int len) {
   // Make sure to generate a wide variety of characters so we
   // test the boundary conditions for short-key optimizations.
-  static const char kTestChars[] = {
-    '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
-  };
+  static const char kTestChars[] = {'\0', '\1', 'a',    'b',    'c',
+                                    'd',  'e',  '\xfd', '\xfe', '\xff'};
   std::string result;
   for (int i = 0; i < len; i++) {
     result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
@@ -30,9 +29,8 @@ std::string RandomKey(Random* rnd, int len) {
   return result;
 }
 
-
-Slice CompressibleString(Random* rnd, double compressed_fraction,
-                         size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+                         std::string* dst) {
   int raw = static_cast<int>(len * compressed_fraction);
   if (raw < 1) raw = 1;
   std::string raw_data;
diff --git a/util/testutil.h b/util/testutil.h
index a568824..bb4051b 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -24,8 +24,8 @@ std::string RandomKey(Random* rnd, int len);
 // Store in *dst a string of length "len" that will compress to
 // "N*compressed_fraction" bytes and return a Slice that references
 // the generated data.
-Slice CompressibleString(Random* rnd, double compressed_fraction,
-                         size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+                         std::string* dst);
 
 // A wrapper that allows injection of errors.
 class ErrorEnv : public EnvWrapper {
@@ -33,12 +33,11 @@ class ErrorEnv : public EnvWrapper {
   bool writable_file_error_;
   int num_writable_file_errors_;
 
-  ErrorEnv() : EnvWrapper(NewMemEnv(Env::Default())),
-               writable_file_error_(false),
-               num_writable_file_errors_(0) { }
-  ~ErrorEnv() override {
-    delete target();
-  }
+  ErrorEnv()
+      : EnvWrapper(NewMemEnv(Env::Default())),
+        writable_file_error_(false),
+        num_writable_file_errors_(0) {}
+  ~ErrorEnv() override { delete target(); }
 
   Status NewWritableFile(const std::string& fname,
                          WritableFile** result) override {
diff --git a/util/windows_logger.h b/util/windows_logger.h
index 96799bc..9296063 100644
--- a/util/windows_logger.h
+++ b/util/windows_logger.h
@@ -23,13 +23,9 @@ class WindowsLogger final : public Logger {
   // Creates a logger that writes to the given file.
   //
   // The PosixLogger instance takes ownership of the file handle.
-  explicit WindowsLogger(std::FILE* fp) : fp_(fp) {
-    assert(fp != nullptr);
-  }
+  explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
 
-  ~WindowsLogger() override {
-    std::fclose(fp_);
-  }
+  ~WindowsLogger() override { std::fclose(fp_); }
 
   void Logv(const char* format, va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
@@ -61,14 +57,9 @@ class WindowsLogger final : public Logger {
 
       // Print the header into the buffer.
       int buffer_offset = snprintf(
-          buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
-          now_components.wYear,
-          now_components.wMonth,
-          now_components.wDay,
-          now_components.wHour,
-          now_components.wMinute,
-          now_components.wSecond,
+          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.wYear, now_components.wMonth, now_components.wDay,
+          now_components.wHour, now_components.wMinute, now_components.wSecond,
           static_cast<int>(now_components.wMilliseconds * 1000),
           thread_id.c_str());
 
@@ -83,9 +74,9 @@ class WindowsLogger final : public Logger {
       // Print the message into the buffer.
       std::va_list arguments_copy;
       va_copy(arguments_copy, arguments);
-      buffer_offset += std::vsnprintf(buffer + buffer_offset,
-                                      buffer_size - buffer_offset, format,
-                                      arguments_copy);
+      buffer_offset +=
+          std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+                         format, arguments_copy);
       va_end(arguments_copy);
 
       // The code below may append a newline at the end of the buffer, which

From c784d63b931d07895833fb80185b10d44ad63cce Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 2 May 2019 13:59:41 -0700
Subject: [PATCH 075/181] Moved port/README to port/README.md.

Easier to read on sites that support Markdown (e.g., GitHub).

PiperOrigin-RevId: 246385089
---
 port/{README => README.md} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename port/{README => README.md} (100%)

diff --git a/port/README b/port/README.md
similarity index 100%
rename from port/README
rename to port/README.md

From 9bd23c767601a2420478eec158927882b879bada Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Fri, 3 May 2019 09:31:18 -0700
Subject: [PATCH 076/181] Correct class/structure declaration order.

1. Correct the class/struct declaration order to be in accordance with
   the Google C++ style guide[1].
2. For non-copyable classes, switch from declared-but-unimplemented
   private methods to explicitly deleted[2] methods (see the sketch
   after the references below).
3. Minor const and member initialization fixes.

[1] https://google.github.io/styleguide/cppguide.html#Declaration_Order
[2] http://eel.is/c++draft/dcl.fct.def.delete
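
For illustration only, a minimal sketch of the idiom in item 2; the
Widget class name is hypothetical and not part of this patch. Deleted
copy operations fail at compile time everywhere, whereas the old
private-and-undefined pattern only failed at link time when members or
friends attempted a copy.

  class Widget {
   public:
    Widget() = default;

    // New style: explicitly deleted copy operations. Any attempted
    // copy, including from members and friends, is a compile-time
    // error with a clear diagnostic.
    Widget(const Widget&) = delete;
    Widget& operator=(const Widget&) = delete;

    // Old style (pre-C++11), shown for contrast: declare the copy
    // operations private and never define them, so misuse by members
    // or friends surfaces only as a link error.
    // private:
    //  Widget(const Widget&);
    //  void operator=(const Widget&);
  };

Per item 1, the deleted operations sit with the other public
declarations right after the constructors, matching the style guide's
declaration order.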

PiperOrigin-RevId: 246521844
---
 db/autocompact_test.cc          |  11 +--
 db/c.cc                         |  36 ++++----
 db/corruption_test.cc           |  22 ++---
 db/db_impl.cc                   |  37 ++++----
 db/db_impl.h                    |  64 +++++++-------
 db/db_iter.cc                   |  10 +--
 db/db_test.cc                   |  24 ++---
 db/dbformat.h                   |   7 +-
 db/dumpfile.cc                  |   6 +-
 db/log_reader.h                 |  45 +++++-----
 db/log_test.cc                  | 151 ++++++++++++++++----------------
 db/log_writer.h                 |  11 ++-
 db/memtable.h                   |  14 +--
 db/repair.cc                    |  32 +++----
 db/skiplist.h                   |  31 ++++---
 db/table_cache.h                |   4 +-
 db/version_edit.h               |   4 +-
 db/version_set.h                |  50 +++++------
 db/version_set_test.cc          |   8 +-
 helpers/memenv/memenv.cc        |  13 +--
 helpers/memenv/memenv_test.cc   |   4 +-
 include/leveldb/db.h            |   6 +-
 include/leveldb/iterator.h      |  12 +--
 include/leveldb/options.h       |  14 +--
 include/leveldb/status.h        |  14 +--
 include/leveldb/table.h         |  10 ++-
 include/leveldb/table_builder.h |   2 +-
 include/leveldb/write_batch.h   |  13 +--
 table/block.h                   |  11 ++-
 table/block_builder.h           |   7 +-
 table/filter_block.h            |   7 +-
 table/format.h                  |  16 ++--
 table/merger.cc                 |   6 +-
 table/table_builder.cc          |  32 +++----
 util/arena.cc                   |   6 +-
 util/arena.h                    |   8 +-
 util/bloom.cc                   |   8 +-
 util/bloom_test.cc              |  10 +--
 util/cache_test.cc              |   4 +-
 util/env_posix_test.cc          |   7 +-
 util/env_test.cc                |   3 +-
 util/env_windows.cc             |  16 ++--
 util/env_windows_test.cc        |   7 +-
 util/histogram.h                |  16 ++--
 44 files changed, 414 insertions(+), 405 deletions(-)

diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index 00e3672..e6c97a0 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -12,11 +12,6 @@ namespace leveldb {
 
 class AutoCompactTest {
  public:
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
-
   AutoCompactTest() {
     dbname_ = test::TmpDir() + "/autocompact_test";
     tiny_cache_ = NewLRUCache(100);
@@ -47,6 +42,12 @@ class AutoCompactTest {
   }
 
   void DoReads(int n);
+
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
+  Options options_;
+  DB* db_;
 };
 
 static const int kValueSize = 200 * 1024;
diff --git a/db/c.cc b/db/c.cc
index 72f6daa..e0f3367 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -84,12 +84,6 @@ struct leveldb_filelock_t {
 };
 
 struct leveldb_comparator_t : public Comparator {
-  void* state_;
-  void (*destructor_)(void*);
-  int (*compare_)(void*, const char* a, size_t alen, const char* b,
-                  size_t blen);
-  const char* (*name_)(void*);
-
   virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
@@ -101,18 +95,15 @@ struct leveldb_comparator_t : public Comparator {
   // No-ops since the C binding does not support key shortening methods.
   virtual void FindShortestSeparator(std::string*, const Slice&) const {}
   virtual void FindShortSuccessor(std::string* key) const {}
+
+  void* state_;
+  void (*destructor_)(void*);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
+  const char* (*name_)(void*);
 };
 
 struct leveldb_filterpolicy_t : public FilterPolicy {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*create_)(void*, const char* const* key_array,
-                   const size_t* key_length_array, int num_keys,
-                   size_t* filter_length);
-  unsigned char (*key_match_)(void*, const char* key, size_t length,
-                              const char* filter, size_t filter_length);
-
   virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
 
   virtual const char* Name() const { return (*name_)(state_); }
@@ -134,6 +125,15 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
     return (*key_match_)(state_, key.data(), key.size(), filter.data(),
                          filter.size());
   }
+
+  void* state_;
+  void (*destructor_)(void*);
+  const char* (*name_)(void*);
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 };
 
 struct leveldb_env_t {
@@ -470,7 +470,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
   // they delegate to a NewBloomFilterPolicy() instead of user
   // supplied C functions.
   struct Wrapper : public leveldb_filterpolicy_t {
-    const FilterPolicy* rep_;
+    static void DoNothing(void*) {}
+
     ~Wrapper() { delete rep_; }
     const char* Name() const { return rep_->Name(); }
     void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@@ -479,7 +480,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) {}
+
+    const FilterPolicy* rep_;
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index e6f64ee..42f5237 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -22,20 +22,14 @@ static const int kValueSize = 1000;
 
 class CorruptionTest {
  public:
-  test::ErrorEnv env_;
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
-
-  CorruptionTest() {
-    tiny_cache_ = NewLRUCache(100);
+  CorruptionTest()
+      : db_(nullptr),
+        dbname_("/memenv/corruption_test"),
+        tiny_cache_(NewLRUCache(100)) {
     options_.env = &env_;
     options_.block_cache = tiny_cache_;
-    dbname_ = "/memenv/corruption_test";
     DestroyDB(dbname_, options_);
 
-    db_ = nullptr;
     options_.create_if_missing = true;
     Reopen();
     options_.create_if_missing = false;
@@ -185,6 +179,14 @@ class CorruptionTest {
     Random r(k);
     return test::RandomString(&r, kValueSize, storage);
   }
+
+  test::ErrorEnv env_;
+  Options options_;
+  DB* db_;
+
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
 };
 
 TEST(CorruptionTest, Recovery) {
diff --git a/db/db_impl.cc b/db/db_impl.cc
index bff2d62..761ebf6 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -42,38 +42,23 @@ const int kNumNonTableCacheFiles = 10;
 
 // Information kept for every waiting writer
 struct DBImpl::Writer {
+  explicit Writer(port::Mutex* mu)
+      : batch(nullptr), sync(false), done(false), cv(mu) {}
+
   Status status;
   WriteBatch* batch;
   bool sync;
   bool done;
   port::CondVar cv;
-
-  explicit Writer(port::Mutex* mu)
-      : batch(nullptr), sync(false), done(false), cv(mu) {}
 };
 
 struct DBImpl::CompactionState {
-  Compaction* const compaction;
-
-  // Sequence numbers < smallest_snapshot are not significant since we
-  // will never have to service a snapshot below smallest_snapshot.
-  // Therefore if we have seen a sequence number S <= smallest_snapshot,
-  // we can drop all entries for the same key with sequence numbers < S.
-  SequenceNumber smallest_snapshot;
-
   // Files produced by compaction
   struct Output {
     uint64_t number;
     uint64_t file_size;
     InternalKey smallest, largest;
   };
-  std::vector<Output> outputs;
-
-  // State kept for output being generated
-  WritableFile* outfile;
-  TableBuilder* builder;
-
-  uint64_t total_bytes;
 
   Output* current_output() { return &outputs[outputs.size() - 1]; }
 
@@ -83,6 +68,22 @@ struct DBImpl::CompactionState {
         outfile(nullptr),
         builder(nullptr),
         total_bytes(0) {}
+
+  Compaction* const compaction;
+
+  // Sequence numbers < smallest_snapshot are not significant since we
+  // will never have to service a snapshot below smallest_snapshot.
+  // Therefore if we have seen a sequence number S <= smallest_snapshot,
+  // we can drop all entries for the same key with sequence numbers < S.
+  SequenceNumber smallest_snapshot;
+
+  std::vector<Output> outputs;
+
+  // State kept for output being generated
+  WritableFile* outfile;
+  TableBuilder* builder;
+
+  uint64_t total_bytes;
 };
 
 // Fix user-supplied options to be reasonable
diff --git a/db/db_impl.h b/db/db_impl.h
index c895952..ae87d6e 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -29,6 +29,10 @@ class VersionSet;
 class DBImpl : public DB {
  public:
   DBImpl(const Options& options, const std::string& dbname);
+
+  DBImpl(const DBImpl&) = delete;
+  DBImpl& operator=(const DBImpl&) = delete;
+
   virtual ~DBImpl();
 
   // Implementations of the DB interface
@@ -71,6 +75,31 @@ class DBImpl : public DB {
   struct CompactionState;
   struct Writer;
 
+  // Information for a manual compaction
+  struct ManualCompaction {
+    int level;
+    bool done;
+    const InternalKey* begin;  // null means beginning of key range
+    const InternalKey* end;    // null means end of key range
+    InternalKey tmp_storage;   // Used to keep track of compaction progress
+  };
+
+  // Per level compaction stats.  stats_[level] stores the stats for
+  // compactions that produced data for the specified "level".
+  struct CompactionStats {
+    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+
+    void Add(const CompactionStats& c) {
+      this->micros += c.micros;
+      this->bytes_read += c.bytes_read;
+      this->bytes_written += c.bytes_written;
+    }
+
+    int64_t micros;
+    int64_t bytes_read;
+    int64_t bytes_written;
+  };
+
   Iterator* NewInternalIterator(const ReadOptions&,
                                 SequenceNumber* latest_snapshot,
                                 uint32_t* seed);
@@ -121,6 +150,10 @@ class DBImpl : public DB {
   Status InstallCompactionResults(CompactionState* compact)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
+  const Comparator* user_comparator() const {
+    return internal_comparator_.user_comparator();
+  }
+
   // Constant after construction
   Env* const env_;
   const InternalKeyComparator internal_comparator_;
@@ -161,14 +194,6 @@ class DBImpl : public DB {
   // Has a background compaction been scheduled or is running?
   bool background_compaction_scheduled_ GUARDED_BY(mutex_);
 
-  // Information for a manual compaction
-  struct ManualCompaction {
-    int level;
-    bool done;
-    const InternalKey* begin;  // null means beginning of key range
-    const InternalKey* end;    // null means end of key range
-    InternalKey tmp_storage;   // Used to keep track of compaction progress
-  };
   ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
 
   VersionSet* const versions_;
@@ -176,30 +201,7 @@ class DBImpl : public DB {
   // Have we encountered a background error in paranoid mode?
   Status bg_error_ GUARDED_BY(mutex_);
 
-  // Per level compaction stats.  stats_[level] stores the stats for
-  // compactions that produced data for the specified "level".
-  struct CompactionStats {
-    int64_t micros;
-    int64_t bytes_read;
-    int64_t bytes_written;
-
-    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
-
-    void Add(const CompactionStats& c) {
-      this->micros += c.micros;
-      this->bytes_read += c.bytes_read;
-      this->bytes_written += c.bytes_written;
-    }
-  };
   CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
-
-  // No copying allowed
-  DBImpl(const DBImpl&);
-  void operator=(const DBImpl&);
-
-  const Comparator* user_comparator() const {
-    return internal_comparator_.user_comparator();
-  }
 };
 
 // Sanitize db options.  The caller should delete result.info_log if
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 1e5b5e2..8ff288e 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -55,6 +55,10 @@ class DBIter : public Iterator {
         valid_(false),
         rnd_(seed),
         bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+
+  DBIter(const DBIter&) = delete;
+  DBIter& operator=(const DBIter&) = delete;
+
   virtual ~DBIter() { delete iter_; }
   virtual bool Valid() const { return valid_; }
   virtual Slice key() const {
@@ -106,19 +110,13 @@ class DBIter : public Iterator {
   const Comparator* const user_comparator_;
   Iterator* const iter_;
   SequenceNumber const sequence_;
-
   Status status_;
   std::string saved_key_;    // == current key when direction_==kReverse
   std::string saved_value_;  // == current raw value when direction_==kReverse
   Direction direction_;
   bool valid_;
-
   Random rnd_;
   size_t bytes_until_read_sampling_;
-
-  // No copying allowed
-  DBIter(const DBIter&);
-  void operator=(const DBIter&);
 };
 
 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
diff --git a/db/db_test.cc b/db/db_test.cc
index 4343216..78296d5 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -40,10 +40,6 @@ static std::string RandomKey(Random* rnd) {
 
 namespace {
 class AtomicCounter {
- private:
-  port::Mutex mu_;
-  int count_ GUARDED_BY(mu_);
-
  public:
   AtomicCounter() : count_(0) {}
   void Increment() { IncrementBy(1); }
@@ -59,6 +55,10 @@ class AtomicCounter {
     MutexLock l(&mu_);
     count_ = 0;
   }
+
+ private:
+  port::Mutex mu_;
+  int count_ GUARDED_BY(mu_);
 };
 
 void DelayMilliseconds(int millis) {
@@ -227,13 +227,6 @@ class SpecialEnv : public EnvWrapper {
 };
 
 class DBTest {
- private:
-  const FilterPolicy* filter_policy_;
-
-  // Sequence of option configurations to try
-  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
-  int option_config_;
-
  public:
   std::string dbname_;
   SpecialEnv* env_;
@@ -241,7 +234,7 @@ class DBTest {
 
   Options last_options_;
 
-  DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
+  DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
     filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
@@ -533,6 +526,13 @@ class DBTest {
     }
     return files_renamed;
   }
+
+ private:
+  // Sequence of option configurations to try
+  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
+
+  const FilterPolicy* filter_policy_;
+  int option_config_;
 };
 
 TEST(DBTest, Empty) {
diff --git a/db/dbformat.h b/db/dbformat.h
index bdc23b8..013028a 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -181,6 +181,9 @@ class LookupKey {
   // the specified sequence number.
   LookupKey(const Slice& user_key, SequenceNumber sequence);
 
+  LookupKey(const LookupKey&) = delete;
+  LookupKey& operator=(const LookupKey&) = delete;
+
   ~LookupKey();
 
   // Return a key suitable for lookup in a MemTable.
@@ -204,10 +207,6 @@ class LookupKey {
   const char* kstart_;
   const char* end_;
   char space_[200];  // Avoid allocation for short keys
-
-  // No copying allowed
-  LookupKey(const LookupKey&);
-  void operator=(const LookupKey&);
 };
 
 inline LookupKey::~LookupKey() {
diff --git a/db/dumpfile.cc b/db/dumpfile.cc
index 1dbff5e..9d22d58 100644
--- a/db/dumpfile.cc
+++ b/db/dumpfile.cc
@@ -38,7 +38,6 @@ bool GuessType(const std::string& fname, FileType* type) {
 // Notified when log reader encounters corruption.
 class CorruptionReporter : public log::Reader::Reporter {
  public:
-  WritableFile* dst_;
   virtual void Corruption(size_t bytes, const Status& status) {
     std::string r = "corruption: ";
     AppendNumberTo(&r, bytes);
@@ -47,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
     r.push_back('\n');
     dst_->Append(r);
   }
+
+  WritableFile* dst_;
 };
 
 // Print contents of a log file. (*func)() is called on every record.
@@ -73,7 +74,6 @@ Status PrintLogContents(Env* env, const std::string& fname,
 // Called on every item found in a WriteBatch.
 class WriteBatchItemPrinter : public WriteBatch::Handler {
  public:
-  WritableFile* dst_;
   virtual void Put(const Slice& key, const Slice& value) {
     std::string r = "  put '";
     AppendEscapedStringTo(&r, key);
@@ -88,6 +88,8 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
     r += "'\n";
     dst_->Append(r);
   }
+
+  WritableFile* dst_;
 };
 
 // Called on every log record (each one of which is a WriteBatch)
diff --git a/db/log_reader.h b/db/log_reader.h
index b27c164..001da89 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -43,6 +43,9 @@ class Reader {
   Reader(SequentialFile* file, Reporter* reporter, bool checksum,
          uint64_t initial_offset);
 
+  Reader(const Reader&) = delete;
+  Reader& operator=(const Reader&) = delete;
+
   ~Reader();
 
   // Read the next record into *record.  Returns true if read
@@ -58,26 +61,6 @@ class Reader {
   uint64_t LastRecordOffset();
 
  private:
-  SequentialFile* const file_;
-  Reporter* const reporter_;
-  bool const checksum_;
-  char* const backing_store_;
-  Slice buffer_;
-  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
-
-  // Offset of the last record returned by ReadRecord.
-  uint64_t last_record_offset_;
-  // Offset of the first location past the end of buffer_.
-  uint64_t end_of_buffer_offset_;
-
-  // Offset at which to start looking for the first record to return
-  uint64_t const initial_offset_;
-
-  // True if we are resynchronizing after a seek (initial_offset_ > 0). In
-  // particular, a run of kMiddleType and kLastType records can be silently
-  // skipped in this mode
-  bool resyncing_;
-
   // Extend record types with the following special values
   enum {
     kEof = kMaxRecordType + 1,
@@ -102,9 +85,25 @@ class Reader {
   void ReportCorruption(uint64_t bytes, const char* reason);
   void ReportDrop(uint64_t bytes, const Status& reason);
 
-  // No copying allowed
-  Reader(const Reader&);
-  void operator=(const Reader&);
+  SequentialFile* const file_;
+  Reporter* const reporter_;
+  bool const checksum_;
+  char* const backing_store_;
+  Slice buffer_;
+  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
+
+  // Offset of the last record returned by ReadRecord.
+  uint64_t last_record_offset_;
+  // Offset of the first location past the end of buffer_.
+  uint64_t end_of_buffer_offset_;
+
+  // Offset at which to start looking for the first record to return
+  uint64_t const initial_offset_;
+
+  // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+  // particular, a run of kMiddleType and kLastType records can be silently
+  // skipped in this mode
+  bool resyncing_;
 };
 
 }  // namespace log
diff --git a/db/log_test.cc b/db/log_test.cc
index 3acaa33..809c418 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -37,81 +37,6 @@ static std::string RandomSkewedString(int i, Random* rnd) {
 }
 
 class LogTest {
- private:
-  class StringDest : public WritableFile {
-   public:
-    std::string contents_;
-
-    virtual Status Close() { return Status::OK(); }
-    virtual Status Flush() { return Status::OK(); }
-    virtual Status Sync() { return Status::OK(); }
-    virtual Status Append(const Slice& slice) {
-      contents_.append(slice.data(), slice.size());
-      return Status::OK();
-    }
-  };
-
-  class StringSource : public SequentialFile {
-   public:
-    Slice contents_;
-    bool force_error_;
-    bool returned_partial_;
-    StringSource() : force_error_(false), returned_partial_(false) {}
-
-    virtual Status Read(size_t n, Slice* result, char* scratch) {
-      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-
-      if (force_error_) {
-        force_error_ = false;
-        returned_partial_ = true;
-        return Status::Corruption("read error");
-      }
-
-      if (contents_.size() < n) {
-        n = contents_.size();
-        returned_partial_ = true;
-      }
-      *result = Slice(contents_.data(), n);
-      contents_.remove_prefix(n);
-      return Status::OK();
-    }
-
-    virtual Status Skip(uint64_t n) {
-      if (n > contents_.size()) {
-        contents_.clear();
-        return Status::NotFound("in-memory file skipped past end");
-      }
-
-      contents_.remove_prefix(n);
-
-      return Status::OK();
-    }
-  };
-
-  class ReportCollector : public Reader::Reporter {
-   public:
-    size_t dropped_bytes_;
-    std::string message_;
-
-    ReportCollector() : dropped_bytes_(0) {}
-    virtual void Corruption(size_t bytes, const Status& status) {
-      dropped_bytes_ += bytes;
-      message_.append(status.ToString());
-    }
-  };
-
-  StringDest dest_;
-  StringSource source_;
-  ReportCollector report_;
-  bool reading_;
-  Writer* writer_;
-  Reader* reader_;
-
-  // Record metadata for testing initial offset functionality
-  static size_t initial_offset_record_sizes_[];
-  static uint64_t initial_offset_last_record_offsets_[];
-  static int num_initial_offset_records_;
-
  public:
   LogTest()
       : reading_(false),
@@ -232,6 +157,82 @@ class LogTest {
     }
     delete offset_reader;
   }
+
+ private:
+  class StringDest : public WritableFile {
+   public:
+    virtual Status Close() { return Status::OK(); }
+    virtual Status Flush() { return Status::OK(); }
+    virtual Status Sync() { return Status::OK(); }
+    virtual Status Append(const Slice& slice) {
+      contents_.append(slice.data(), slice.size());
+      return Status::OK();
+    }
+
+    std::string contents_;
+  };
+
+  class StringSource : public SequentialFile {
+   public:
+    StringSource() : force_error_(false), returned_partial_(false) {}
+
+    virtual Status Read(size_t n, Slice* result, char* scratch) {
+      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+
+      if (force_error_) {
+        force_error_ = false;
+        returned_partial_ = true;
+        return Status::Corruption("read error");
+      }
+
+      if (contents_.size() < n) {
+        n = contents_.size();
+        returned_partial_ = true;
+      }
+      *result = Slice(contents_.data(), n);
+      contents_.remove_prefix(n);
+      return Status::OK();
+    }
+
+    virtual Status Skip(uint64_t n) {
+      if (n > contents_.size()) {
+        contents_.clear();
+        return Status::NotFound("in-memory file skipped past end");
+      }
+
+      contents_.remove_prefix(n);
+
+      return Status::OK();
+    }
+
+    Slice contents_;
+    bool force_error_;
+    bool returned_partial_;
+  };
+
+  class ReportCollector : public Reader::Reporter {
+   public:
+    ReportCollector() : dropped_bytes_(0) {}
+    virtual void Corruption(size_t bytes, const Status& status) {
+      dropped_bytes_ += bytes;
+      message_.append(status.ToString());
+    }
+
+    size_t dropped_bytes_;
+    std::string message_;
+  };
+
+  // Record metadata for testing initial offset functionality
+  static size_t initial_offset_record_sizes_[];
+  static uint64_t initial_offset_last_record_offsets_[];
+  static int num_initial_offset_records_;
+
+  StringDest dest_;
+  StringSource source_;
+  ReportCollector report_;
+  bool reading_;
+  Writer* writer_;
+  Reader* reader_;
 };
 
 size_t LogTest::initial_offset_record_sizes_[] = {
diff --git a/db/log_writer.h b/db/log_writer.h
index 840809d..c0a2114 100644
--- a/db/log_writer.h
+++ b/db/log_writer.h
@@ -29,11 +29,16 @@ class Writer {
   // "*dest" must remain live while this Writer is in use.
   Writer(WritableFile* dest, uint64_t dest_length);
 
+  Writer(const Writer&) = delete;
+  Writer& operator=(const Writer&) = delete;
+
   ~Writer();
 
   Status AddRecord(const Slice& slice);
 
  private:
+  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
+
   WritableFile* dest_;
   int block_offset_;  // Current offset in block
 
@@ -41,12 +46,6 @@ class Writer {
   // pre-computed to reduce the overhead of computing the crc of the
   // record type stored in the header.
   uint32_t type_crc_[kMaxRecordType + 1];
-
-  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-
-  // No copying allowed
-  Writer(const Writer&);
-  void operator=(const Writer&);
 };
 
 }  // namespace log
diff --git a/db/memtable.h b/db/memtable.h
index ef18bb5..9d986b1 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -23,6 +23,9 @@ class MemTable {
   // is zero and the caller must call Ref() at least once.
   explicit MemTable(const InternalKeyComparator& comparator);
 
+  MemTable(const MemTable&) = delete;
+  MemTable& operator=(const MemTable&) = delete;
+
   // Increase reference count.
   void Ref() { ++refs_; }
 
@@ -60,26 +63,23 @@ class MemTable {
   bool Get(const LookupKey& key, std::string* value, Status* s);
 
  private:
-  ~MemTable();  // Private since only Unref() should be used to delete it
+  friend class MemTableIterator;
+  friend class MemTableBackwardIterator;
 
   struct KeyComparator {
     const InternalKeyComparator comparator;
     explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
     int operator()(const char* a, const char* b) const;
   };
-  friend class MemTableIterator;
-  friend class MemTableBackwardIterator;
 
   typedef SkipList<const char*, KeyComparator> Table;
 
+  ~MemTable();  // Private since only Unref() should be used to delete it
+
   KeyComparator comparator_;
   int refs_;
   Arena arena_;
   Table table_;
-
-  // No copying allowed
-  MemTable(const MemTable&);
-  void operator=(const MemTable&);
 };
 
 }  // namespace leveldb
diff --git a/db/repair.cc b/db/repair.cc
index d5ecc45..3c676ca 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -95,22 +95,6 @@ class Repairer {
     SequenceNumber max_sequence;
   };
 
-  std::string const dbname_;
-  Env* const env_;
-  InternalKeyComparator const icmp_;
-  InternalFilterPolicy const ipolicy_;
-  Options const options_;
-  bool owns_info_log_;
-  bool owns_cache_;
-  TableCache* table_cache_;
-  VersionEdit edit_;
-
-  std::vector<std::string> manifests_;
-  std::vector<uint64_t> table_numbers_;
-  std::vector<uint64_t> logs_;
-  std::vector<TableInfo> tables_;
-  uint64_t next_file_number_;
-
   Status FindFiles() {
     std::vector<std::string> filenames;
     Status status = env_->GetChildren(dbname_, &filenames);
@@ -439,6 +423,22 @@ class Repairer {
     Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
         s.ToString().c_str());
   }
+
+  const std::string dbname_;
+  Env* const env_;
+  InternalKeyComparator const icmp_;
+  InternalFilterPolicy const ipolicy_;
+  const Options options_;
+  bool owns_info_log_;
+  bool owns_cache_;
+  TableCache* table_cache_;
+  VersionEdit edit_;
+
+  std::vector<std::string> manifests_;
+  std::vector<uint64_t> table_numbers_;
+  std::vector<uint64_t> logs_;
+  std::vector<TableInfo> tables_;
+  uint64_t next_file_number_;
 };
 }  // namespace
 
diff --git a/db/skiplist.h b/db/skiplist.h
index 05e5733..a59b45b 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -49,6 +49,9 @@ class SkipList {
   // must remain allocated for the lifetime of the skiplist object.
   explicit SkipList(Comparator cmp, Arena* arena);
 
+  SkipList(const SkipList&) = delete;
+  SkipList& operator=(const SkipList&) = delete;
+
   // Insert key into the list.
   // REQUIRES: nothing that compares equal to key is currently in the list.
   void Insert(const Key& key);
@@ -98,23 +101,10 @@ class SkipList {
  private:
   enum { kMaxHeight = 12 };
 
-  // Immutable after construction
-  Comparator const compare_;
-  Arena* const arena_;  // Arena used for allocations of nodes
-
-  Node* const head_;
-
-  // Modified only by Insert().  Read racily by readers, but stale
-  // values are ok.
-  std::atomic<int> max_height_;  // Height of the entire list
-
   inline int GetMaxHeight() const {
     return max_height_.load(std::memory_order_relaxed);
   }
 
-  // Read/written only by Insert().
-  Random rnd_;
-
   Node* NewNode(const Key& key, int height);
   int RandomHeight();
   bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@@ -137,9 +127,18 @@ class SkipList {
   // Return head_ if list is empty.
   Node* FindLast() const;
 
-  // No copying allowed
-  SkipList(const SkipList&);
-  void operator=(const SkipList&);
+  // Immutable after construction
+  Comparator const compare_;
+  Arena* const arena_;  // Arena used for allocations of nodes
+
+  Node* const head_;
+
+  // Modified only by Insert().  Read racily by readers, but stale
+  // values are ok.
+  std::atomic<int> max_height_;  // Height of the entire list
+
+  // Read/written only by Insert().
+  Random rnd_;
 };
 
 // Implementation details follow
diff --git a/db/table_cache.h b/db/table_cache.h
index 21ae92d..93069c8 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -45,12 +45,12 @@ class TableCache {
   void Evict(uint64_t file_number);
 
  private:
+  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
+
   Env* const env_;
   const std::string dbname_;
   const Options& options_;
   Cache* cache_;
-
-  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
 };
 
 }  // namespace leveldb
diff --git a/db/version_edit.h b/db/version_edit.h
index 3daf4ef..2dadda7 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -16,14 +16,14 @@ namespace leveldb {
 class VersionSet;
 
 struct FileMetaData {
+  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
+
   int refs;
   int allowed_seeks;  // Seeks allowed until compaction
   uint64_t number;
   uint64_t file_size;    // File size in bytes
   InternalKey smallest;  // Smallest internal key served by table
   InternalKey largest;   // Largest internal key served by table
-
-  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
 };
 
 class VersionEdit {
diff --git a/db/version_set.h b/db/version_set.h
index 334ebd9..69f3d70 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -59,11 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
 
 class Version {
  public:
-  // Append to *iters a sequence of iterators that will
-  // yield the contents of this Version when merged together.
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
-
   // Lookup the value for key.  If found, store it in *val and
   // return OK.  Else return a non-OK status.  Fills *stats.
   // REQUIRES: lock is not held
@@ -71,6 +66,12 @@ class Version {
     FileMetaData* seek_file;
     int seek_file_level;
   };
+
+  // Append to *iters a sequence of iterators that will
+  // yield the contents of this Version when merged together.
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
+
   Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
              GetStats* stats);
 
@@ -118,6 +119,22 @@ class Version {
   friend class VersionSet;
 
   class LevelFileNumIterator;
+
+  explicit Version(VersionSet* vset)
+      : vset_(vset),
+        next_(this),
+        prev_(this),
+        refs_(0),
+        file_to_compact_(nullptr),
+        file_to_compact_level_(-1),
+        compaction_score_(-1),
+        compaction_level_(-1) {}
+
+  Version(const Version&) = delete;
+  Version& operator=(const Version&) = delete;
+
+  ~Version();
+
   Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
 
   // Call func(arg, level, f) for every file that overlaps user_key in
@@ -145,28 +162,15 @@ class Version {
   // are initialized by Finalize().
   double compaction_score_;
   int compaction_level_;
-
-  explicit Version(VersionSet* vset)
-      : vset_(vset),
-        next_(this),
-        prev_(this),
-        refs_(0),
-        file_to_compact_(nullptr),
-        file_to_compact_level_(-1),
-        compaction_score_(-1),
-        compaction_level_(-1) {}
-
-  ~Version();
-
-  // No copying allowed
-  Version(const Version&);
-  void operator=(const Version&);
 };
 
 class VersionSet {
  public:
   VersionSet(const std::string& dbname, const Options* options,
              TableCache* table_cache, const InternalKeyComparator*);
+  VersionSet(const VersionSet&) = delete;
+  VersionSet& operator=(const VersionSet&) = delete;
+
   ~VersionSet();
 
   // Apply *edit to the current version to form a new descriptor that
@@ -309,10 +313,6 @@ class VersionSet {
   // Per-level key at which the next compaction at that level should start.
   // Either an empty string, or a valid InternalKey.
   std::string compact_pointer_[config::kNumLevels];
-
-  // No copying allowed
-  VersionSet(const VersionSet&);
-  void operator=(const VersionSet&);
 };
 
 // A Compaction encapsulates information about a compaction.
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index 43b51d8..f7efe2b 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -11,9 +11,6 @@ namespace leveldb {
 
 class FindFileTest {
  public:
-  std::vector<FileMetaData*> files_;
-  bool disjoint_sorted_files_;
-
   FindFileTest() : disjoint_sorted_files_(true) {}
 
   ~FindFileTest() {
@@ -46,6 +43,11 @@ class FindFileTest {
                                  (smallest != nullptr ? &s : nullptr),
                                  (largest != nullptr ? &l : nullptr));
   }
+
+  bool disjoint_sorted_files_;
+
+ private:
+  std::vector<FileMetaData*> files_;
 };
 
 TEST(FindFileTest, Empty) {
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index 58dc538..b6b790c 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -27,6 +27,10 @@ class FileState {
   // and the caller must call Ref() at least once.
   FileState() : refs_(0), size_(0) {}
 
+  // No copying allowed.
+  FileState(const FileState&) = delete;
+  FileState& operator=(const FileState&) = delete;
+
   // Increase the reference count.
   void Ref() {
     MutexLock lock(&refs_mutex_);
@@ -133,21 +137,17 @@ class FileState {
   }
 
  private:
+  enum { kBlockSize = 8 * 1024 };
+
   // Private since only Unref() should be used to delete it.
   ~FileState() { Truncate(); }
 
-  // No copying allowed.
-  FileState(const FileState&);
-  void operator=(const FileState&);
-
   port::Mutex refs_mutex_;
   int refs_ GUARDED_BY(refs_mutex_);
 
   mutable port::Mutex blocks_mutex_;
   std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
   uint64_t size_ GUARDED_BY(blocks_mutex_);
-
-  enum { kBlockSize = 8 * 1024 };
 };
 
 class SequentialFileImpl : public SequentialFile {
@@ -380,6 +380,7 @@ class InMemoryEnv : public EnvWrapper {
  private:
   // Map from filenames to FileState objects, representing a simple file system.
   typedef std::map<std::string, FileState*> FileSystem;
+
   port::Mutex mutex_;
   FileSystem file_map_ GUARDED_BY(mutex_);
 };
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index a0a9469..94ad06b 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -16,10 +16,10 @@ namespace leveldb {
 
 class MemEnvTest {
  public:
-  Env* env_;
-
   MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
   ~MemEnvTest() { delete env_; }
+
+  Env* env_;
 };
 
 TEST(MemEnvTest, Basics) {
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 0b8dc24..e52a5b6 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -33,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
 
 // A range of keys
 struct LEVELDB_EXPORT Range {
-  Slice start;  // Included in the range
-  Slice limit;  // Not included in the range
-
   Range() {}
   Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
+
+  Slice start;  // Included in the range
+  Slice limit;  // Not included in the range
 };
 
 // A DB is a persistent ordered map from keys to values.
diff --git a/include/leveldb/iterator.h b/include/leveldb/iterator.h
index 447e950..bb9a5df 100644
--- a/include/leveldb/iterator.h
+++ b/include/leveldb/iterator.h
@@ -84,12 +84,6 @@ class LEVELDB_EXPORT Iterator {
   // Cleanup functions are stored in a single-linked list.
   // The list's head node is inlined in the iterator.
   struct CleanupNode {
-    // The head node is used if the function pointer is not null.
-    CleanupFunction function;
-    void* arg1;
-    void* arg2;
-    CleanupNode* next;
-
     // True if the node is not used. Only head nodes might be unused.
     bool IsEmpty() const { return function == nullptr; }
     // Invokes the cleanup function.
@@ -97,6 +91,12 @@ class LEVELDB_EXPORT Iterator {
       assert(function != nullptr);
       (*function)(arg1, arg2);
     }
+
+    // The head node is used if the function pointer is not null.
+    CleanupFunction function;
+    void* arg1;
+    void* arg2;
+    CleanupNode* next;
   };
   CleanupNode cleanup_head_;
 };
diff --git a/include/leveldb/options.h b/include/leveldb/options.h
index 7e26dc6..b748772 100644
--- a/include/leveldb/options.h
+++ b/include/leveldb/options.h
@@ -31,6 +31,9 @@ enum CompressionType {
 
 // Options to control the behavior of a database (passed to DB::Open)
 struct LEVELDB_EXPORT Options {
+  // Create an Options object with default values for all fields.
+  Options();
+
   // -------------------
   // Parameters that affect behavior
 
@@ -137,13 +140,12 @@ struct LEVELDB_EXPORT Options {
   // Many applications will benefit from passing the result of
   // NewBloomFilterPolicy() here.
   const FilterPolicy* filter_policy = nullptr;
-
-  // Create an Options object with default values for all fields.
-  Options();
 };
 
 // Options that control read operations
 struct LEVELDB_EXPORT ReadOptions {
+  ReadOptions() = default;
+
   // If true, all data read from underlying storage will be
   // verified against corresponding checksums.
   bool verify_checksums = false;
@@ -157,12 +159,12 @@ struct LEVELDB_EXPORT ReadOptions {
   // not have been released).  If "snapshot" is null, use an implicit
   // snapshot of the state at the beginning of this read operation.
   const Snapshot* snapshot = nullptr;
-
-  ReadOptions() = default;
 };
 
 // Options that control write operations
 struct LEVELDB_EXPORT WriteOptions {
+  WriteOptions() = default;
+
   // If true, the write will be flushed from the operating system
   // buffer cache (by calling WritableFile::Sync()) before the write
   // is considered complete.  If this flag is true, writes will be
@@ -178,8 +180,6 @@ struct LEVELDB_EXPORT WriteOptions {
   // with sync==true has similar crash semantics to a "write()"
   // system call followed by "fsync()".
   bool sync = false;
-
-  WriteOptions() = default;
 };
 
 }  // namespace leveldb
diff --git a/include/leveldb/status.h b/include/leveldb/status.h
index 54cf377..e327314 100644
--- a/include/leveldb/status.h
+++ b/include/leveldb/status.h
@@ -76,13 +76,6 @@ class LEVELDB_EXPORT Status {
   std::string ToString() const;
 
  private:
-  // OK status has a null state_.  Otherwise, state_ is a new[] array
-  // of the following form:
-  //    state_[0..3] == length of message
-  //    state_[4]    == code
-  //    state_[5..]  == message
-  const char* state_;
-
   enum Code {
     kOk = 0,
     kNotFound = 1,
@@ -98,6 +91,13 @@ class LEVELDB_EXPORT Status {
 
   Status(Code code, const Slice& msg, const Slice& msg2);
   static const char* CopyState(const char* s);
+
+  // OK status has a null state_.  Otherwise, state_ is a new[] array
+  // of the following form:
+  //    state_[0..3] == length of message
+  //    state_[4]    == code
+  //    state_[5..]  == message
+  const char* state_;
 };
 
 inline Status::Status(const Status& rhs) {
diff --git a/include/leveldb/table.h b/include/leveldb/table.h
index 14a6a44..25c6013 100644
--- a/include/leveldb/table.h
+++ b/include/leveldb/table.h
@@ -41,7 +41,7 @@ class LEVELDB_EXPORT Table {
                      uint64_t file_size, Table** table);
 
   Table(const Table&) = delete;
-  void operator=(const Table&) = delete;
+  Table& operator=(const Table&) = delete;
 
   ~Table();
 
@@ -59,22 +59,24 @@ class LEVELDB_EXPORT Table {
   uint64_t ApproximateOffsetOf(const Slice& key) const;
 
  private:
+  friend class TableCache;
   struct Rep;
-  Rep* rep_;
 
-  explicit Table(Rep* rep) { rep_ = rep; }
   static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
 
+  explicit Table(Rep* rep) : rep_(rep) {}
+
   // Calls (*handle_result)(arg, ...) with the entry found after a call
   // to Seek(key).  May not make such a call if filter policy says
   // that key is not present.
-  friend class TableCache;
   Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
                      void (*handle_result)(void* arg, const Slice& k,
                                            const Slice& v));
 
   void ReadMeta(const Footer& footer);
   void ReadFilter(const Slice& filter_handle_value);
+
+  Rep* const rep_;
 };
 
 }  // namespace leveldb
diff --git a/include/leveldb/table_builder.h b/include/leveldb/table_builder.h
index f8361fd..7d8896b 100644
--- a/include/leveldb/table_builder.h
+++ b/include/leveldb/table_builder.h
@@ -33,7 +33,7 @@ class LEVELDB_EXPORT TableBuilder {
   TableBuilder(const Options& options, WritableFile* file);
 
   TableBuilder(const TableBuilder&) = delete;
-  void operator=(const TableBuilder&) = delete;
+  TableBuilder& operator=(const TableBuilder&) = delete;
 
   // REQUIRES: Either Finish() or Abandon() has been called.
   ~TableBuilder();
diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index 21f7c63..94d4115 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -32,6 +32,13 @@ class Slice;
 
 class LEVELDB_EXPORT WriteBatch {
  public:
+  class LEVELDB_EXPORT Handler {
+   public:
+    virtual ~Handler();
+    virtual void Put(const Slice& key, const Slice& value) = 0;
+    virtual void Delete(const Slice& key) = 0;
+  };
+
   WriteBatch();
 
   // Intentionally copyable.
@@ -63,12 +70,6 @@ class LEVELDB_EXPORT WriteBatch {
   void Append(const WriteBatch& source);
 
   // Support for iterating over the contents of a batch.
-  class LEVELDB_EXPORT Handler {
-   public:
-    virtual ~Handler();
-    virtual void Put(const Slice& key, const Slice& value) = 0;
-    virtual void Delete(const Slice& key) = 0;
-  };
   Status Iterate(Handler* handler) const;
 
  private:
diff --git a/table/block.h b/table/block.h
index 3d4b03c..c8f1f7b 100644
--- a/table/block.h
+++ b/table/block.h
@@ -20,24 +20,23 @@ class Block {
   // Initialize the block with the specified contents.
   explicit Block(const BlockContents& contents);
 
+  Block(const Block&) = delete;
+  Block& operator=(const Block&) = delete;
+
   ~Block();
 
   size_t size() const { return size_; }
   Iterator* NewIterator(const Comparator* comparator);
 
  private:
+  class Iter;
+
   uint32_t NumRestarts() const;
 
   const char* data_;
   size_t size_;
   uint32_t restart_offset_;  // Offset in data_ of restart array
   bool owned_;               // Block owns data_[]
-
-  // No copying allowed
-  Block(const Block&);
-  void operator=(const Block&);
-
-  class Iter;
 };
 
 }  // namespace leveldb
diff --git a/table/block_builder.h b/table/block_builder.h
index d0d9b6e..f91f5e6 100644
--- a/table/block_builder.h
+++ b/table/block_builder.h
@@ -19,6 +19,9 @@ class BlockBuilder {
  public:
   explicit BlockBuilder(const Options* options);
 
+  BlockBuilder(const BlockBuilder&) = delete;
+  BlockBuilder& operator=(const BlockBuilder&) = delete;
+
   // Reset the contents as if the BlockBuilder was just constructed.
   void Reset();
 
@@ -45,10 +48,6 @@ class BlockBuilder {
   int counter_;                     // Number of entries emitted since restart
   bool finished_;                   // Has Finish() been called?
   std::string last_key_;
-
-  // No copying allowed
-  BlockBuilder(const BlockBuilder&);
-  void operator=(const BlockBuilder&);
 };
 
 }  // namespace leveldb
diff --git a/table/filter_block.h b/table/filter_block.h
index 1b034dc..73b5399 100644
--- a/table/filter_block.h
+++ b/table/filter_block.h
@@ -32,6 +32,9 @@ class FilterBlockBuilder {
  public:
   explicit FilterBlockBuilder(const FilterPolicy*);
 
+  FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+  FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
+
   void StartBlock(uint64_t block_offset);
   void AddKey(const Slice& key);
   Slice Finish();
@@ -45,10 +48,6 @@ class FilterBlockBuilder {
   std::string result_;           // Filter data computed so far
   std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
-
-  // No copying allowed
-  FilterBlockBuilder(const FilterBlockBuilder&);
-  void operator=(const FilterBlockBuilder&);
 };
 
 class FilterBlockReader {
diff --git a/table/format.h b/table/format.h
index dacaa9f..2ad145c 100644
--- a/table/format.h
+++ b/table/format.h
@@ -23,6 +23,9 @@ struct ReadOptions;
 // block or a meta block.
 class BlockHandle {
  public:
+  // Maximum encoding length of a BlockHandle
+  enum { kMaxEncodedLength = 10 + 10 };
+
   BlockHandle();
 
   // The offset of the block in the file.
@@ -36,9 +39,6 @@ class BlockHandle {
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
 
-  // Maximum encoding length of a BlockHandle
-  enum { kMaxEncodedLength = 10 + 10 };
-
  private:
   uint64_t offset_;
   uint64_t size_;
@@ -48,6 +48,11 @@ class BlockHandle {
 // end of every table file.
 class Footer {
  public:
+  // Encoded length of a Footer.  Note that the serialization of a
+  // Footer will always occupy exactly this many bytes.  It consists
+  // of two block handles and a magic number.
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
+
   Footer() {}
 
   // The block handle for the metaindex block of the table
@@ -61,11 +66,6 @@ class Footer {
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
 
-  // Encoded length of a Footer.  Note that the serialization of a
-  // Footer will always occupy exactly this many bytes.  It consists
-  // of two block handles and a magic number.
-  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
-
  private:
   BlockHandle metaindex_handle_;
   BlockHandle index_handle_;
diff --git a/table/merger.cc b/table/merger.cc
index 3a5c3e4..1bbc6cf 100644
--- a/table/merger.cc
+++ b/table/merger.cc
@@ -129,6 +129,9 @@ class MergingIterator : public Iterator {
   }
 
  private:
+  // Which direction is the iterator moving?
+  enum Direction { kForward, kReverse };
+
   void FindSmallest();
   void FindLargest();
 
@@ -139,9 +142,6 @@ class MergingIterator : public Iterator {
   IteratorWrapper* children_;
   int n_;
   IteratorWrapper* current_;
-
-  // Which direction is the iterator moving?
-  enum Direction { kForward, kReverse };
   Direction direction_;
 };
 
diff --git a/table/table_builder.cc b/table/table_builder.cc
index 9afff76..278febf 100644
--- a/table/table_builder.cc
+++ b/table/table_builder.cc
@@ -19,6 +19,22 @@
 namespace leveldb {
 
 struct TableBuilder::Rep {
+  Rep(const Options& opt, WritableFile* f)
+      : options(opt),
+        index_block_options(opt),
+        file(f),
+        offset(0),
+        data_block(&options),
+        index_block(&index_block_options),
+        num_entries(0),
+        closed(false),
+        filter_block(opt.filter_policy == nullptr
+                         ? nullptr
+                         : new FilterBlockBuilder(opt.filter_policy)),
+        pending_index_entry(false) {
+    index_block_options.block_restart_interval = 1;
+  }
+
   Options options;
   Options index_block_options;
   WritableFile* file;
@@ -44,22 +60,6 @@ struct TableBuilder::Rep {
   BlockHandle pending_handle;  // Handle to add to index block
 
   std::string compressed_output;
-
-  Rep(const Options& opt, WritableFile* f)
-      : options(opt),
-        index_block_options(opt),
-        file(f),
-        offset(0),
-        data_block(&options),
-        index_block(&index_block_options),
-        num_entries(0),
-        closed(false),
-        filter_block(opt.filter_policy == nullptr
-                         ? nullptr
-                         : new FilterBlockBuilder(opt.filter_policy)),
-        pending_index_entry(false) {
-    index_block_options.block_restart_interval = 1;
-  }
 };
 
 TableBuilder::TableBuilder(const Options& options, WritableFile* file)
diff --git a/util/arena.cc b/util/arena.cc
index eadec8a..46e3b2e 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -8,10 +8,8 @@ namespace leveldb {
 
 static const int kBlockSize = 4096;
 
-Arena::Arena() : memory_usage_(0) {
-  alloc_ptr_ = nullptr;  // First allocation will allocate a block
-  alloc_bytes_remaining_ = 0;
-}
+Arena::Arena()
+    : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
 
 Arena::~Arena() {
   for (size_t i = 0; i < blocks_.size(); i++) {
diff --git a/util/arena.h b/util/arena.h
index 624e262..68fc55d 100644
--- a/util/arena.h
+++ b/util/arena.h
@@ -16,6 +16,10 @@ namespace leveldb {
 class Arena {
  public:
   Arena();
+
+  Arena(const Arena&) = delete;
+  Arena& operator=(const Arena&) = delete;
+
   ~Arena();
 
   // Return a pointer to a newly allocated memory block of "bytes" bytes.
@@ -46,10 +50,6 @@ class Arena {
   // TODO(costan): This member is accessed via atomics, but the others are
   //               accessed without any locking. Is this OK?
   std::atomic<size_t> memory_usage_;
-
-  // No copying allowed
-  Arena(const Arena&);
-  void operator=(const Arena&);
 };
 
 inline char* Arena::Allocate(size_t bytes) {
diff --git a/util/bloom.cc b/util/bloom.cc
index 097ce7a..7f97464 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -15,10 +15,6 @@ static uint32_t BloomHash(const Slice& key) {
 }
 
 class BloomFilterPolicy : public FilterPolicy {
- private:
-  size_t bits_per_key_;
-  size_t k_;
-
  public:
   explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
     // We intentionally round down to reduce probing cost a little bit
@@ -82,6 +78,10 @@ class BloomFilterPolicy : public FilterPolicy {
     }
     return true;
   }
+
+ private:
+  size_t bits_per_key_;
+  size_t k_;
 };
 }  // namespace
 
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 71c4115..436daa9 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -19,11 +19,6 @@ static Slice Key(int i, char* buffer) {
 }
 
 class BloomTest {
- private:
-  const FilterPolicy* policy_;
-  std::string filter_;
-  std::vector<std::string> keys_;
-
  public:
   BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
 
@@ -78,6 +73,11 @@ class BloomTest {
     }
     return result / 10000.0;
   }
+
+ private:
+  const FilterPolicy* policy_;
+  std::string filter_;
+  std::vector<std::string> keys_;
 };
 
 TEST(BloomTest, EmptyFilter) {
diff --git a/util/cache_test.cc b/util/cache_test.cc
index d5c1a1d..974334b 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
 
 class CacheTest {
  public:
-  static CacheTest* current_;
-
   static void Deleter(const Slice& key, void* v) {
     current_->deleted_keys_.push_back(DecodeKey(key));
     current_->deleted_values_.push_back(DecodeValue(v));
@@ -61,6 +59,8 @@ class CacheTest {
   }
 
   void Erase(int key) { cache_->Erase(EncodeKey(key)); }
+
+  static CacheTest* current_;
 };
 CacheTest* CacheTest::current_;
 
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 6a2a1fc..4b72934 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -14,13 +14,14 @@ static const int kMMapLimit = 4;
 
 class EnvPosixTest {
  public:
-  Env* env_;
-  EnvPosixTest() : env_(Env::Default()) {}
-
   static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
     EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
     EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
   }
+
+  EnvPosixTest() : env_(Env::Default()) {}
+
+  Env* env_;
 };
 
 TEST(EnvPosixTest, TestOpenOnRead) {
diff --git a/util/env_test.cc b/util/env_test.cc
index 3e81261..9e2ad1e 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -19,8 +19,9 @@ static const int kDelayMicros = 100000;
 
 class EnvTest {
  public:
-  Env* env_;
   EnvTest() : env_(Env::Default()) {}
+
+  Env* env_;
 };
 
 namespace {
diff --git a/util/env_windows.cc b/util/env_windows.cc
index c537938..09e3df6 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -626,21 +626,19 @@ class WindowsEnv : public Env {
   }
 
  private:
+  // Entry per Schedule() call
+  struct BGItem {
+    void* arg;
+    void (*function)(void*);
+  };
+
   // BGThread() is the body of the background thread
   void BGThread();
 
   std::mutex mu_;
   std::condition_variable bgsignal_;
   bool started_bgthread_;
-
-  // Entry per Schedule() call
-  struct BGItem {
-    void* arg;
-    void (*function)(void*);
-  };
-  typedef std::deque<BGItem> BGQueue;
-  BGQueue queue_;
-
+  std::deque<BGItem> queue_;
   Limiter mmap_limiter_;
 };
 
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
index 4451b9e..3c22133 100644
--- a/util/env_windows_test.cc
+++ b/util/env_windows_test.cc
@@ -14,12 +14,13 @@ static const int kMMapLimit = 4;
 
 class EnvWindowsTest {
  public:
-  Env* env_;
-  EnvWindowsTest() : env_(Env::Default()) {}
-
   static void SetFileLimits(int mmap_limit) {
     EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
   }
+
+  EnvWindowsTest() : env_(Env::Default()) {}
+
+  Env* env_;
 };
 
 TEST(EnvWindowsTest, TestOpenOnRead) {
diff --git a/util/histogram.h b/util/histogram.h
index fe281a9..4da60fb 100644
--- a/util/histogram.h
+++ b/util/histogram.h
@@ -21,20 +21,22 @@ class Histogram {
   std::string ToString() const;
 
  private:
+  enum { kNumBuckets = 154 };
+
+  double Median() const;
+  double Percentile(double p) const;
+  double Average() const;
+  double StandardDeviation() const;
+
+  static const double kBucketLimit[kNumBuckets];
+
   double min_;
   double max_;
   double num_;
   double sum_;
   double sum_squares_;
 
-  enum { kNumBuckets = 154 };
-  static const double kBucketLimit[kNumBuckets];
   double buckets_[kNumBuckets];
-
-  double Median() const;
-  double Percentile(double p) const;
-  double Average() const;
-  double StandardDeviation() const;
 };
 
 }  // namespace leveldb

From 78b39d68c15ba020c0d60a3906fb66dbf1697595 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Fri, 3 May 2019 12:55:26 -0700
Subject: [PATCH 077/181] Bump the version number from 1.21 to 1.22.

PiperOrigin-RevId: 246558281
---
 CMakeLists.txt       | 2 +-
 include/leveldb/db.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 97953ab..1409c06 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,7 +4,7 @@
 
 cmake_minimum_required(VERSION 3.9)
 # Keep the version below in sync with the one in db.h
-project(leveldb VERSION 1.21.0 LANGUAGES C CXX)
+project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
 
 # This project can use C11, but will gracefully decay down to C89.
 set(CMAKE_C_STANDARD 11)
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index e52a5b6..ea3d9e5 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -16,7 +16,7 @@ namespace leveldb {
 
 // Update CMakeLists.txt if you change these
 static const int kMajorVersion = 1;
-static const int kMinorVersion = 21;
+static const int kMinorVersion = 22;
 
 struct Options;
 struct ReadOptions;

From 24424a1ef2c284f4ec30544a3458023362cbeacd Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sat, 4 May 2019 17:40:21 -0700
Subject: [PATCH 078/181] Style cleanup.

1) Convert iterator-based for loops to C++11 foreach loops.
2) Convert "void operator=" to "T& operator=".
3) Switch copy operators from private to public deleted.
4) Switch from empty ctors / dtors to "= default" where appropriate
   (all four patterns sketched below).
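
A minimal before/after sketch of these patterns, using a hypothetical
Registry class rather than code from this change:

    // Hypothetical example; the class and members below are illustrative
    // assumptions, not part of this change.
    #include <iostream>
    #include <map>
    #include <string>

    class Registry {
     public:
      Registry() = default;   // 4) was: Registry() {}
      ~Registry() = default;  // 4) was: ~Registry() {}

      // 2) + 3) public deleted copy operations with Registry& operator=,
      // instead of private, never-defined Registry(const Registry&) and
      // void operator=(const Registry&).
      Registry(const Registry&) = delete;
      Registry& operator=(const Registry&) = delete;

      void Add(const std::string& key, int value) { entries_[key] = value; }

      // 1) range-based for loop instead of an explicit
      // std::map<std::string, int>::const_iterator loop.
      void Dump() const {
        for (const auto& kvp : entries_) {
          std::cout << kvp.first << " -> " << kvp.second << "\n";
        }
      }

     private:
      std::map<std::string, int> entries_;
    };

    int main() {
      Registry registry;
      registry.Add("minor_version", 22);
      registry.Dump();
      return 0;
    }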

PiperOrigin-RevId: 246679195
---
 db/db_impl.cc               |  4 +-
 db/db_test.cc               | 22 ++++-----
 db/fault_injection_test.cc  | 45 ++++++++++---------
 db/log_reader.cc            |  2 +-
 db/log_writer.cc            |  2 +-
 db/memtable.cc              | 27 +++++------
 db/version_edit.cc          | 14 +++---
 db/version_edit.h           |  2 +-
 db/version_set.cc           | 19 ++++----
 db/version_set_test.cc      |  4 +-
 db/write_batch.cc           |  8 ++--
 helpers/memenv/memenv.cc    | 53 +++++++++++-----------
 include/leveldb/db.h        |  2 +-
 table/format.h              |  2 +-
 table/table_test.cc         | 89 +++++++++++++++++++------------------
 table/two_level_iterator.cc |  2 +-
 util/comparator.cc          | 18 ++++----
 util/env.cc                 | 12 ++---
 18 files changed, 164 insertions(+), 163 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 761ebf6..82be594 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1469,7 +1469,7 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
   return Write(opt, &batch);
 }
 
-DB::~DB() {}
+DB::~DB() = default;
 
 Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   *dbptr = nullptr;
@@ -1514,7 +1514,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   return s;
 }
 
-Snapshot::~Snapshot() {}
+Snapshot::~Snapshot() = default;
 
 Status DestroyDB(const std::string& dbname, const Options& options) {
   Env* env = options.env;
diff --git a/db/db_test.cc b/db/db_test.cc
index 78296d5..e7386f7 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2024,19 +2024,19 @@ class ModelDB : public DB {
   };
 
   explicit ModelDB(const Options& options) : options_(options) {}
-  ~ModelDB() {}
+  ~ModelDB() override = default;
   virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
     return DB::Put(o, k, v);
   }
-  virtual Status Delete(const WriteOptions& o, const Slice& key) {
+  Status Delete(const WriteOptions& o, const Slice& key) override {
     return DB::Delete(o, key);
   }
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value) {
+  Status Get(const ReadOptions& options, const Slice& key,
+             std::string* value) override {
     assert(false);  // Not implemented
     return Status::NotFound(key);
   }
-  virtual Iterator* NewIterator(const ReadOptions& options) {
+  Iterator* NewIterator(const ReadOptions& options) override {
     if (options.snapshot == nullptr) {
       KVMap* saved = new KVMap;
       *saved = map_;
@@ -2047,16 +2047,16 @@ class ModelDB : public DB {
       return new ModelIter(snapshot_state, false);
     }
   }
-  virtual const Snapshot* GetSnapshot() {
+  const Snapshot* GetSnapshot() override {
     ModelSnapshot* snapshot = new ModelSnapshot;
     snapshot->map_ = map_;
     return snapshot;
   }
 
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
+  void ReleaseSnapshot(const Snapshot* snapshot) override {
     delete reinterpret_cast<const ModelSnapshot*>(snapshot);
   }
-  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
+  Status Write(const WriteOptions& options, WriteBatch* batch) override {
     class Handler : public WriteBatch::Handler {
      public:
       KVMap* map_;
@@ -2070,15 +2070,15 @@ class ModelDB : public DB {
     return batch->Iterate(&handler);
   }
 
-  virtual bool GetProperty(const Slice& property, std::string* value) {
+  bool GetProperty(const Slice& property, std::string* value) override {
     return false;
   }
-  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
+  void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
     for (int i = 0; i < n; i++) {
       sizes[i] = 0;
     }
   }
-  virtual void CompactRange(const Slice* start, const Slice* end) {}
+  void CompactRange(const Slice* start, const Slice* end) override {}
 
  private:
   class ModelIter : public Iterator {
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 7088ea7..5b31bb8 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -109,11 +109,11 @@ class TestWritableFile : public WritableFile {
  public:
   TestWritableFile(const FileState& state, WritableFile* f,
                    FaultInjectionTestEnv* env);
-  virtual ~TestWritableFile();
-  virtual Status Append(const Slice& data);
-  virtual Status Close();
-  virtual Status Flush();
-  virtual Status Sync();
+  ~TestWritableFile() override;
+  Status Append(const Slice& data) override;
+  Status Close() override;
+  Status Flush() override;
+  Status Sync() override;
 
  private:
   FileState state_;
@@ -128,13 +128,13 @@ class FaultInjectionTestEnv : public EnvWrapper {
  public:
   FaultInjectionTestEnv()
       : EnvWrapper(Env::Default()), filesystem_active_(true) {}
-  virtual ~FaultInjectionTestEnv() {}
-  virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result);
-  virtual Status NewAppendableFile(const std::string& fname,
-                                   WritableFile** result);
-  virtual Status DeleteFile(const std::string& f);
-  virtual Status RenameFile(const std::string& s, const std::string& t);
+  ~FaultInjectionTestEnv() override = default;
+  Status NewWritableFile(const std::string& fname,
+                         WritableFile** result) override;
+  Status NewAppendableFile(const std::string& fname,
+                           WritableFile** result) override;
+  Status DeleteFile(const std::string& f) override;
+  Status RenameFile(const std::string& s, const std::string& t) override;
 
   void WritableFileClosed(const FileState& state);
   Status DropUnsyncedFileData();
@@ -268,10 +268,11 @@ Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
 Status FaultInjectionTestEnv::DropUnsyncedFileData() {
   Status s;
   MutexLock l(&mutex_);
-  for (std::map<std::string, FileState>::const_iterator it =
-           db_file_state_.begin();
-       s.ok() && it != db_file_state_.end(); ++it) {
-    const FileState& state = it->second;
+  for (const auto& kvp : db_file_state_) {
+    if (!s.ok()) {
+      break;
+    }
+    const FileState& state = kvp.second;
     if (!state.IsFullySynced()) {
       s = state.DropUnsyncedData();
     }
@@ -340,12 +341,14 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
   std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
                                   new_files_since_last_dir_sync_.end());
   mutex_.Unlock();
-  Status s;
-  std::set<std::string>::const_iterator it;
-  for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
-    s = DeleteFile(*it);
+  Status status;
+  for (const auto& new_file : new_files) {
+    Status delete_status = DeleteFile(new_file);
+    if (!delete_status.ok() && status.ok()) {
+      status = std::move(delete_status);
+    }
   }
-  return s;
+  return status;
 }
 
 void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
diff --git a/db/log_reader.cc b/db/log_reader.cc
index f472723..b770fee 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -13,7 +13,7 @@
 namespace leveldb {
 namespace log {
 
-Reader::Reporter::~Reporter() {}
+Reader::Reporter::~Reporter() = default;
 
 Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
                uint64_t initial_offset)
diff --git a/db/log_writer.cc b/db/log_writer.cc
index 5e83f6a..bfb16fb 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -29,7 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
   InitTypeCrc(type_crc_);
 }
 
-Writer::~Writer() {}
+Writer::~Writer() = default;
 
 Status Writer::AddRecord(const Slice& slice) {
   const char* ptr = slice.data();
diff --git a/db/memtable.cc b/db/memtable.cc
index c91405c..00931d4 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -47,27 +47,28 @@ class MemTableIterator : public Iterator {
  public:
   explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
 
-  virtual bool Valid() const { return iter_.Valid(); }
-  virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
-  virtual void SeekToFirst() { iter_.SeekToFirst(); }
-  virtual void SeekToLast() { iter_.SeekToLast(); }
-  virtual void Next() { iter_.Next(); }
-  virtual void Prev() { iter_.Prev(); }
-  virtual Slice key() const { return GetLengthPrefixedSlice(iter_.key()); }
-  virtual Slice value() const {
+  MemTableIterator(const MemTableIterator&) = delete;
+  MemTableIterator& operator=(const MemTableIterator&) = delete;
+
+  ~MemTableIterator() override = default;
+
+  bool Valid() const override { return iter_.Valid(); }
+  void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); }
+  void SeekToFirst() override { iter_.SeekToFirst(); }
+  void SeekToLast() override { iter_.SeekToLast(); }
+  void Next() override { iter_.Next(); }
+  void Prev() override { iter_.Prev(); }
+  Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); }
+  Slice value() const override {
     Slice key_slice = GetLengthPrefixedSlice(iter_.key());
     return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
   }
 
-  virtual Status status() const { return Status::OK(); }
+  Status status() const override { return Status::OK(); }
 
  private:
   MemTable::Table::Iterator iter_;
   std::string tmp_;  // For passing to EncodeKey
-
-  // No copying allowed
-  MemTableIterator(const MemTableIterator&);
-  void operator=(const MemTableIterator&);
 };
 
 Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
diff --git a/db/version_edit.cc b/db/version_edit.cc
index 44a4d02..2385e7d 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -66,11 +66,10 @@ void VersionEdit::EncodeTo(std::string* dst) const {
     PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
   }
 
-  for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end(); ++iter) {
+  for (const auto& deleted_file_kvp : deleted_files_) {
     PutVarint32(dst, kDeletedFile);
-    PutVarint32(dst, iter->first);   // level
-    PutVarint64(dst, iter->second);  // file number
+    PutVarint32(dst, deleted_file_kvp.first);   // level
+    PutVarint64(dst, deleted_file_kvp.second);  // file number
   }
 
   for (size_t i = 0; i < new_files_.size(); i++) {
@@ -233,12 +232,11 @@ std::string VersionEdit::DebugString() const {
     r.append(" ");
     r.append(compact_pointers_[i].second.DebugString());
   }
-  for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end(); ++iter) {
+  for (const auto& deleted_files_kvp : deleted_files_) {
     r.append("\n  DeleteFile: ");
-    AppendNumberTo(&r, iter->first);
+    AppendNumberTo(&r, deleted_files_kvp.first);
     r.append(" ");
-    AppendNumberTo(&r, iter->second);
+    AppendNumberTo(&r, deleted_files_kvp.second);
   }
   for (size_t i = 0; i < new_files_.size(); i++) {
     const FileMetaData& f = new_files_[i].second;
diff --git a/db/version_edit.h b/db/version_edit.h
index 2dadda7..86b2f22 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -29,7 +29,7 @@ struct FileMetaData {
 class VersionEdit {
  public:
   VersionEdit() { Clear(); }
-  ~VersionEdit() {}
+  ~VersionEdit() = default;
 
   void Clear();
 
diff --git a/db/version_set.cc b/db/version_set.cc
index 96a92cc..099fa57 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -656,11 +656,9 @@ class VersionSet::Builder {
     }
 
     // Delete files
-    const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
-    for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
-         iter != del.end(); ++iter) {
-      const int level = iter->first;
-      const uint64_t number = iter->second;
+    for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
+      const int level = deleted_file_set_kvp.first;
+      const uint64_t number = deleted_file_set_kvp.second;
       levels_[level].deleted_files.insert(number);
     }
 
@@ -701,18 +699,17 @@ class VersionSet::Builder {
       const std::vector<FileMetaData*>& base_files = base_->files_[level];
       std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
       std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
-      const FileSet* added = levels_[level].added_files;
-      v->files_[level].reserve(base_files.size() + added->size());
-      for (FileSet::const_iterator added_iter = added->begin();
-           added_iter != added->end(); ++added_iter) {
+      const FileSet* added_files = levels_[level].added_files;
+      v->files_[level].reserve(base_files.size() + added_files->size());
+      for (const auto& added_file : *added_files) {
         // Add all smaller files listed in base_
         for (std::vector<FileMetaData*>::const_iterator bpos =
-                 std::upper_bound(base_iter, base_end, *added_iter, cmp);
+                 std::upper_bound(base_iter, base_end, added_file, cmp);
              base_iter != bpos; ++base_iter) {
           MaybeAddFile(v, level, *base_iter);
         }
 
-        MaybeAddFile(v, level, *added_iter);
+        MaybeAddFile(v, level, added_file);
       }
 
       // Add remaining base files
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index f7efe2b..c1056a1 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -184,14 +184,14 @@ class AddBoundaryInputsTest {
   std::vector<FileMetaData*> all_files_;
   InternalKeyComparator icmp_;
 
-  AddBoundaryInputsTest() : icmp_(BytewiseComparator()){};
+  AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}
 
   ~AddBoundaryInputsTest() {
     for (size_t i = 0; i < all_files_.size(); ++i) {
       delete all_files_[i];
     }
     all_files_.clear();
-  };
+  }
 
   FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
                                    InternalKey largest) {
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 2dec642..b54313c 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -28,9 +28,9 @@ static const size_t kHeader = 12;
 
 WriteBatch::WriteBatch() { Clear(); }
 
-WriteBatch::~WriteBatch() {}
+WriteBatch::~WriteBatch() = default;
 
-WriteBatch::Handler::~Handler() {}
+WriteBatch::Handler::~Handler() = default;
 
 void WriteBatch::Clear() {
   rep_.clear();
@@ -118,11 +118,11 @@ class MemTableInserter : public WriteBatch::Handler {
   SequenceNumber sequence_;
   MemTable* mem_;
 
-  virtual void Put(const Slice& key, const Slice& value) {
+  void Put(const Slice& key, const Slice& value) override {
     mem_->Add(sequence_, kTypeValue, key, value);
     sequence_++;
   }
-  virtual void Delete(const Slice& key) {
+  void Delete(const Slice& key) override {
     mem_->Add(sequence_, kTypeDeletion, key, Slice());
     sequence_++;
   }
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index b6b790c..2d4fbaa 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -223,16 +223,15 @@ class InMemoryEnv : public EnvWrapper {
  public:
   explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
 
-  virtual ~InMemoryEnv() {
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
-         ++i) {
-      i->second->Unref();
+  ~InMemoryEnv() override {
+    for (const auto& kvp : file_map_) {
+      kvp.second->Unref();
     }
   }
 
   // Partial implementation of the Env interface.
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result) {
+  Status NewSequentialFile(const std::string& fname,
+                           SequentialFile** result) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       *result = nullptr;
@@ -243,8 +242,8 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result) {
+  Status NewRandomAccessFile(const std::string& fname,
+                             RandomAccessFile** result) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       *result = nullptr;
@@ -255,8 +254,8 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
+  Status NewWritableFile(const std::string& fname,
+                         WritableFile** result) override {
     MutexLock lock(&mutex_);
     FileSystem::iterator it = file_map_.find(fname);
 
@@ -275,8 +274,8 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status NewAppendableFile(const std::string& fname,
-                                   WritableFile** result) {
+  Status NewAppendableFile(const std::string& fname,
+                           WritableFile** result) override {
     MutexLock lock(&mutex_);
     FileState** sptr = &file_map_[fname];
     FileState* file = *sptr;
@@ -288,19 +287,18 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual bool FileExists(const std::string& fname) {
+  bool FileExists(const std::string& fname) override {
     MutexLock lock(&mutex_);
     return file_map_.find(fname) != file_map_.end();
   }
 
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) {
+  Status GetChildren(const std::string& dir,
+                     std::vector<std::string>* result) override {
     MutexLock lock(&mutex_);
     result->clear();
 
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
-         ++i) {
-      const std::string& filename = i->first;
+    for (const auto& kvp : file_map_) {
+      const std::string& filename = kvp.first;
 
       if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
           Slice(filename).starts_with(Slice(dir))) {
@@ -321,7 +319,7 @@ class InMemoryEnv : public EnvWrapper {
     file_map_.erase(fname);
   }
 
-  virtual Status DeleteFile(const std::string& fname) {
+  Status DeleteFile(const std::string& fname) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       return Status::IOError(fname, "File not found");
@@ -331,11 +329,11 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status CreateDir(const std::string& dirname) { return Status::OK(); }
+  Status CreateDir(const std::string& dirname) override { return Status::OK(); }
 
-  virtual Status DeleteDir(const std::string& dirname) { return Status::OK(); }
+  Status DeleteDir(const std::string& dirname) override { return Status::OK(); }
 
-  virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
+  Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       return Status::IOError(fname, "File not found");
@@ -345,7 +343,8 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status RenameFile(const std::string& src, const std::string& target) {
+  Status RenameFile(const std::string& src,
+                    const std::string& target) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(src) == file_map_.end()) {
       return Status::IOError(src, "File not found");
@@ -357,22 +356,22 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status LockFile(const std::string& fname, FileLock** lock) {
+  Status LockFile(const std::string& fname, FileLock** lock) override {
     *lock = new FileLock;
     return Status::OK();
   }
 
-  virtual Status UnlockFile(FileLock* lock) {
+  Status UnlockFile(FileLock* lock) override {
     delete lock;
     return Status::OK();
   }
 
-  virtual Status GetTestDirectory(std::string* path) {
+  Status GetTestDirectory(std::string* path) override {
     *path = "/test";
     return Status::OK();
   }
 
-  virtual Status NewLogger(const std::string& fname, Logger** result) {
+  Status NewLogger(const std::string& fname, Logger** result) override {
     *result = new NoOpLogger;
     return Status::OK();
   }
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index ea3d9e5..b73014a 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -33,7 +33,7 @@ class LEVELDB_EXPORT Snapshot {
 
 // A range of keys
 struct LEVELDB_EXPORT Range {
-  Range() {}
+  Range() = default;
   Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
 
   Slice start;  // Included in the range
diff --git a/table/format.h b/table/format.h
index 2ad145c..e49dfdc 100644
--- a/table/format.h
+++ b/table/format.h
@@ -53,7 +53,7 @@ class Footer {
   // of two block handles and a magic number.
   enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
 
-  Footer() {}
+  Footer() = default;
 
   // The block handle for the metaindex block of the table
   const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
diff --git a/table/table_test.cc b/table/table_test.cc
index 0974052..3c63e32 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -89,7 +89,7 @@ struct STLLessThan {
 
 class StringSink : public WritableFile {
  public:
-  ~StringSink() {}
+  ~StringSink() = default;
 
   const std::string& contents() const { return contents_; }
 
@@ -111,7 +111,7 @@ class StringSource : public RandomAccessFile {
   StringSource(const Slice& contents)
       : contents_(contents.data(), contents.size()) {}
 
-  virtual ~StringSource() {}
+  virtual ~StringSource() = default;
 
   uint64_t Size() const { return contents_.size(); }
 
@@ -139,7 +139,7 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
 class Constructor {
  public:
   explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
-  virtual ~Constructor() {}
+  virtual ~Constructor() = default;
 
   void Add(const std::string& key, const Slice& value) {
     data_[key] = value.ToString();
@@ -152,8 +152,8 @@ class Constructor {
               KVMap* kvmap) {
     *kvmap = data_;
     keys->clear();
-    for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
-      keys->push_back(it->first);
+    for (const auto& kvp : data_) {
+      keys->push_back(kvp.first);
     }
     data_.clear();
     Status s = FinishImpl(options, *kvmap);
@@ -165,7 +165,7 @@ class Constructor {
 
   virtual Iterator* NewIterator() const = 0;
 
-  virtual const KVMap& data() { return data_; }
+  const KVMap& data() const { return data_; }
 
   virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor
 
@@ -177,14 +177,14 @@ class BlockConstructor : public Constructor {
  public:
   explicit BlockConstructor(const Comparator* cmp)
       : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
-  ~BlockConstructor() { delete block_; }
-  virtual Status FinishImpl(const Options& options, const KVMap& data) {
+  ~BlockConstructor() override { delete block_; }
+  Status FinishImpl(const Options& options, const KVMap& data) override {
     delete block_;
     block_ = nullptr;
     BlockBuilder builder(&options);
 
-    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
-      builder.Add(it->first, it->second);
+    for (const auto& kvp : data) {
+      builder.Add(kvp.first, kvp.second);
     }
     // Open the block
     data_ = builder.Finish().ToString();
@@ -195,12 +195,12 @@ class BlockConstructor : public Constructor {
     block_ = new Block(contents);
     return Status::OK();
   }
-  virtual Iterator* NewIterator() const {
+  Iterator* NewIterator() const override {
     return block_->NewIterator(comparator_);
   }
 
  private:
-  const Comparator* comparator_;
+  const Comparator* const comparator_;
   std::string data_;
   Block* block_;
 
@@ -211,14 +211,14 @@ class TableConstructor : public Constructor {
  public:
   TableConstructor(const Comparator* cmp)
       : Constructor(cmp), source_(nullptr), table_(nullptr) {}
-  ~TableConstructor() { Reset(); }
-  virtual Status FinishImpl(const Options& options, const KVMap& data) {
+  ~TableConstructor() override { Reset(); }
+  Status FinishImpl(const Options& options, const KVMap& data) override {
     Reset();
     StringSink sink;
     TableBuilder builder(options, &sink);
 
-    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
-      builder.Add(it->first, it->second);
+    for (const auto& kvp : data) {
+      builder.Add(kvp.first, kvp.second);
       ASSERT_TRUE(builder.status().ok());
     }
     Status s = builder.Finish();
@@ -233,7 +233,7 @@ class TableConstructor : public Constructor {
     return Table::Open(table_options, source_, sink.contents().size(), &table_);
   }
 
-  virtual Iterator* NewIterator() const {
+  Iterator* NewIterator() const override {
     return table_->NewIterator(ReadOptions());
   }
 
@@ -259,20 +259,25 @@ class TableConstructor : public Constructor {
 class KeyConvertingIterator : public Iterator {
  public:
   explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
-  virtual ~KeyConvertingIterator() { delete iter_; }
-  virtual bool Valid() const { return iter_->Valid(); }
-  virtual void Seek(const Slice& target) {
+
+  KeyConvertingIterator(const KeyConvertingIterator&) = delete;
+  KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;
+
+  ~KeyConvertingIterator() override { delete iter_; }
+
+  bool Valid() const override { return iter_->Valid(); }
+  void Seek(const Slice& target) override {
     ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
     std::string encoded;
     AppendInternalKey(&encoded, ikey);
     iter_->Seek(encoded);
   }
-  virtual void SeekToFirst() { iter_->SeekToFirst(); }
-  virtual void SeekToLast() { iter_->SeekToLast(); }
-  virtual void Next() { iter_->Next(); }
-  virtual void Prev() { iter_->Prev(); }
+  void SeekToFirst() override { iter_->SeekToFirst(); }
+  void SeekToLast() override { iter_->SeekToLast(); }
+  void Next() override { iter_->Next(); }
+  void Prev() override { iter_->Prev(); }
 
-  virtual Slice key() const {
+  Slice key() const override {
     assert(Valid());
     ParsedInternalKey key;
     if (!ParseInternalKey(iter_->key(), &key)) {
@@ -282,18 +287,14 @@ class KeyConvertingIterator : public Iterator {
     return key.user_key;
   }
 
-  virtual Slice value() const { return iter_->value(); }
-  virtual Status status() const {
+  Slice value() const override { return iter_->value(); }
+  Status status() const override {
     return status_.ok() ? iter_->status() : status_;
   }
 
  private:
   mutable Status status_;
   Iterator* iter_;
-
-  // No copying allowed
-  KeyConvertingIterator(const KeyConvertingIterator&);
-  void operator=(const KeyConvertingIterator&);
 };
 
 class MemTableConstructor : public Constructor {
@@ -303,24 +304,24 @@ class MemTableConstructor : public Constructor {
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
   }
-  ~MemTableConstructor() { memtable_->Unref(); }
-  virtual Status FinishImpl(const Options& options, const KVMap& data) {
+  ~MemTableConstructor() override { memtable_->Unref(); }
+  Status FinishImpl(const Options& options, const KVMap& data) override {
     memtable_->Unref();
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
     int seq = 1;
-    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
-      memtable_->Add(seq, kTypeValue, it->first, it->second);
+    for (const auto& kvp : data) {
+      memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
       seq++;
     }
     return Status::OK();
   }
-  virtual Iterator* NewIterator() const {
+  Iterator* NewIterator() const override {
     return new KeyConvertingIterator(memtable_->NewIterator());
   }
 
  private:
-  InternalKeyComparator internal_comparator_;
+  const InternalKeyComparator internal_comparator_;
   MemTable* memtable_;
 };
 
@@ -331,23 +332,23 @@ class DBConstructor : public Constructor {
     db_ = nullptr;
     NewDB();
   }
-  ~DBConstructor() { delete db_; }
-  virtual Status FinishImpl(const Options& options, const KVMap& data) {
+  ~DBConstructor() override { delete db_; }
+  Status FinishImpl(const Options& options, const KVMap& data) override {
     delete db_;
     db_ = nullptr;
     NewDB();
-    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
+    for (const auto& kvp : data) {
       WriteBatch batch;
-      batch.Put(it->first, it->second);
+      batch.Put(kvp.first, kvp.second);
       ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
     }
     return Status::OK();
   }
-  virtual Iterator* NewIterator() const {
+  Iterator* NewIterator() const override {
     return db_->NewIterator(ReadOptions());
   }
 
-  virtual DB* db() const { return db_; }
+  DB* db() const override { return db_; }
 
  private:
   void NewDB() {
@@ -365,7 +366,7 @@ class DBConstructor : public Constructor {
     ASSERT_TRUE(status.ok()) << status.ToString();
   }
 
-  const Comparator* comparator_;
+  const Comparator* const comparator_;
   DB* db_;
 };
 
diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc
index 5340a4d..1fc4626 100644
--- a/table/two_level_iterator.cc
+++ b/table/two_level_iterator.cc
@@ -77,7 +77,7 @@ TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
       index_iter_(index_iter),
       data_iter_(nullptr) {}
 
-TwoLevelIterator::~TwoLevelIterator() {}
+TwoLevelIterator::~TwoLevelIterator() = default;
 
 void TwoLevelIterator::Seek(const Slice& target) {
   index_iter_.Seek(target);
diff --git a/util/comparator.cc b/util/comparator.cc
index 26d1eb3..c5766e9 100644
--- a/util/comparator.cc
+++ b/util/comparator.cc
@@ -2,32 +2,34 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "leveldb/comparator.h"
+
 #include <algorithm>
 #include <cstdint>
 #include <string>
+#include <type_traits>
 
-#include "leveldb/comparator.h"
 #include "leveldb/slice.h"
 #include "util/logging.h"
 #include "util/no_destructor.h"
 
 namespace leveldb {
 
-Comparator::~Comparator() {}
+Comparator::~Comparator() = default;
 
 namespace {
 class BytewiseComparatorImpl : public Comparator {
  public:
-  BytewiseComparatorImpl() {}
+  BytewiseComparatorImpl() = default;
 
-  virtual const char* Name() const { return "leveldb.BytewiseComparator"; }
+  const char* Name() const override { return "leveldb.BytewiseComparator"; }
 
-  virtual int Compare(const Slice& a, const Slice& b) const {
+  int Compare(const Slice& a, const Slice& b) const override {
     return a.compare(b);
   }
 
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const {
+  void FindShortestSeparator(std::string* start,
+                             const Slice& limit) const override {
     // Find length of common prefix
     size_t min_length = std::min(start->size(), limit.size());
     size_t diff_index = 0;
@@ -49,7 +51,7 @@ class BytewiseComparatorImpl : public Comparator {
     }
   }
 
-  virtual void FindShortSuccessor(std::string* key) const {
+  void FindShortSuccessor(std::string* key) const override {
     // Find first character that can be incremented
     size_t n = key->size();
     for (size_t i = 0; i < n; i++) {
diff --git a/util/env.cc b/util/env.cc
index 6cd5f2e..d2f0aef 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -6,21 +6,21 @@
 
 namespace leveldb {
 
-Env::~Env() {}
+Env::~Env() = default;
 
 Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
   return Status::NotSupported("NewAppendableFile", fname);
 }
 
-SequentialFile::~SequentialFile() {}
+SequentialFile::~SequentialFile() = default;
 
-RandomAccessFile::~RandomAccessFile() {}
+RandomAccessFile::~RandomAccessFile() = default;
 
-WritableFile::~WritableFile() {}
+WritableFile::~WritableFile() = default;
 
-Logger::~Logger() {}
+Logger::~Logger() = default;
 
-FileLock::~FileLock() {}
+FileLock::~FileLock() = default;
 
 void Log(Logger* info_log, const char* format, ...) {
   if (info_log != nullptr) {

From 506b1722ef1a58d87325575d9bbcd3c8869381c7 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sat, 4 May 2019 18:05:13 -0700
Subject: [PATCH 079/181] Convert missed virtual -> override in db_test.cc.

PiperOrigin-RevId: 246680419
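
As a side note on why these virtual -> override conversions are worthwhile: override asks the compiler to verify that a member function really overrides a base-class virtual, so a signature mismatch becomes a compile error instead of silently introducing a new virtual function. A minimal sketch follows; the Handler/Put/OldStyle/NewStyle names are illustrative only, not leveldb code:

    #include <iostream>

    struct Handler {
      virtual ~Handler() = default;
      virtual void Put(int /*key*/) { std::cout << "Handler::Put\n"; }
    };

    // Plain virtual: a signature mismatch quietly declares a brand-new virtual
    // function instead of overriding Handler::Put, and nothing is diagnosed.
    struct OldStyle : Handler {
      virtual void Put(long /*key*/) { std::cout << "OldStyle::Put\n"; }
    };

    // With override, the same mismatch would be rejected at compile time.
    struct NewStyle : Handler {
      void Put(int /*key*/) override { std::cout << "NewStyle::Put\n"; }
      // void Put(long) override;  // error: does not override a base function
    };

    int main() {
      OldStyle old_style;
      NewStyle new_style;
      Handler* handler = &old_style;
      handler->Put(1);  // prints "Handler::Put" -- the mismatch went unnoticed
      handler = &new_style;
      handler->Put(1);  // prints "NewStyle::Put"
    }

The OldStyle call dispatching to Handler::Put is exactly the kind of silent bug that the override conversion rules out.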
---
 db/db_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index e7386f7..1da8db2 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2025,7 +2025,7 @@ class ModelDB : public DB {
 
   explicit ModelDB(const Options& options) : options_(options) {}
   ~ModelDB() override = default;
-  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
+  Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
     return DB::Put(o, k, v);
   }
   Status Delete(const WriteOptions& o, const Slice& key) override {

From 4bd052d7e8b0469b2b87664388e2a99cb212ecdb Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sun, 5 May 2019 12:55:41 -0700
Subject: [PATCH 080/181] Consolidate benchmark code to benchmarks/.

Currently, the benchmark used to assess leveldb changes lives in db/. The codebase also contains two benchmarks against other database engines in doc/bench/. Moving all the benchmarks to one place opens the way for extracting common code.

PiperOrigin-RevId: 246737541
---
 .clang-format                                 | 2 +-
 CMakeLists.txt                                | 6 +++---
 {db => benchmarks}/db_bench.cc                | 0
 {doc/bench => benchmarks}/db_bench_sqlite3.cc | 0
 {doc/bench => benchmarks}/db_bench_tree_db.cc | 0
 doc/benchmark.html                            | 6 +++---
 6 files changed, 7 insertions(+), 7 deletions(-)
 rename {db => benchmarks}/db_bench.cc (100%)
 rename {doc/bench => benchmarks}/db_bench_sqlite3.cc (100%)
 rename {doc/bench => benchmarks}/db_bench_tree_db.cc (100%)

diff --git a/.clang-format b/.clang-format
index 75f3401..f493f75 100644
--- a/.clang-format
+++ b/.clang-format
@@ -8,7 +8,7 @@ DerivePointerAlignment: false
 # Order them so that when imported to the authoritative repository they will be
 # in correct alphabetical order.
 IncludeCategories:
-  - Regex:           '^(<|"(db|helpers)/)'
+  - Regex:           '^(<|"(benchmarks|db|helpers)/)'
     Priority:        1
   - Regex:           '^"(leveldb)/'
     Priority:        2
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1409c06..96592d0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -374,12 +374,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
   endfunction(leveldb_benchmark)
 
   if(NOT BUILD_SHARED_LIBS)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/db/db_bench.cc")
+    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc")
   endif(NOT BUILD_SHARED_LIBS)
 
   check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
   if(HAVE_SQLITE3)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/doc/bench/db_bench_sqlite3.cc")
+    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc")
     target_link_libraries(db_bench_sqlite3 sqlite3)
   endif(HAVE_SQLITE3)
 
@@ -399,7 +399,7 @@ int main() {
   "  HAVE_KYOTOCABINET)
   set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
   if(HAVE_KYOTOCABINET)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/doc/bench/db_bench_tree_db.cc")
+    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc")
     target_link_libraries(db_bench_tree_db kyotocabinet)
   endif(HAVE_KYOTOCABINET)
 endif(LEVELDB_BUILD_BENCHMARKS)
diff --git a/db/db_bench.cc b/benchmarks/db_bench.cc
similarity index 100%
rename from db/db_bench.cc
rename to benchmarks/db_bench.cc
diff --git a/doc/bench/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc
similarity index 100%
rename from doc/bench/db_bench_sqlite3.cc
rename to benchmarks/db_bench_sqlite3.cc
diff --git a/doc/bench/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc
similarity index 100%
rename from doc/bench/db_bench_tree_db.cc
rename to benchmarks/db_bench_tree_db.cc
diff --git a/doc/benchmark.html b/doc/benchmark.html
index c463977..f3fd771 100644
--- a/doc/benchmark.html
+++ b/doc/benchmark.html
@@ -90,9 +90,9 @@ div.bsql {
 <h4>Benchmark Source Code</h4>
 <p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p>
 <ul>
-	<li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li>
-	<li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li>
-	<li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li>
+	<li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li>
+	<li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li>
+	<li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li>
 </ul>
 
 <h4>Custom Build Specifications</h4>

From cd1ec032cd276409ba403cab4d0b2548dd26b890 Mon Sep 17 00:00:00 2001
From: allangj <allangj1618@gmail.com>
Date: Fri, 8 Apr 2016 09:08:21 -0600
Subject: [PATCH 081/181] Add argument definition for void c functions.

Allow the use of c.h in projects built with -Wstrict-prototypes.
Modify CMakeLists.txt to compile the C sources with -Wstrict-prototypes.
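
Background for the (void) changes below: in C, a declaration with an empty parameter list leaves the arguments unspecified, which is what -Wstrict-prototypes flags; the (void) form is a real prototype and is equally valid C++. A minimal sketch, using made-up function names rather than the actual leveldb C API:

    /* Old style: in C this leaves the parameter list unspecified, which is
       what -Wstrict-prototypes warns about; in C++ it already means "takes
       no arguments". */
    int version_old_style();

    /* Prototype style: explicitly takes no arguments in both C and C++. */
    int version_new_style(void);

    int version_old_style() { return 1; }
    int version_new_style(void) { return 1; }

    int main(void) { return version_old_style() == version_new_style() ? 0 : 1; }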
---
 CMakeLists.txt      |  2 ++
 include/leveldb/c.h | 14 +++++++-------
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 96592d0..4f16f62 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -50,6 +50,8 @@ check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
 
 include(CheckCXXSourceCompiles)
 
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+
 # Test whether -Wthread-safety is available. See
 # https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
 # -Werror is necessary because unknown attributes only generate warnings.
diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index 8e0d592..04e383c 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -147,7 +147,7 @@ LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*,
 
 /* Write batch */
 
-LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create();
+LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create(void);
 LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*);
 LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*);
 LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
@@ -164,7 +164,7 @@ LEVELDB_EXPORT void leveldb_writebatch_append(
 
 /* Options */
 
-LEVELDB_EXPORT leveldb_options_t* leveldb_options_create();
+LEVELDB_EXPORT leveldb_options_t* leveldb_options_create(void);
 LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*);
 LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
                                                    leveldb_comparator_t*);
@@ -219,7 +219,7 @@ LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
 
 /* Read options */
 
-LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create();
+LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create(void);
 LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
 LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
     leveldb_readoptions_t*, unsigned char);
@@ -230,7 +230,7 @@ LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
 
 /* Write options */
 
-LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create();
+LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void);
 LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
 LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
                                                   unsigned char);
@@ -242,7 +242,7 @@ LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache);
 
 /* Env */
 
-LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env();
+LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env(void);
 LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*);
 
 /* If not NULL, the returned buffer must be released using leveldb_free(). */
@@ -258,10 +258,10 @@ LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*);
 LEVELDB_EXPORT void leveldb_free(void* ptr);
 
 /* Return the major version number for this release. */
-LEVELDB_EXPORT int leveldb_major_version();
+LEVELDB_EXPORT int leveldb_major_version(void);
 
 /* Return the minor version number for this release. */
-LEVELDB_EXPORT int leveldb_minor_version();
+LEVELDB_EXPORT int leveldb_minor_version(void);
 
 #ifdef __cplusplus
 } /* end extern "C" */

From 142035edd4b1ab431c0ecbd547d4a77f1eca0667 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Mon, 6 May 2019 10:51:11 -0700
Subject: [PATCH 082/181] Initialize Stats::start_ before first use in
 Stats::Start().

Avoids a use before initialization error. This fixes issue #676.
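
For context, a stripped-down sketch of the ordering hazard (a stand-in class, not the actual db_bench code):

    #include <cstdint>

    class StatsSketch {
     public:
      StatsSketch() { Start(); }

      void Start() {
        // Before the fix, the equivalent of "last_op_finish_ = start_;" ran
        // here, reading start_ before it had ever been assigned (the
        // constructor calls Start() on a freshly constructed object), so
        // last_op_finish_ picked up an indeterminate value.
        //
        // Initializing all three timestamps from the clock in one statement,
        // as the patch does, removes the ordering dependency.
        start_ = finish_ = last_op_finish_ = NowMicros();
      }

     private:
      static std::uint64_t NowMicros() { return 0; }  // stand-in for g_env->NowMicros()

      std::uint64_t start_;
      std::uint64_t finish_;
      std::uint64_t last_op_finish_;
    };

    int main() { StatsSketch stats; (void)stats; }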

PiperOrigin-RevId: 246855204
---
 benchmarks/db_bench.cc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 3090b43..3696023 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -187,14 +187,12 @@ class Stats {
 
   void Start() {
     next_report_ = 100;
-    last_op_finish_ = start_;
     hist_.Clear();
     done_ = 0;
     bytes_ = 0;
     seconds_ = 0;
-    start_ = g_env->NowMicros();
-    finish_ = start_;
     message_.clear();
+    start_ = finish_ = last_op_finish_ = g_env->NowMicros();
   }
 
   void Merge(const Stats& other) {

From a7528a5d2bd29126b60a277b528ed606b67c1771 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 6 May 2019 10:58:38 -0700
Subject: [PATCH 083/181] Clean up util/coding.{h,cc}.

1) Inline EncodeFixed{32,64}(). They emit single machine instructions on 64-bit processors.
2) Remove size narrowing compiler warnings from DecodeFixed{32,64}().
3) Add comments explaining the current state of optimizations in compilers we care about.
4) Change C-style includes, like <stdint.h>, to C++ style, like <cstdint>.
5) memcpy -> std::memcpy.

The optimization comments are based on https://godbolt.org/z/RdIqS1. The missed optimization opportunities in clang have been reported as https://bugs.llvm.org/show_bug.cgi?id=41761
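
For readers less familiar with these helpers, a small usage sketch of the fixed-width encode/decode round trip (assuming a leveldb checkout so that util/coding.h is on the include path):

    #include <cassert>
    #include <cstdint>

    #include "util/coding.h"

    int main() {
      char buffer[sizeof(uint64_t)];

      // Inlined by this change; a single mov/str on little-endian targets.
      leveldb::EncodeFixed32(buffer, 0xdeadbeefU);
      assert(leveldb::DecodeFixed32(buffer) == 0xdeadbeefU);

      leveldb::EncodeFixed64(buffer, 0x0123456789abcdefULL);
      assert(leveldb::DecodeFixed64(buffer) == 0x0123456789abcdefULL);
      return 0;
    }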

The change does not have significant impact on benchmarks. Results below.

LevelDB:    version 1.22
Date:       Mon May  6 10:42:18 2019
CPU:        72 * Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz
CPUCache:   25344 KB
Keys:       16 bytes each
Values:     100 bytes each (50 bytes after compression)
Entries:    1000000
RawSize:    110.6 MB (estimated)
FileSize:   62.9 MB (estimated)

With change
------------------------------------------------
fillseq      :       2.327 micros/op;   47.5 MB/s
fillsync     :    4185.526 micros/op;    0.0 MB/s (1000 ops)
fillrandom   :       3.662 micros/op;   30.2 MB/s
overwrite    :       4.261 micros/op;   26.0 MB/s
readrandom   :       4.239 micros/op; (1000000 of 1000000 found)
readrandom   :       3.649 micros/op; (1000000 of 1000000 found)
readseq      :       0.174 micros/op;  636.7 MB/s
readreverse  :       0.271 micros/op;  408.7 MB/s
compact      :  570495.000 micros/op;
readrandom   :       2.735 micros/op; (1000000 of 1000000 found)
readseq      :       0.118 micros/op;  937.3 MB/s
readreverse  :       0.190 micros/op;  583.7 MB/s
fill100K     :     860.164 micros/op;  110.9 MB/s (1000 ops)
crc32c       :       1.131 micros/op; 3455.2 MB/s (4K per op)
snappycomp   :       3.034 micros/op; 1287.5 MB/s (output: 55.1%)
snappyuncomp :       0.544 micros/op; 7176.0 MB/s

Baseline
------------------------------------------------
fillseq      :       2.365 micros/op;   46.8 MB/s
fillsync     :    4240.165 micros/op;    0.0 MB/s (1000 ops)
fillrandom   :       3.244 micros/op;   34.1 MB/s
overwrite    :       4.153 micros/op;   26.6 MB/s
readrandom   :       4.698 micros/op; (1000000 of 1000000 found)
readrandom   :       4.065 micros/op; (1000000 of 1000000 found)
readseq      :       0.192 micros/op;  576.3 MB/s
readreverse  :       0.286 micros/op;  386.7 MB/s
compact      :  635979.000 micros/op;
readrandom   :       3.264 micros/op; (1000000 of 1000000 found)
readseq      :       0.169 micros/op;  652.8 MB/s
readreverse  :       0.213 micros/op;  519.5 MB/s
fill100K     :    1055.367 micros/op;   90.4 MB/s (1000 ops)
crc32c       :       1.353 micros/op; 2887.3 MB/s (4K per op)
snappycomp   :       3.036 micros/op; 1286.7 MB/s (output: 55.1%)
snappyuncomp :       0.540 micros/op; 7238.6 MB/s
PiperOrigin-RevId: 246856811
---
 util/coding.cc |  26 -------------
 util/coding.h  | 102 +++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 81 insertions(+), 47 deletions(-)

diff --git a/util/coding.cc b/util/coding.cc
index e2089df..55be020 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -6,32 +6,6 @@
 
 namespace leveldb {
 
-void EncodeFixed32(char* dst, uint32_t value) {
-  if (port::kLittleEndian) {
-    memcpy(dst, &value, sizeof(value));
-  } else {
-    dst[0] = value & 0xff;
-    dst[1] = (value >> 8) & 0xff;
-    dst[2] = (value >> 16) & 0xff;
-    dst[3] = (value >> 24) & 0xff;
-  }
-}
-
-void EncodeFixed64(char* dst, uint64_t value) {
-  if (port::kLittleEndian) {
-    memcpy(dst, &value, sizeof(value));
-  } else {
-    dst[0] = value & 0xff;
-    dst[1] = (value >> 8) & 0xff;
-    dst[2] = (value >> 16) & 0xff;
-    dst[3] = (value >> 24) & 0xff;
-    dst[4] = (value >> 32) & 0xff;
-    dst[5] = (value >> 40) & 0xff;
-    dst[6] = (value >> 48) & 0xff;
-    dst[7] = (value >> 56) & 0xff;
-  }
-}
-
 void PutFixed32(std::string* dst, uint32_t value) {
   char buf[sizeof(value)];
   EncodeFixed32(buf, value);
diff --git a/util/coding.h b/util/coding.h
index d9eeaa3..92a961f 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -10,9 +10,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_CODING_H_
 #define STORAGE_LEVELDB_UTIL_CODING_H_
 
-#include <stdint.h>
-#include <string.h>
-
+#include <cstdint>
+#include <cstring>
 #include <string>
 
 #include "leveldb/slice.h"
@@ -43,45 +42,106 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
 // Returns the length of the varint32 or varint64 encoding of "v"
 int VarintLength(uint64_t v);
 
-// Lower-level versions of Put... that write directly into a character buffer
-// REQUIRES: dst has enough space for the value being written
-void EncodeFixed32(char* dst, uint32_t value);
-void EncodeFixed64(char* dst, uint64_t value);
-
 // Lower-level versions of Put... that write directly into a character buffer
 // and return a pointer just past the last byte written.
 // REQUIRES: dst has enough space for the value being written
 char* EncodeVarint32(char* dst, uint32_t value);
 char* EncodeVarint64(char* dst, uint64_t value);
 
+// TODO(costan): Remove port::kLittleEndian and the fast paths based on
+//               std::memcpy when clang learns to optimize the generic code, as
+//               described in https://bugs.llvm.org/show_bug.cgi?id=41761
+//
+// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
+// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
+// the platform-independent code in EncodeFixed{32,64}() to mov / str.
+
+// Lower-level versions of Put... that write directly into a character buffer
+// REQUIRES: dst has enough space for the value being written
+
+inline void EncodeFixed32(char* dst, uint32_t value) {
+  uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+  if (port::kLittleEndian) {
+    // Fast path for little-endian CPUs. All major compilers optimize this to a
+    // single mov (x86_64) / str (ARM) instruction.
+    std::memcpy(buffer, &value, sizeof(uint32_t));
+    return;
+  }
+
+  // Platform-independent code.
+  // Currently, only gcc optimizes this to a single mov / str instruction.
+  buffer[0] = static_cast<uint8_t>(value);
+  buffer[1] = static_cast<uint8_t>(value >> 8);
+  buffer[2] = static_cast<uint8_t>(value >> 16);
+  buffer[3] = static_cast<uint8_t>(value >> 24);
+}
+
+inline void EncodeFixed64(char* dst, uint64_t value) {
+  uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+  if (port::kLittleEndian) {
+    // Fast path for little-endian CPUs. All major compilers optimize this to a
+    // single mov (x86_64) / str (ARM) instruction.
+    std::memcpy(buffer, &value, sizeof(uint64_t));
+    return;
+  }
+
+  // Platform-independent code.
+  // Currently, only gcc optimizes this to a single mov / str instruction.
+  buffer[0] = static_cast<uint8_t>(value);
+  buffer[1] = static_cast<uint8_t>(value >> 8);
+  buffer[2] = static_cast<uint8_t>(value >> 16);
+  buffer[3] = static_cast<uint8_t>(value >> 24);
+  buffer[4] = static_cast<uint8_t>(value >> 32);
+  buffer[5] = static_cast<uint8_t>(value >> 40);
+  buffer[6] = static_cast<uint8_t>(value >> 48);
+  buffer[7] = static_cast<uint8_t>(value >> 56);
+}
+
 // Lower-level versions of Get... that read directly from a character buffer
 // without any bounds checking.
 
 inline uint32_t DecodeFixed32(const char* ptr) {
+  const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
   if (port::kLittleEndian) {
-    // Load the raw bytes
+    // Fast path for little-endian CPUs. All major compilers optimize this to a
+    // single mov (x86_64) / ldr (ARM) instruction.
     uint32_t result;
-    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    std::memcpy(&result, buffer, sizeof(uint32_t));
     return result;
-  } else {
-    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
-            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
-            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
-            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
   }
+
+  // Platform-independent code.
+  // Clang and gcc optimize this to a single mov / ldr instruction.
+  return (static_cast<uint32_t>(buffer[0])) |
+         (static_cast<uint32_t>(buffer[1]) << 8) |
+         (static_cast<uint32_t>(buffer[2]) << 16) |
+         (static_cast<uint32_t>(buffer[3]) << 24);
 }
 
 inline uint64_t DecodeFixed64(const char* ptr) {
+  const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
   if (port::kLittleEndian) {
-    // Load the raw bytes
+    // Fast path for little-endian CPUs. All major compilers optimize this to a
+    // single mov (x86_64) / ldr (ARM) instruction.
     uint64_t result;
-    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    std::memcpy(&result, buffer, sizeof(uint64_t));
     return result;
-  } else {
-    uint64_t lo = DecodeFixed32(ptr);
-    uint64_t hi = DecodeFixed32(ptr + 4);
-    return (hi << 32) | lo;
   }
+
+  // Platform-independent code.
+  // Clang and gcc optimize this to a single mov / ldr instruction.
+  return (static_cast<uint64_t>(buffer[0])) |
+         (static_cast<uint64_t>(buffer[1]) << 8) |
+         (static_cast<uint64_t>(buffer[2]) << 16) |
+         (static_cast<uint64_t>(buffer[3]) << 24) |
+         (static_cast<uint64_t>(buffer[4]) << 32) |
+         (static_cast<uint64_t>(buffer[5]) << 40) |
+         (static_cast<uint64_t>(buffer[6]) << 48) |
+         (static_cast<uint64_t>(buffer[7]) << 56);
 }
 
 // Internal routine for use by fallback path of GetVarint32Ptr

From 9521545b062841409cf66eff0655feff09d9fd82 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Mon, 6 May 2019 15:20:42 -0700
Subject: [PATCH 084/181] Formatting changes for prior O_CLOEXEC fix.

Two minor corrections to bring the 900f7d37eb322 commit
into conformance with the Google C++ style guide.

PiperOrigin-RevId: 246907647
---
 util/env_posix.cc      | 22 +++++++++++-----------
 util/env_posix_test.cc |  4 ++--
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index d94be10..cd0508b 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -50,9 +50,9 @@ int g_mmap_limit = kDefaultMmapLimit;
 
 // Common flags defined for all posix open operations
 #if defined(HAVE_O_CLOEXEC)
-constexpr const int O_FLAGS = O_CLOEXEC;
+constexpr const int kOpenBaseFlags = O_CLOEXEC;
 #else
-constexpr const int O_FLAGS = 0;
+constexpr const int kOpenBaseFlags = 0;
 #endif  // defined(HAVE_O_CLOEXEC)
 
 constexpr const size_t kWritableFileBufferSize = 65536;
@@ -172,7 +172,7 @@ class PosixRandomAccessFile final : public RandomAccessFile {
               char* scratch) const override {
     int fd = fd_;
     if (!has_permanent_fd_) {
-      fd = ::open(filename_.c_str(), O_RDONLY | O_FLAGS);
+      fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags);
       if (fd < 0) {
         return PosixError(filename_, errno);
       }
@@ -350,7 +350,7 @@ class PosixWritableFile final : public WritableFile {
       return status;
     }
 
-    int fd = ::open(dirname_.c_str(), O_RDONLY | O_FLAGS);
+    int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags);
     if (fd < 0) {
       status = PosixError(dirname_, errno);
     } else {
@@ -498,7 +498,7 @@ class PosixEnv : public Env {
 
   Status NewSequentialFile(const std::string& filename,
                            SequentialFile** result) override {
-    int fd = ::open(filename.c_str(), O_RDONLY | O_FLAGS);
+    int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -511,7 +511,7 @@ class PosixEnv : public Env {
   Status NewRandomAccessFile(const std::string& filename,
                              RandomAccessFile** result) override {
     *result = nullptr;
-    int fd = ::open(filename.c_str(), O_RDONLY | O_FLAGS);
+    int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
     if (fd < 0) {
       return PosixError(filename, errno);
     }
@@ -543,8 +543,8 @@ class PosixEnv : public Env {
 
   Status NewWritableFile(const std::string& filename,
                          WritableFile** result) override {
-    int fd =
-        ::open(filename.c_str(), O_TRUNC | O_WRONLY | O_CREAT | O_FLAGS, 0644);
+    int fd = ::open(filename.c_str(),
+                    O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -556,8 +556,8 @@ class PosixEnv : public Env {
 
   Status NewAppendableFile(const std::string& filename,
                            WritableFile** result) override {
-    int fd =
-        ::open(filename.c_str(), O_APPEND | O_WRONLY | O_CREAT | O_FLAGS, 0644);
+    int fd = ::open(filename.c_str(),
+                    O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
     if (fd < 0) {
       *result = nullptr;
       return PosixError(filename, errno);
@@ -627,7 +627,7 @@ class PosixEnv : public Env {
   Status LockFile(const std::string& filename, FileLock** lock) override {
     *lock = nullptr;
 
-    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | O_FLAGS, 0644);
+    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644);
     if (fd < 0) {
       return PosixError(filename, errno);
     }
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 3f7bfda..54d43f0 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -117,7 +117,7 @@ TEST(EnvPosixTest, TestCloseOnExec) {
 
 #endif  // defined(HAVE_O_CLOEXEC)
 
-int cloexecChild() {
+int CloexecChild() {
   // Checks for open file descriptors in the range 3..FD_SETSIZE.
   for (int i = 3; i < FD_SETSIZE; i++) {
     int dup_result = dup2(i, i);
@@ -156,7 +156,7 @@ int cloexecChild() {
 int main(int argc, char** argv) {
   // Check if this is the child process for TestCloseOnExec
   if (argc > 1 && strcmp(argv[1], "-cloexec-child") == 0) {
-    return leveldb::cloexecChild();
+    return leveldb::CloexecChild();
   }
   // All tests currently run with the same read-only file limits.
   leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,

From 27dc99fb2642cadc87c9aaec82c54a2c725ee0d6 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 7 May 2019 14:19:08 -0700
Subject: [PATCH 085/181] Fix EnvPosix tests on Travis CI.

The previous attempt to have EnvPosix use O_CLOEXEC (close-on-exec) when opening file descriptors added tests that relied on procfs, which is Linux-specific. These tests failed on macOS. Unfortunately, the test failures were not caught due to a (since fixed) error in our Travis CI configuration.

This CL restructures the tests to rely only on POSIX features. Since there is no POSIX-compliant way to get a file name/path out of a file descriptor, this CL breaks up the O_CLOEXEC test into multiple tests, where each Env method that creates an FD gets its own test. This is intended to make it easier to find and fix errors in Env implementations.

This CL also fixes the implementation of NewLogger() to use O_CLOEXEC on macOS. The current implementation passes "we" to fopen(), but the macOS standard C library does not implement the "e" flag yet.
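
A condensed sketch of the resulting NewLogger() pattern: open the descriptor with O_CLOEXEC explicitly and hand it to fdopen(), instead of depending on fopen()'s "e" mode flag. The OpenCloexecStream name is made up for illustration:

    #include <cstdio>

    #include <fcntl.h>
    #include <unistd.h>

    // Hypothetical helper mirroring the NewLogger() change in this patch.
    std::FILE* OpenCloexecStream(const char* path) {
      int fd = ::open(path, O_APPEND | O_WRONLY | O_CREAT | O_CLOEXEC, 0644);
      if (fd < 0) return nullptr;
      std::FILE* fp = ::fdopen(fd, "w");
      if (fp == nullptr) ::close(fd);  // fdopen() failed; do not leak the descriptor
      return fp;
    }

    int main() {
      std::FILE* log_file = OpenCloexecStream("/tmp/cloexec_demo.log");
      if (log_file != nullptr) std::fclose(log_file);
    }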

PiperOrigin-RevId: 247088953
---
 util/env_posix.cc      |  10 +-
 util/env_posix_test.cc | 353 +++++++++++++++++++++++++++++++----------
 2 files changed, 278 insertions(+), 85 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index cd0508b..0cfb069 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -683,8 +683,16 @@ class PosixEnv : public Env {
   }
 
   Status NewLogger(const std::string& filename, Logger** result) override {
-    std::FILE* fp = std::fopen(filename.c_str(), "we");
+    int fd = ::open(filename.c_str(),
+                    O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+    if (fd < 0) {
+      *result = nullptr;
+      return PosixError(filename, errno);
+    }
+
+    std::FILE* fp = ::fdopen(fd, "w");
     if (fp == nullptr) {
+      ::close(fd);
       *result = nullptr;
       return PosixError(filename, errno);
     } else {
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 54d43f0..9675d73 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -2,14 +2,167 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <sys/resource.h>
 #include <sys/wait.h>
 #include <unistd.h>
 
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "util/env_posix_test_helper.h"
 #include "util/testharness.h"
 
+#if HAVE_O_CLOEXEC
+
+namespace {
+
+// Exit codes for the helper process spawned by TestCloseOnExec* tests.
+// Useful for debugging test failures.
+constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
+constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
+constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
+
+// Global set by main() and read in TestCloseOnExec.
+//
+// The argv[0] value is stored in a std::vector instead of a std::string because
+// std::string does not return a mutable pointer to its buffer until C++17.
+//
+// The vector stores the string pointed to by argv[0], plus the trailing null.
+std::vector<char>* GetArgvZero() {
+  static std::vector<char> program_name;
+  return &program_name;
+}
+
+// Command-line switch used to run this test as the CloseOnExecSwitch helper.
+static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
+
+// Executed in a separate process by TestCloseOnExec* tests.
+//
+// main() delegates to this function when the test executable is launched with
+// a special command-line switch. TestCloseOnExec* tests fork()+exec() the test
+// executable and pass the special command-line switch.
+//
+// When main() delegates to this function, the process probes whether a given
+// file descriptor is open, and communicates the result via its exit code.
+int TestCloseOnExecHelperMain(char* pid_arg) {
+  int fd = std::atoi(pid_arg);
+  // When given the same file descriptor twice, dup2() returns -1 if the
+  // file descriptor is closed, or the given file descriptor if it is open.
+  if (::dup2(fd, fd) == fd) {
+    std::fprintf(stderr, "Unexpected open fd %d\n", fd);
+    return kTextCloseOnExecHelperFoundOpenFdCode;
+  }
+  // Double-check that dup2() is saying the file descriptor is closed.
+  if (errno != EBADF) {
+    std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
+                 fd, std::strerror(errno));
+    return kTextCloseOnExecHelperDup2FailedCode;
+  }
+  return 0;
+}
+
+// File descriptors are small non-negative integers.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetMaxFileDescriptor(int* result_fd) {
+  // Get the maximum file descriptor number.
+  ::rlimit fd_rlimit;
+  ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
+  *result_fd = fd_rlimit.rlim_cur;
+}
+
+// Iterates through all possible FDs and returns the currently open ones.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) {
+  int max_fd = 0;
+  GetMaxFileDescriptor(&max_fd);
+
+  for (int fd = 0; fd < max_fd; ++fd) {
+    if (::dup2(fd, fd) != fd) {
+      // When given the same file descriptor twice, dup2() returns -1 if the
+      // file descriptor is closed, or the given file descriptor if it is open.
+      //
+      // Double-check that dup2() is saying the fd is closed.
+      ASSERT_EQ(EBADF, errno)
+          << "dup2() should set errno to EBADF on closed file descriptors";
+      continue;
+    }
+    open_fds->insert(fd);
+  }
+}
+
+// Finds an FD open since a previous call to GetOpenFileDescriptors().
+//
+// |baseline_open_fds| is the result of a previous GetOpenFileDescriptors()
+// call. Assumes that exactly one FD was opened since that call.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetNewlyOpenedFileDescriptor(
+    const std::unordered_set<int>& baseline_open_fds, int* result_fd) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
+  for (int fd : baseline_open_fds) {
+    ASSERT_EQ(1, open_fds.count(fd))
+        << "Previously opened file descriptor was closed during test setup";
+    open_fds.erase(fd);
+  }
+  ASSERT_EQ(1, open_fds.size())
+      << "Expected exactly one newly opened file descriptor during test setup";
+  *result_fd = *open_fds.begin();
+}
+
+// Check that a fork()+exec()-ed child process does not have an extra open FD.
+void CheckCloseOnExecDoesNotLeakFDs(
+    const std::unordered_set<int>& baseline_open_fds) {
+  // Prepare the argument list for the child process.
+  // execv() wants mutable buffers.
+  char switch_buffer[sizeof(kTestCloseOnExecSwitch)];
+  std::memcpy(switch_buffer, kTestCloseOnExecSwitch,
+              sizeof(kTestCloseOnExecSwitch));
+
+  int probed_fd;
+  GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd);
+  std::string fd_string = std::to_string(probed_fd);
+  std::vector<char> fd_buffer(fd_string.begin(), fd_string.end());
+  fd_buffer.emplace_back('\0');
+
+  // The helper process is launched with the command below.
+  //      env_posix_tests --test-close-on-exec-helper 3
+  char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(),
+                        nullptr};
+
+  constexpr int kForkInChildProcessReturnValue = 0;
+  int child_pid = fork();
+  if (child_pid == kForkInChildProcessReturnValue) {
+    ::execv(child_argv[0], child_argv);
+    std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
+    std::exit(kTextCloseOnExecHelperExecFailedCode);
+  }
+
+  int child_status = 0;
+  ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0));
+  ASSERT_TRUE(WIFEXITED(child_status))
+      << "The helper process did not exit with an exit code";
+  ASSERT_EQ(0, WEXITSTATUS(child_status))
+      << "The helper process encountered an error";
+}
+
+}  // namespace
+
+#endif  // HAVE_O_CLOEXEC
+
 namespace leveldb {
 
 static const int kReadOnlyFileLimit = 4;
@@ -58,106 +211,138 @@ TEST(EnvPosixTest, TestOpenOnRead) {
   ASSERT_OK(env_->DeleteFile(test_file));
 }
 
-#if defined(HAVE_O_CLOEXEC)
+#if HAVE_O_CLOEXEC
 
-TEST(EnvPosixTest, TestCloseOnExec) {
-  // Test that file handles are not inherited by child processes.
+TEST(EnvPosixTest, TestCloseOnExecSequentialFile) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
 
-  // Open file handles with each of the open methods.
   std::string test_dir;
   ASSERT_OK(env_->GetTestDirectory(&test_dir));
-  std::vector<std::string> test_files = {
-      test_dir + "/close_on_exec_seq.txt",
-      test_dir + "/close_on_exec_rand.txt",
-      test_dir + "/close_on_exec_write.txt",
-      test_dir + "/close_on_exec_append.txt",
-      test_dir + "/close_on_exec_lock.txt",
-      test_dir + "/close_on_exec_log.txt",
-  };
-  for (const std::string& test_file : test_files) {
-    const char kFileData[] = "0123456789";
-    ASSERT_OK(WriteStringToFile(env_, kFileData, test_file));
-  }
-  leveldb::SequentialFile* seqFile = nullptr;
-  leveldb::RandomAccessFile* randFile = nullptr;
-  leveldb::WritableFile* writeFile = nullptr;
-  leveldb::WritableFile* appendFile = nullptr;
-  leveldb::FileLock* lockFile = nullptr;
-  leveldb::Logger* logFile = nullptr;
-  ASSERT_OK(env_->NewSequentialFile(test_files[0], &seqFile));
-  ASSERT_OK(env_->NewRandomAccessFile(test_files[1], &randFile));
-  ASSERT_OK(env_->NewWritableFile(test_files[2], &writeFile));
-  ASSERT_OK(env_->NewAppendableFile(test_files[3], &appendFile));
-  ASSERT_OK(env_->LockFile(test_files[4], &lockFile));
-  ASSERT_OK(env_->NewLogger(test_files[5], &logFile));
+  std::string file_path = test_dir + "/close_on_exec_sequential.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
 
-  // Fork a child process and wait for it to complete.
-  int pid = fork();
-  if (pid == 0) {
-    const char* const child[] = {"/proc/self/exe", "-cloexec-child", nullptr};
-    execv(child[0], const_cast<char* const*>(child));
-    printf("Error spawning child process: %s\n", strerror(errno));
-    exit(6);
-  }
-  int status;
-  waitpid(pid, &status, 0);
-  ASSERT_EQ(0, WEXITSTATUS(status));
+  leveldb::SequentialFile* file = nullptr;
+  ASSERT_OK(env_->NewSequentialFile(file_path, &file));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  delete file;
 
-  // cleanup
-  ASSERT_OK(env_->UnlockFile(lockFile));
-  delete seqFile;
-  delete randFile;
-  delete writeFile;
-  delete appendFile;
-  delete logFile;
-  for (const std::string& test_file : test_files) {
-    ASSERT_OK(env_->DeleteFile(test_file));
-  }
+  ASSERT_OK(env_->DeleteFile(file_path));
 }
 
-#endif  // defined(HAVE_O_CLOEXEC)
+TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
 
-int CloexecChild() {
-  // Checks for open file descriptors in the range 3..FD_SETSIZE.
-  for (int i = 3; i < FD_SETSIZE; i++) {
-    int dup_result = dup2(i, i);
-    if (dup_result != -1) {
-      printf("Unexpected open file %d\n", i);
-      char nbuf[28];
-      snprintf(nbuf, 28, "/proc/self/fd/%d", i);
-      char dbuf[1024];
-      int result = readlink(nbuf, dbuf, 1024);
-      if (0 < result && result < 1024) {
-        dbuf[result] = 0;
-        printf("File descriptor %d is %s\n", i, dbuf);
-        if (strstr(dbuf, "close_on_exec_") == nullptr) {
-          continue;
-        }
-      } else if (result >= 1024) {
-        printf("(file name length is too long)\n");
-      } else {
-        printf("Couldn't get file name: %s\n", strerror(errno));
-      }
-      return 3;
-    } else {
-      int e = errno;
-      if (e != EBADF) {
-        printf("Unexpected result reading file handle %d: %s\n", i,
-               strerror(errno));
-        return 4;
-      }
-    }
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string file_path = test_dir + "/close_on_exec_random_access.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+  // Exhaust the RandomAccessFile mmap limit. This way, the test
+  // RandomAccessFile instance below is backed by a file descriptor, not by an
+  // mmap region.
+  leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr};
+  for (int i = 0; i < kReadOnlyFileLimit; i++) {
+    ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
   }
-  return 0;
+
+  leveldb::RandomAccessFile* file = nullptr;
+  ASSERT_OK(env_->NewRandomAccessFile(file_path, &file));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  delete file;
+
+  for (int i = 0; i < kReadOnlyFileLimit; i++) {
+    delete mmapped_files[i];
+  }
+  ASSERT_OK(env_->DeleteFile(file_path));
 }
 
+TEST(EnvPosixTest, TestCloseOnExecWritableFile) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
+
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string file_path = test_dir + "/close_on_exec_writable.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+  leveldb::WritableFile* file = nullptr;
+  ASSERT_OK(env_->NewWritableFile(file_path, &file));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  delete file;
+
+  ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecAppendableFile) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
+
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string file_path = test_dir + "/close_on_exec_appendable.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+  leveldb::WritableFile* file = nullptr;
+  ASSERT_OK(env_->NewAppendableFile(file_path, &file));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  delete file;
+
+  ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLockFile) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
+
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string file_path = test_dir + "/close_on_exec_lock.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+  leveldb::FileLock* lock = nullptr;
+  ASSERT_OK(env_->LockFile(file_path, &lock));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  ASSERT_OK(env_->UnlockFile(lock));
+
+  ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLogger) {
+  std::unordered_set<int> open_fds;
+  GetOpenFileDescriptors(&open_fds);
+
+  std::string test_dir;
+  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  std::string file_path = test_dir + "/close_on_exec_logger.txt";
+  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+  leveldb::Logger* file = nullptr;
+  ASSERT_OK(env_->NewLogger(file_path, &file));
+  CheckCloseOnExecDoesNotLeakFDs(open_fds);
+  delete file;
+
+  ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+#endif  // HAVE_O_CLOEXEC
+
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
-  // Check if this is the child process for TestCloseOnExec
-  if (argc > 1 && strcmp(argv[1], "-cloexec-child") == 0) {
-    return leveldb::CloexecChild();
+#if HAVE_O_CLOEXEC
+  // Check if we're invoked as a helper program, or as the test suite.
+  for (int i = 1; i < argc; ++i) {
+    if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) {
+      return TestCloseOnExecHelperMain(argv[i + 1]);
+    }
   }
+
+  // Save argv[0] early, because googletest may modify argv.
+  GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1);
+#endif  // HAVE_O_CLOEXEC
+
   // All tests currently run with the same read-only file limits.
   leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
                                        leveldb::kMMapLimit);

From b7b86baec9ce47569affc5db54a20a6cc520e0f0 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 8 May 2019 17:28:28 -0700
Subject: [PATCH 086/181] Using std::ostringstream in key DebugString.

Switching from snprintf to std::ostringstream eliminates the cast to
(unsigned long long) and the associated compiler warning.

PiperOrigin-RevId: 247326681
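
For illustration only (not part of this patch), a minimal standalone
sketch of the same technique; FormatKey is a hypothetical name, not a
leveldb symbol:

    // std::ostringstream selects the proper operator<< overload for
    // uint64_t, so no (unsigned long long) cast is needed the way the
    // "%llu" format specifier requires with snprintf.
    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    std::string FormatKey(const std::string& user_key, uint64_t sequence,
                          int type) {
      std::ostringstream ss;
      ss << '\'' << user_key << "' @ " << sequence << " : " << type;
      return ss.str();
    }

    int main() {
      std::cout << FormatKey("foo", 42, 1) << std::endl;  // 'foo' @ 42 : 1
    }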
---
 db/dbformat.cc      | 23 ++++++++++-------------
 db/dbformat_test.cc | 14 ++++++++++++++
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/db/dbformat.cc b/db/dbformat.cc
index 69e8dc6..459eddf 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -6,6 +6,8 @@
 
 #include <stdio.h>
 
+#include <sstream>
+
 #include "port/port.h"
 #include "util/coding.h"
 
@@ -23,25 +25,20 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
 }
 
 std::string ParsedInternalKey::DebugString() const {
-  char buf[50];
-  snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
-           int(type));
-  std::string result = "'";
-  result += EscapeString(user_key.ToString());
-  result += buf;
-  return result;
+  std::ostringstream ss;
+  ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : "
+     << static_cast<int>(type);
+  return ss.str();
 }
 
 std::string InternalKey::DebugString() const {
-  std::string result;
   ParsedInternalKey parsed;
   if (ParseInternalKey(rep_, &parsed)) {
-    result = parsed.DebugString();
-  } else {
-    result = "(bad)";
-    result.append(EscapeString(rep_));
+    return parsed.DebugString();
   }
-  return result;
+  std::ostringstream ss;
+  ss << "(bad)" << EscapeString(rep_);
+  return ss.str();
 }
 
 const char* InternalKeyComparator::Name() const {
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 87e6aae..f75d850 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -106,6 +106,20 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
             ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
 }
 
+TEST(FormatTest, ParsedInternalKeyDebugString) {
+  ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+
+  ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
+}
+
+TEST(FormatTest, InternalKeyDebugString) {
+  InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+  ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
+
+  InternalKey invalid_key;
+  ASSERT_EQ("(bad)", invalid_key.DebugString());
+}
+
 }  // namespace leveldb
 
 int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

From 85cd40d108d8f8d91f58fd263c0f8428d11c34d5 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 9 May 2019 13:17:39 -0700
Subject: [PATCH 087/181] Added unit test for InternalKey::DecodeFrom with
 empty string.

PiperOrigin-RevId: 247483339
---
 db/dbformat_test.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index f75d850..1209369 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -65,6 +65,12 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
   }
 }
 
+TEST(FormatTest, InternalKey_DecodeFromEmpty) {
+  InternalKey internal_key;
+
+  ASSERT_TRUE(!internal_key.DecodeFrom(""));
+}
+
 TEST(FormatTest, InternalKeyShortSeparator) {
   // When user keys are same
   ASSERT_EQ(IKey("foo", 100, kTypeValue),

From 28e6d238be73e743c963fc0a26395b783a7565e2 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 9 May 2019 14:00:07 -0700
Subject: [PATCH 088/181] Switch to using the C++11 override specifier.

PiperOrigin-RevId: 247491163
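
For illustration only (not leveldb code), a minimal sketch of what the
specifier buys: a member marked `override` that does not actually
override a base-class virtual is rejected at compile time, whereas a
stray `virtual` silently declares a brand-new function:

    struct Base {
      virtual ~Base() = default;
      virtual bool Valid() const { return false; }
    };

    struct Derived : Base {
      // Overrides Base::Valid. Dropping `const` here would now be a
      // compile error thanks to `override`, instead of silently adding
      // a second, unrelated Valid() member.
      bool Valid() const override { return true; }
    };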
---
 db/c.cc                     | 22 +++++++++---------
 db/db_impl.cc               |  2 +-
 db/db_impl.h                | 25 ++++++++++----------
 db/db_iter.cc               | 20 ++++++++--------
 db/db_test.cc               | 46 ++++++++++++++++++-------------------
 db/dbformat.h               | 16 ++++++-------
 db/dumpfile.cc              |  6 ++---
 db/leveldbutil.cc           |  8 +++----
 db/log_test.cc              | 14 +++++------
 db/repair.cc                |  2 +-
 db/version_set.cc           | 20 ++++++++--------
 helpers/memenv/memenv.cc    | 24 +++++++++----------
 table/block.cc              | 18 +++++++--------
 table/filter_block_test.cc  |  6 ++---
 table/merger.cc             | 20 ++++++++--------
 table/table_test.cc         | 26 ++++++++++-----------
 table/two_level_iterator.cc | 20 ++++++++--------
 util/bloom.cc               |  6 ++---
 util/cache.cc               | 20 ++++++++--------
 19 files changed, 161 insertions(+), 160 deletions(-)

diff --git a/db/c.cc b/db/c.cc
index e0f3367..1f6fd64 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -84,17 +84,17 @@ struct leveldb_filelock_t {
 };
 
 struct leveldb_comparator_t : public Comparator {
-  virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
+  ~leveldb_comparator_t() override { (*destructor_)(state_); }
 
-  virtual int Compare(const Slice& a, const Slice& b) const {
+  int Compare(const Slice& a, const Slice& b) const override {
     return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
   }
 
-  virtual const char* Name() const { return (*name_)(state_); }
+  const char* Name() const override { return (*name_)(state_); }
 
   // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*, const Slice&) const {}
-  virtual void FindShortSuccessor(std::string* key) const {}
+  void FindShortestSeparator(std::string*, const Slice&) const override {}
+  void FindShortSuccessor(std::string* key) const override {}
 
   void* state_;
   void (*destructor_)(void*);
@@ -104,11 +104,11 @@ struct leveldb_comparator_t : public Comparator {
 };
 
 struct leveldb_filterpolicy_t : public FilterPolicy {
-  virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
+  ~leveldb_filterpolicy_t() override { (*destructor_)(state_); }
 
-  virtual const char* Name() const { return (*name_)(state_); }
+  const char* Name() const override { return (*name_)(state_); }
 
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+  void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
     std::vector<const char*> key_pointers(n);
     std::vector<size_t> key_sizes(n);
     for (int i = 0; i < n; i++) {
@@ -121,7 +121,7 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
     free(filter);
   }
 
-  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+  bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
     return (*key_match_)(state_, key.data(), key.size(), filter.data(),
                          filter.size());
   }
@@ -345,10 +345,10 @@ void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
     void* state_;
     void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
     void (*deleted_)(void*, const char* k, size_t klen);
-    virtual void Put(const Slice& key, const Slice& value) {
+    void Put(const Slice& key, const Slice& value) override {
       (*put_)(state_, key.data(), key.size(), value.data(), value.size());
     }
-    virtual void Delete(const Slice& key) {
+    void Delete(const Slice& key) override {
       (*deleted_)(state_, key.data(), key.size());
     }
   };
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 82be594..22c0d53 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -376,7 +376,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     Logger* info_log;
     const char* fname;
     Status* status;  // null if options_.paranoid_checks==false
-    virtual void Corruption(size_t bytes, const Status& s) {
+    void Corruption(size_t bytes, const Status& s) override {
       Log(info_log, "%s%s: dropping %d bytes; %s",
           (this->status == nullptr ? "(ignoring error) " : ""), fname,
           static_cast<int>(bytes), s.ToString().c_str());
diff --git a/db/db_impl.h b/db/db_impl.h
index ae87d6e..a3f1ed1 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -33,20 +33,21 @@ class DBImpl : public DB {
   DBImpl(const DBImpl&) = delete;
   DBImpl& operator=(const DBImpl&) = delete;
 
-  virtual ~DBImpl();
+  ~DBImpl() override;
 
   // Implementations of the DB interface
-  virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
-  virtual Status Delete(const WriteOptions&, const Slice& key);
-  virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value);
-  virtual Iterator* NewIterator(const ReadOptions&);
-  virtual const Snapshot* GetSnapshot();
-  virtual void ReleaseSnapshot(const Snapshot* snapshot);
-  virtual bool GetProperty(const Slice& property, std::string* value);
-  virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
-  virtual void CompactRange(const Slice* begin, const Slice* end);
+  Status Put(const WriteOptions&, const Slice& key,
+             const Slice& value) override;
+  Status Delete(const WriteOptions&, const Slice& key) override;
+  Status Write(const WriteOptions& options, WriteBatch* updates) override;
+  Status Get(const ReadOptions& options, const Slice& key,
+             std::string* value) override;
+  Iterator* NewIterator(const ReadOptions&) override;
+  const Snapshot* GetSnapshot() override;
+  void ReleaseSnapshot(const Snapshot* snapshot) override;
+  bool GetProperty(const Slice& property, std::string* value) override;
+  void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
+  void CompactRange(const Slice* begin, const Slice* end) override;
 
   // Extra methods (for testing) that are not in the public DB interface
 
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 6e52550..98715a9 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -59,17 +59,17 @@ class DBIter : public Iterator {
   DBIter(const DBIter&) = delete;
   DBIter& operator=(const DBIter&) = delete;
 
-  virtual ~DBIter() { delete iter_; }
-  virtual bool Valid() const { return valid_; }
-  virtual Slice key() const {
+  ~DBIter() override { delete iter_; }
+  bool Valid() const override { return valid_; }
+  Slice key() const override {
     assert(valid_);
     return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_;
   }
-  virtual Slice value() const {
+  Slice value() const override {
     assert(valid_);
     return (direction_ == kForward) ? iter_->value() : saved_value_;
   }
-  virtual Status status() const {
+  Status status() const override {
     if (status_.ok()) {
       return iter_->status();
     } else {
@@ -77,11 +77,11 @@ class DBIter : public Iterator {
     }
   }
 
-  virtual void Next();
-  virtual void Prev();
-  virtual void Seek(const Slice& target);
-  virtual void SeekToFirst();
-  virtual void SeekToLast();
+  void Next() override;
+  void Prev() override;
+  void Seek(const Slice& target) override;
+  void SeekToFirst() override;
+  void SeekToLast() override;
 
  private:
   void FindNextUserEntry(bool skipping, std::string* skip);
diff --git a/db/db_test.cc b/db/db_test.cc
index 1da8db2..9a8faf1 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -210,9 +210,9 @@ class SpecialEnv : public EnvWrapper {
      public:
       CountingFile(RandomAccessFile* target, AtomicCounter* counter)
           : target_(target), counter_(counter) {}
-      virtual ~CountingFile() { delete target_; }
-      virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                          char* scratch) const {
+      ~CountingFile() override { delete target_; }
+      Status Read(uint64_t offset, size_t n, Slice* result,
+                  char* scratch) const override {
         counter_->Increment();
         return target_->Read(offset, n, result, scratch);
       }
@@ -1504,14 +1504,14 @@ TEST(DBTest, Fflush_Issue474) {
 TEST(DBTest, ComparatorCheck) {
   class NewComparator : public Comparator {
    public:
-    virtual const char* Name() const { return "leveldb.NewComparator"; }
-    virtual int Compare(const Slice& a, const Slice& b) const {
+    const char* Name() const override { return "leveldb.NewComparator"; }
+    int Compare(const Slice& a, const Slice& b) const override {
       return BytewiseComparator()->Compare(a, b);
     }
-    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
+    void FindShortestSeparator(std::string* s, const Slice& l) const override {
       BytewiseComparator()->FindShortestSeparator(s, l);
     }
-    virtual void FindShortSuccessor(std::string* key) const {
+    void FindShortSuccessor(std::string* key) const override {
       BytewiseComparator()->FindShortSuccessor(key);
     }
   };
@@ -1527,15 +1527,15 @@ TEST(DBTest, ComparatorCheck) {
 TEST(DBTest, CustomComparator) {
   class NumberComparator : public Comparator {
    public:
-    virtual const char* Name() const { return "test.NumberComparator"; }
-    virtual int Compare(const Slice& a, const Slice& b) const {
+    const char* Name() const override { return "test.NumberComparator"; }
+    int Compare(const Slice& a, const Slice& b) const override {
       return ToNumber(a) - ToNumber(b);
     }
-    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
+    void FindShortestSeparator(std::string* s, const Slice& l) const override {
       ToNumber(*s);  // Check format
       ToNumber(l);   // Check format
     }
-    virtual void FindShortSuccessor(std::string* key) const {
+    void FindShortSuccessor(std::string* key) const override {
       ToNumber(*key);  // Check format
     }
 
@@ -2060,10 +2060,10 @@ class ModelDB : public DB {
     class Handler : public WriteBatch::Handler {
      public:
       KVMap* map_;
-      virtual void Put(const Slice& key, const Slice& value) {
+      void Put(const Slice& key, const Slice& value) override {
         (*map_)[key.ToString()] = value.ToString();
       }
-      virtual void Delete(const Slice& key) { map_->erase(key.ToString()); }
+      void Delete(const Slice& key) override { map_->erase(key.ToString()); }
     };
     Handler handler;
     handler.map_ = &map_;
@@ -2085,26 +2085,26 @@ class ModelDB : public DB {
    public:
     ModelIter(const KVMap* map, bool owned)
         : map_(map), owned_(owned), iter_(map_->end()) {}
-    ~ModelIter() {
+    ~ModelIter() override {
       if (owned_) delete map_;
     }
-    virtual bool Valid() const { return iter_ != map_->end(); }
-    virtual void SeekToFirst() { iter_ = map_->begin(); }
-    virtual void SeekToLast() {
+    bool Valid() const override { return iter_ != map_->end(); }
+    void SeekToFirst() override { iter_ = map_->begin(); }
+    void SeekToLast() override {
       if (map_->empty()) {
         iter_ = map_->end();
       } else {
         iter_ = map_->find(map_->rbegin()->first);
       }
     }
-    virtual void Seek(const Slice& k) {
+    void Seek(const Slice& k) override {
       iter_ = map_->lower_bound(k.ToString());
     }
-    virtual void Next() { ++iter_; }
-    virtual void Prev() { --iter_; }
-    virtual Slice key() const { return iter_->first; }
-    virtual Slice value() const { return iter_->second; }
-    virtual Status status() const { return Status::OK(); }
+    void Next() override { ++iter_; }
+    void Prev() override { --iter_; }
+    Slice key() const override { return iter_->first; }
+    Slice value() const override { return iter_->second; }
+    Status status() const override { return Status::OK(); }
 
    private:
     const KVMap* const map_;
diff --git a/db/dbformat.h b/db/dbformat.h
index f990040..abcb489 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -103,11 +103,11 @@ class InternalKeyComparator : public Comparator {
 
  public:
   explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
-  virtual const char* Name() const;
-  virtual int Compare(const Slice& a, const Slice& b) const;
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const;
-  virtual void FindShortSuccessor(std::string* key) const;
+  const char* Name() const override;
+  int Compare(const Slice& a, const Slice& b) const override;
+  void FindShortestSeparator(std::string* start,
+                             const Slice& limit) const override;
+  void FindShortSuccessor(std::string* key) const override;
 
   const Comparator* user_comparator() const { return user_comparator_; }
 
@@ -121,9 +121,9 @@ class InternalFilterPolicy : public FilterPolicy {
 
  public:
   explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
-  virtual const char* Name() const;
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
-  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
+  const char* Name() const override;
+  void CreateFilter(const Slice* keys, int n, std::string* dst) const override;
+  bool KeyMayMatch(const Slice& key, const Slice& filter) const override;
 };
 
 // Modules in this directory should keep internal keys wrapped inside
diff --git a/db/dumpfile.cc b/db/dumpfile.cc
index 9d22d58..77d5900 100644
--- a/db/dumpfile.cc
+++ b/db/dumpfile.cc
@@ -38,7 +38,7 @@ bool GuessType(const std::string& fname, FileType* type) {
 // Notified when log reader encounters corruption.
 class CorruptionReporter : public log::Reader::Reporter {
  public:
-  virtual void Corruption(size_t bytes, const Status& status) {
+  void Corruption(size_t bytes, const Status& status) override {
     std::string r = "corruption: ";
     AppendNumberTo(&r, bytes);
     r += " bytes; ";
@@ -74,7 +74,7 @@ Status PrintLogContents(Env* env, const std::string& fname,
 // Called on every item found in a WriteBatch.
 class WriteBatchItemPrinter : public WriteBatch::Handler {
  public:
-  virtual void Put(const Slice& key, const Slice& value) {
+  void Put(const Slice& key, const Slice& value) override {
     std::string r = "  put '";
     AppendEscapedStringTo(&r, key);
     r += "' '";
@@ -82,7 +82,7 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
     r += "'\n";
     dst_->Append(r);
   }
-  virtual void Delete(const Slice& key) {
+  void Delete(const Slice& key) override {
     std::string r = "  del '";
     AppendEscapedStringTo(&r, key);
     r += "'\n";
diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc
index b21cf8e..55cdcc5 100644
--- a/db/leveldbutil.cc
+++ b/db/leveldbutil.cc
@@ -13,13 +13,13 @@ namespace {
 
 class StdoutPrinter : public WritableFile {
  public:
-  virtual Status Append(const Slice& data) {
+  Status Append(const Slice& data) override {
     fwrite(data.data(), 1, data.size(), stdout);
     return Status::OK();
   }
-  virtual Status Close() { return Status::OK(); }
-  virtual Status Flush() { return Status::OK(); }
-  virtual Status Sync() { return Status::OK(); }
+  Status Close() override { return Status::OK(); }
+  Status Flush() override { return Status::OK(); }
+  Status Sync() override { return Status::OK(); }
 };
 
 bool HandleDumpCommand(Env* env, char** files, int num) {
diff --git a/db/log_test.cc b/db/log_test.cc
index 809c418..0e31648 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -161,10 +161,10 @@ class LogTest {
  private:
   class StringDest : public WritableFile {
    public:
-    virtual Status Close() { return Status::OK(); }
-    virtual Status Flush() { return Status::OK(); }
-    virtual Status Sync() { return Status::OK(); }
-    virtual Status Append(const Slice& slice) {
+    Status Close() override { return Status::OK(); }
+    Status Flush() override { return Status::OK(); }
+    Status Sync() override { return Status::OK(); }
+    Status Append(const Slice& slice) override {
       contents_.append(slice.data(), slice.size());
       return Status::OK();
     }
@@ -176,7 +176,7 @@ class LogTest {
    public:
     StringSource() : force_error_(false), returned_partial_(false) {}
 
-    virtual Status Read(size_t n, Slice* result, char* scratch) {
+    Status Read(size_t n, Slice* result, char* scratch) override {
       ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
 
       if (force_error_) {
@@ -194,7 +194,7 @@ class LogTest {
       return Status::OK();
     }
 
-    virtual Status Skip(uint64_t n) {
+    Status Skip(uint64_t n) override {
       if (n > contents_.size()) {
         contents_.clear();
         return Status::NotFound("in-memory file skipped past end");
@@ -213,7 +213,7 @@ class LogTest {
   class ReportCollector : public Reader::Reporter {
    public:
     ReportCollector() : dropped_bytes_(0) {}
-    virtual void Corruption(size_t bytes, const Status& status) {
+    void Corruption(size_t bytes, const Status& status) override {
       dropped_bytes_ += bytes;
       message_.append(status.ToString());
     }
diff --git a/db/repair.cc b/db/repair.cc
index 3c676ca..d9d12ba 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -145,7 +145,7 @@ class Repairer {
       Env* env;
       Logger* info_log;
       uint64_t lognum;
-      virtual void Corruption(size_t bytes, const Status& s) {
+      void Corruption(size_t bytes, const Status& s) override {
         // We print error messages for corruption, but continue repairing.
         Log(info_log, "Log #%llu: dropping %d bytes; %s",
             (unsigned long long)lognum, static_cast<int>(bytes),
diff --git a/db/version_set.cc b/db/version_set.cc
index 099fa57..0f8bec1 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -167,19 +167,19 @@ class Version::LevelFileNumIterator : public Iterator {
                        const std::vector<FileMetaData*>* flist)
       : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
   }
-  virtual bool Valid() const { return index_ < flist_->size(); }
-  virtual void Seek(const Slice& target) {
+  bool Valid() const override { return index_ < flist_->size(); }
+  void Seek(const Slice& target) override {
     index_ = FindFile(icmp_, *flist_, target);
   }
-  virtual void SeekToFirst() { index_ = 0; }
-  virtual void SeekToLast() {
+  void SeekToFirst() override { index_ = 0; }
+  void SeekToLast() override {
     index_ = flist_->empty() ? 0 : flist_->size() - 1;
   }
-  virtual void Next() {
+  void Next() override {
     assert(Valid());
     index_++;
   }
-  virtual void Prev() {
+  void Prev() override {
     assert(Valid());
     if (index_ == 0) {
       index_ = flist_->size();  // Marks as invalid
@@ -187,17 +187,17 @@ class Version::LevelFileNumIterator : public Iterator {
       index_--;
     }
   }
-  Slice key() const {
+  Slice key() const override {
     assert(Valid());
     return (*flist_)[index_]->largest.Encode();
   }
-  Slice value() const {
+  Slice value() const override {
     assert(Valid());
     EncodeFixed64(value_buf_, (*flist_)[index_]->number);
     EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
     return Slice(value_buf_, sizeof(value_buf_));
   }
-  virtual Status status() const { return Status::OK(); }
+  Status status() const override { return Status::OK(); }
 
  private:
   const InternalKeyComparator icmp_;
@@ -883,7 +883,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
 Status VersionSet::Recover(bool* save_manifest) {
   struct LogReporter : public log::Reader::Reporter {
     Status* status;
-    virtual void Corruption(size_t bytes, const Status& s) {
+    void Corruption(size_t bytes, const Status& s) override {
       if (this->status->ok()) *this->status = s;
     }
   };
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index 2d4fbaa..31d2bc0 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -156,9 +156,9 @@ class SequentialFileImpl : public SequentialFile {
     file_->Ref();
   }
 
-  ~SequentialFileImpl() { file_->Unref(); }
+  ~SequentialFileImpl() override { file_->Unref(); }
 
-  virtual Status Read(size_t n, Slice* result, char* scratch) {
+  Status Read(size_t n, Slice* result, char* scratch) override {
     Status s = file_->Read(pos_, n, result, scratch);
     if (s.ok()) {
       pos_ += result->size();
@@ -166,7 +166,7 @@ class SequentialFileImpl : public SequentialFile {
     return s;
   }
 
-  virtual Status Skip(uint64_t n) {
+  Status Skip(uint64_t n) override {
     if (pos_ > file_->Size()) {
       return Status::IOError("pos_ > file_->Size()");
     }
@@ -187,10 +187,10 @@ class RandomAccessFileImpl : public RandomAccessFile {
  public:
   explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~RandomAccessFileImpl() { file_->Unref(); }
+  ~RandomAccessFileImpl() override { file_->Unref(); }
 
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const {
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
     return file_->Read(offset, n, result, scratch);
   }
 
@@ -202,13 +202,13 @@ class WritableFileImpl : public WritableFile {
  public:
   WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~WritableFileImpl() { file_->Unref(); }
+  ~WritableFileImpl() override { file_->Unref(); }
 
-  virtual Status Append(const Slice& data) { return file_->Append(data); }
+  Status Append(const Slice& data) override { return file_->Append(data); }
 
-  virtual Status Close() { return Status::OK(); }
-  virtual Status Flush() { return Status::OK(); }
-  virtual Status Sync() { return Status::OK(); }
+  Status Close() override { return Status::OK(); }
+  Status Flush() override { return Status::OK(); }
+  Status Sync() override { return Status::OK(); }
 
  private:
   FileState* file_;
@@ -216,7 +216,7 @@ class WritableFileImpl : public WritableFile {
 
 class NoOpLogger : public Logger {
  public:
-  virtual void Logv(const char* format, va_list ap) {}
+  void Logv(const char* format, va_list ap) override {}
 };
 
 class InMemoryEnv : public EnvWrapper {
diff --git a/table/block.cc b/table/block.cc
index ad0ee98..05c600f 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -123,23 +123,23 @@ class Block::Iter : public Iterator {
     assert(num_restarts_ > 0);
   }
 
-  virtual bool Valid() const { return current_ < restarts_; }
-  virtual Status status() const { return status_; }
-  virtual Slice key() const {
+  bool Valid() const override { return current_ < restarts_; }
+  Status status() const override { return status_; }
+  Slice key() const override {
     assert(Valid());
     return key_;
   }
-  virtual Slice value() const {
+  Slice value() const override {
     assert(Valid());
     return value_;
   }
 
-  virtual void Next() {
+  void Next() override {
     assert(Valid());
     ParseNextKey();
   }
 
-  virtual void Prev() {
+  void Prev() override {
     assert(Valid());
 
     // Scan backwards to a restart point before current_
@@ -160,7 +160,7 @@ class Block::Iter : public Iterator {
     } while (ParseNextKey() && NextEntryOffset() < original);
   }
 
-  virtual void Seek(const Slice& target) {
+  void Seek(const Slice& target) override {
     // Binary search in restart array to find the last restart point
     // with a key < target
     uint32_t left = 0;
@@ -200,12 +200,12 @@ class Block::Iter : public Iterator {
     }
   }
 
-  virtual void SeekToFirst() {
+  void SeekToFirst() override {
     SeekToRestartPoint(0);
     ParseNextKey();
   }
 
-  virtual void SeekToLast() {
+  void SeekToLast() override {
     SeekToRestartPoint(num_restarts_ - 1);
     while (ParseNextKey() && NextEntryOffset() < restarts_) {
       // Keep skipping
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 6cdd435..8b33bbd 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -16,16 +16,16 @@ namespace leveldb {
 // For testing: emit an array with one hash value per key
 class TestHashFilter : public FilterPolicy {
  public:
-  virtual const char* Name() const { return "TestHashFilter"; }
+  const char* Name() const override { return "TestHashFilter"; }
 
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+  void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
     for (int i = 0; i < n; i++) {
       uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
       PutFixed32(dst, h);
     }
   }
 
-  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+  bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
     uint32_t h = Hash(key.data(), key.size(), 1);
     for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
       if (h == DecodeFixed32(filter.data() + i)) {
diff --git a/table/merger.cc b/table/merger.cc
index 1bbc6cf..76441b1 100644
--- a/table/merger.cc
+++ b/table/merger.cc
@@ -24,11 +24,11 @@ class MergingIterator : public Iterator {
     }
   }
 
-  virtual ~MergingIterator() { delete[] children_; }
+  ~MergingIterator() override { delete[] children_; }
 
-  virtual bool Valid() const { return (current_ != nullptr); }
+  bool Valid() const override { return (current_ != nullptr); }
 
-  virtual void SeekToFirst() {
+  void SeekToFirst() override {
     for (int i = 0; i < n_; i++) {
       children_[i].SeekToFirst();
     }
@@ -36,7 +36,7 @@ class MergingIterator : public Iterator {
     direction_ = kForward;
   }
 
-  virtual void SeekToLast() {
+  void SeekToLast() override {
     for (int i = 0; i < n_; i++) {
       children_[i].SeekToLast();
     }
@@ -44,7 +44,7 @@ class MergingIterator : public Iterator {
     direction_ = kReverse;
   }
 
-  virtual void Seek(const Slice& target) {
+  void Seek(const Slice& target) override {
     for (int i = 0; i < n_; i++) {
       children_[i].Seek(target);
     }
@@ -52,7 +52,7 @@ class MergingIterator : public Iterator {
     direction_ = kForward;
   }
 
-  virtual void Next() {
+  void Next() override {
     assert(Valid());
 
     // Ensure that all children are positioned after key().
@@ -78,7 +78,7 @@ class MergingIterator : public Iterator {
     FindSmallest();
   }
 
-  virtual void Prev() {
+  void Prev() override {
     assert(Valid());
 
     // Ensure that all children are positioned before key().
@@ -107,17 +107,17 @@ class MergingIterator : public Iterator {
     FindLargest();
   }
 
-  virtual Slice key() const {
+  Slice key() const override {
     assert(Valid());
     return current_->key();
   }
 
-  virtual Slice value() const {
+  Slice value() const override {
     assert(Valid());
     return current_->value();
   }
 
-  virtual Status status() const {
+  Status status() const override {
     Status status;
     for (int i = 0; i < n_; i++) {
       status = children_[i].status();
diff --git a/table/table_test.cc b/table/table_test.cc
index 3c63e32..f689a27 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -38,23 +38,23 @@ static std::string Reverse(const Slice& key) {
 namespace {
 class ReverseKeyComparator : public Comparator {
  public:
-  virtual const char* Name() const {
+  const char* Name() const override {
     return "leveldb.ReverseBytewiseComparator";
   }
 
-  virtual int Compare(const Slice& a, const Slice& b) const {
+  int Compare(const Slice& a, const Slice& b) const override {
     return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
   }
 
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const {
+  void FindShortestSeparator(std::string* start,
+                             const Slice& limit) const override {
     std::string s = Reverse(*start);
     std::string l = Reverse(limit);
     BytewiseComparator()->FindShortestSeparator(&s, l);
     *start = Reverse(s);
   }
 
-  virtual void FindShortSuccessor(std::string* key) const {
+  void FindShortSuccessor(std::string* key) const override {
     std::string s = Reverse(*key);
     BytewiseComparator()->FindShortSuccessor(&s);
     *key = Reverse(s);
@@ -89,15 +89,15 @@ struct STLLessThan {
 
 class StringSink : public WritableFile {
  public:
-  ~StringSink() = default;
+  ~StringSink() override = default;
 
   const std::string& contents() const { return contents_; }
 
-  virtual Status Close() { return Status::OK(); }
-  virtual Status Flush() { return Status::OK(); }
-  virtual Status Sync() { return Status::OK(); }
+  Status Close() override { return Status::OK(); }
+  Status Flush() override { return Status::OK(); }
+  Status Sync() override { return Status::OK(); }
 
-  virtual Status Append(const Slice& data) {
+  Status Append(const Slice& data) override {
     contents_.append(data.data(), data.size());
     return Status::OK();
   }
@@ -111,12 +111,12 @@ class StringSource : public RandomAccessFile {
   StringSource(const Slice& contents)
       : contents_(contents.data(), contents.size()) {}
 
-  virtual ~StringSource() = default;
+  ~StringSource() override = default;
 
   uint64_t Size() const { return contents_.size(); }
 
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const {
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* scratch) const override {
     if (offset >= contents_.size()) {
       return Status::InvalidArgument("invalid Read offset");
     }
diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc
index 1fc4626..144790d 100644
--- a/table/two_level_iterator.cc
+++ b/table/two_level_iterator.cc
@@ -20,24 +20,24 @@ class TwoLevelIterator : public Iterator {
   TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
                    void* arg, const ReadOptions& options);
 
-  virtual ~TwoLevelIterator();
+  ~TwoLevelIterator() override;
 
-  virtual void Seek(const Slice& target);
-  virtual void SeekToFirst();
-  virtual void SeekToLast();
-  virtual void Next();
-  virtual void Prev();
+  void Seek(const Slice& target) override;
+  void SeekToFirst() override;
+  void SeekToLast() override;
+  void Next() override;
+  void Prev() override;
 
-  virtual bool Valid() const { return data_iter_.Valid(); }
-  virtual Slice key() const {
+  bool Valid() const override { return data_iter_.Valid(); }
+  Slice key() const override {
     assert(Valid());
     return data_iter_.key();
   }
-  virtual Slice value() const {
+  Slice value() const override {
     assert(Valid());
     return data_iter_.value();
   }
-  virtual Status status() const {
+  Status status() const override {
     // It'd be nice if status() returned a const Status& instead of a Status
     if (!index_iter_.status().ok()) {
       return index_iter_.status();
diff --git a/util/bloom.cc b/util/bloom.cc
index 7f97464..87547a7 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -23,9 +23,9 @@ class BloomFilterPolicy : public FilterPolicy {
     if (k_ > 30) k_ = 30;
   }
 
-  virtual const char* Name() const { return "leveldb.BuiltinBloomFilter2"; }
+  const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
 
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+  void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
     // Compute bloom filter size (in both bits and bytes)
     size_t bits = n * bits_per_key_;
 
@@ -53,7 +53,7 @@ class BloomFilterPolicy : public FilterPolicy {
     }
   }
 
-  virtual bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const {
+  bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
     const size_t len = bloom_filter.size();
     if (len < 2) return false;
 
diff --git a/util/cache.cc b/util/cache.cc
index 0f801cc..12de306 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -354,37 +354,37 @@ class ShardedLRUCache : public Cache {
       shard_[s].SetCapacity(per_shard);
     }
   }
-  virtual ~ShardedLRUCache() {}
-  virtual Handle* Insert(const Slice& key, void* value, size_t charge,
-                         void (*deleter)(const Slice& key, void* value)) {
+  ~ShardedLRUCache() override {}
+  Handle* Insert(const Slice& key, void* value, size_t charge,
+                 void (*deleter)(const Slice& key, void* value)) override {
     const uint32_t hash = HashSlice(key);
     return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
   }
-  virtual Handle* Lookup(const Slice& key) {
+  Handle* Lookup(const Slice& key) override {
     const uint32_t hash = HashSlice(key);
     return shard_[Shard(hash)].Lookup(key, hash);
   }
-  virtual void Release(Handle* handle) {
+  void Release(Handle* handle) override {
     LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
     shard_[Shard(h->hash)].Release(handle);
   }
-  virtual void Erase(const Slice& key) {
+  void Erase(const Slice& key) override {
     const uint32_t hash = HashSlice(key);
     shard_[Shard(hash)].Erase(key, hash);
   }
-  virtual void* Value(Handle* handle) {
+  void* Value(Handle* handle) override {
     return reinterpret_cast<LRUHandle*>(handle)->value;
   }
-  virtual uint64_t NewId() {
+  uint64_t NewId() override {
     MutexLock l(&id_mutex_);
     return ++(last_id_);
   }
-  virtual void Prune() {
+  void Prune() override {
     for (int s = 0; s < kNumShards; s++) {
       shard_[s].Prune();
     }
   }
-  virtual size_t TotalCharge() const {
+  size_t TotalCharge() const override {
     size_t total = 0;
     for (int s = 0; s < kNumShards; s++) {
       total += shard_[s].TotalCharge();

From 1d0b101165ddd34f26cc5c62b76f2a2e0d622483 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Mon, 13 May 2019 09:31:30 -0700
Subject: [PATCH 089/181] Converted two for-loops to while-loops.

Converted `for (;<condition>;)` to `while (<condition>)`.

PiperOrigin-RevId: 247950510
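
For illustration only, the two loop forms are equivalent; the `while`
spelling simply states the intent more directly:

    int i = 0;
    for (; i < 3;) {  // before: for-loop with empty init and increment
      ++i;
    }

    int j = 0;
    while (j < 3) {   // after: identical behavior, clearer intent
      ++j;
    }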
---
 db/db_impl.cc     | 2 +-
 db/version_set.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 22c0d53..067c67d 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -903,7 +903,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   std::string current_user_key;
   bool has_current_user_key = false;
   SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
-  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire);) {
+  while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) {
     // Prioritize immutable compaction work
     if (has_imm_.load(std::memory_order_relaxed)) {
       const uint64_t imm_start = env_->NowMicros();
diff --git a/db/version_set.cc b/db/version_set.cc
index 0f8bec1..b62a2d0 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1532,7 +1532,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
   const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
   for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
     const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
-    for (; level_ptrs_[lvl] < files.size();) {
+    while (level_ptrs_[lvl] < files.size()) {
       FileMetaData* f = files[level_ptrs_[lvl]];
       if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
         // We've advanced far enough

From c00e177f3613068eda4bff4abfbd3bd4165a86e8 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 15 May 2019 13:13:13 -0700
Subject: [PATCH 090/181] Guard DBImpl::versions_ by mutex_.

mutex_ was already acquired before accessing DBImpl::versions_ in all
but one place: DBImpl::GetApproximateSizes. This change requires mutex_
to be held before accessing versions_.

PiperOrigin-RevId: 248390814
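
For illustration only, a minimal sketch of how the annotation is
enforced, assuming leveldb's port::Mutex and the GUARDED_BY macro from
port/thread_annotations.h; with clang's -Wthread-safety analysis,
touching an annotated member without holding the named mutex is flagged
at compile time:

    #include "port/port.h"
    #include "port/thread_annotations.h"

    class Counter {
     public:
      void Increment() {
        mu_.Lock();
        value_++;        // OK: mu_ is held here.
        mu_.Unlock();
      }
      int ReadWithoutLock() {
        return value_;   // Warning: reading value_ requires holding mu_.
      }

     private:
      leveldb::port::Mutex mu_;
      int value_ GUARDED_BY(mu_) = 0;
    };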
---
 db/db_impl.cc | 17 ++++++-----------
 db/db_impl.h  |  2 +-
 2 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 067c67d..94b5d4c 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -893,10 +893,11 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     compact->smallest_snapshot = snapshots_.oldest()->sequence_number();
   }
 
+  Iterator* input = versions_->MakeInputIterator(compact->compaction);
+
   // Release mutex while we're actually doing the compaction work
   mutex_.Unlock();
 
-  Iterator* input = versions_->MakeInputIterator(compact->compaction);
   input->SeekToFirst();
   Status status;
   ParsedInternalKey ikey;
@@ -1433,12 +1434,9 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
 
 void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
   // TODO(opt): better implementation
-  Version* v;
-  {
-    MutexLock l(&mutex_);
-    versions_->current()->Ref();
-    v = versions_->current();
-  }
+  MutexLock l(&mutex_);
+  Version* v = versions_->current();
+  v->Ref();
 
   for (int i = 0; i < n; i++) {
     // Convert user_key into a corresponding internal key.
@@ -1449,10 +1447,7 @@ void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
     sizes[i] = (limit >= start ? limit - start : 0);
   }
 
-  {
-    MutexLock l(&mutex_);
-    v->Unref();
-  }
+  v->Unref();
 }
 
 // Default implementations of convenience methods that subclasses of DB
diff --git a/db/db_impl.h b/db/db_impl.h
index a3f1ed1..685735c 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -197,7 +197,7 @@ class DBImpl : public DB {
 
   ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
 
-  VersionSet* const versions_;
+  VersionSet* const versions_ GUARDED_BY(mutex_);
 
   // Have we encountered a background error in paranoid mode?
   Status bg_error_ GUARDED_BY(mutex_);

From ae49533210e96bdee9c9479a7fa547f375a39c8b Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 22 May 2019 15:19:38 -0700
Subject: [PATCH 091/181] Add explicit typecasts to avoid compiler warning.

Fixes issue #684.

PiperOrigin-RevId: 249531001
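
For illustration only (hypothetical function names), a minimal sketch of
the narrowing being made explicit: on 64-bit targets size_t is wider
than uint32_t, so the implicit conversion can trip warnings such as
-Wconversion, while static_cast documents the intentional truncation:

    #include <cstdint>
    #include <string>

    uint32_t ImplicitLen(const std::string& s) {
      return s.size();                         // may warn: size_t -> uint32_t
    }

    uint32_t ExplicitLen(const std::string& s) {
      return static_cast<uint32_t>(s.size());  // narrowing made explicit
    }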
---
 util/status.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/util/status.cc b/util/status.cc
index 6ca8da6..15ce747 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -20,8 +20,8 @@ const char* Status::CopyState(const char* state) {
 
 Status::Status(Code code, const Slice& msg, const Slice& msg2) {
   assert(code != kOk);
-  const uint32_t len1 = msg.size();
-  const uint32_t len2 = msg2.size();
+  const uint32_t len1 = static_cast<uint32_t>(msg.size());
+  const uint32_t len2 = static_cast<uint32_t>(msg2.size());
   const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
   char* result = new char[size + 5];
   memcpy(result, &size, sizeof(size));

From a3b71c1ff65e30ced00e85ebbca9ae5786af6626 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Fri, 24 May 2019 14:25:42 -0700
Subject: [PATCH 092/181] Use GCC 9 on Travis CI

PiperOrigin-RevId: 249899128
---
 .travis.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 436e037..7c9eba2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,8 +25,8 @@ addons:
     packages:
     - clang-8
     - cmake
-    - gcc-8
-    - g++-8
+    - gcc-9
+    - g++-9
     - libgoogle-perftools-dev
     - libkyotocabinet-dev
     - libsnappy-dev
@@ -36,7 +36,7 @@ addons:
     packages:
     - cmake
     - crc32c
-    - gcc@8
+    - gcc@9
     - gperftools
     - kyotocabinet
     - llvm@8
@@ -52,7 +52,7 @@ install:
     export PATH="$(brew --prefix llvm)/bin:$PATH";
   fi
 # /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
+- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
 # /usr/bin/clang points to an older compiler on both Linux and macOS.
 #
 # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values

From 863f185970eff21e826e5fe1164a6215a515c23b Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 28 May 2019 10:17:03 -0700
Subject: [PATCH 093/181] unsigned char -> uint8_t

PiperOrigin-RevId: 250309603
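
For illustration only, a caller-side sketch of the C API's boolean
convention after this change (uint8_t, 0 == false, nonzero == true);
ConfigureOptions is a hypothetical helper, while the setter names come
from include/leveldb/c.h:

    #include <cstdint>
    #include "leveldb/c.h"

    void ConfigureOptions(leveldb_options_t* options) {
      // Boolean parameters are plain uint8_t values.
      leveldb_options_set_create_if_missing(options, 1);
      leveldb_options_set_paranoid_checks(options, 0);
    }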
---
 db/c.cc             | 30 +++++++++++++-----------------
 db/c_test.c         |  8 +++-----
 db/dbformat.h       |  8 +++++---
 include/leveldb/c.h | 20 ++++++++++----------
 table/block.cc      |  7 ++++---
 util/coding.cc      | 10 +++++-----
 util/coding.h       |  2 +-
 util/crc32c_test.cc |  2 +-
 util/hash.cc        |  6 +++---
 util/hash_test.cc   | 10 +++++-----
 util/logging.cc     | 11 +++++------
 11 files changed, 55 insertions(+), 59 deletions(-)

diff --git a/db/c.cc b/db/c.cc
index 1f6fd64..3a492f9 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -4,7 +4,8 @@
 
 #include "leveldb/c.h"
 
-#include <stdlib.h>
+#include <cstdint>
+#include <cstdlib>
 
 #include "leveldb/cache.h"
 #include "leveldb/comparator.h"
@@ -132,8 +133,8 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
   char* (*create_)(void*, const char* const* key_array,
                    const size_t* key_length_array, int num_keys,
                    size_t* filter_length);
-  unsigned char (*key_match_)(void*, const char* key, size_t length,
-                              const char* filter, size_t filter_length);
+  uint8_t (*key_match_)(void*, const char* key, size_t length,
+                        const char* filter, size_t filter_length);
 };
 
 struct leveldb_env_t {
@@ -281,7 +282,7 @@ void leveldb_iter_destroy(leveldb_iterator_t* iter) {
   delete iter;
 }
 
-unsigned char leveldb_iter_valid(const leveldb_iterator_t* iter) {
+uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) {
   return iter->rep->Valid();
 }
 
@@ -378,18 +379,15 @@ void leveldb_options_set_filter_policy(leveldb_options_t* opt,
   opt->rep.filter_policy = policy;
 }
 
-void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
-                                           unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) {
   opt->rep.create_if_missing = v;
 }
 
-void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
-                                         unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) {
   opt->rep.error_if_exists = v;
 }
 
-void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
-                                         unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) {
   opt->rep.paranoid_checks = v;
 }
 
@@ -449,8 +447,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create(
     char* (*create_filter)(void*, const char* const* key_array,
                            const size_t* key_length_array, int num_keys,
                            size_t* filter_length),
-    unsigned char (*key_may_match)(void*, const char* key, size_t length,
-                                   const char* filter, size_t filter_length),
+    uint8_t (*key_may_match)(void*, const char* key, size_t length,
+                             const char* filter, size_t filter_length),
     const char* (*name)(void*)) {
   leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
   result->state_ = state;
@@ -497,12 +495,11 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
 void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
 
 void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
-                                              unsigned char v) {
+                                              uint8_t v) {
   opt->rep.verify_checksums = v;
 }
 
-void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
-                                        unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt, uint8_t v) {
   opt->rep.fill_cache = v;
 }
 
@@ -517,8 +514,7 @@ leveldb_writeoptions_t* leveldb_writeoptions_create() {
 
 void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
 
-void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
-                                   unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) {
   opt->rep.sync = v;
 }
 
diff --git a/db/c_test.c b/db/c_test.c
index ae14b99..16c77ee 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -120,7 +120,7 @@ static const char* CmpName(void* arg) {
 }
 
 // Custom filter policy
-static unsigned char fake_filter_result = 1;
+static uint8_t fake_filter_result = 1;
 static void FilterDestroy(void* arg) { }
 static const char* FilterName(void* arg) {
   return "TestFilter";
@@ -135,10 +135,8 @@ static char* FilterCreate(
   memcpy(result, "fake", 4);
   return result;
 }
-unsigned char FilterKeyMatch(
-    void* arg,
-    const char* key, size_t length,
-    const char* filter, size_t filter_length) {
+uint8_t FilterKeyMatch(void* arg, const char* key, size_t length,
+                       const char* filter, size_t filter_length) {
   CheckCondition(filter_length == 4);
   CheckCondition(memcmp(filter, "fake", 4) == 0);
   return fake_filter_result;
diff --git a/db/dbformat.h b/db/dbformat.h
index abcb489..a1c30ed 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -5,7 +5,9 @@
 #ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
 #define STORAGE_LEVELDB_DB_DBFORMAT_H_
 
-#include <stdio.h>
+#include <cstddef>
+#include <cstdint>
+#include <string>
 
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
@@ -171,11 +173,11 @@ inline bool ParseInternalKey(const Slice& internal_key,
   const size_t n = internal_key.size();
   if (n < 8) return false;
   uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  unsigned char c = num & 0xff;
+  uint8_t c = num & 0xff;
   result->sequence = num >> 8;
   result->type = static_cast<ValueType>(c);
   result->user_key = Slice(internal_key.data(), n - 8);
-  return (c <= static_cast<unsigned char>(kTypeValue));
+  return (c <= static_cast<uint8_t>(kTypeValue));
 }
 
 // A helper class useful for DBImpl::Get()
diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index 04e383c..02c79ba 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -32,7 +32,7 @@
   On failure, leveldb frees the old value of *errptr and
   set *errptr to a malloc()ed error message.
 
-  (4) Bools have the type unsigned char (0 == false; rest == true)
+  (4) Bools have the type uint8_t (0 == false; rest == true)
 
   (5) All of the pointer arguments must be non-NULL.
 */
@@ -131,7 +131,7 @@ LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options,
 /* Iterator */
 
 LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*);
-LEVELDB_EXPORT unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
+LEVELDB_EXPORT uint8_t leveldb_iter_valid(const leveldb_iterator_t*);
 LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*);
 LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*);
 LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k,
@@ -171,11 +171,11 @@ LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
 LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*,
                                                       leveldb_filterpolicy_t*);
 LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*,
-                                                          unsigned char);
+                                                          uint8_t);
 LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*,
-                                                        unsigned char);
+                                                        uint8_t);
 LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*,
-                                                        unsigned char);
+                                                        uint8_t);
 LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
 LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*,
                                                  leveldb_logger_t*);
@@ -209,8 +209,8 @@ LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create(
     char* (*create_filter)(void*, const char* const* key_array,
                            const size_t* key_length_array, int num_keys,
                            size_t* filter_length),
-    unsigned char (*key_may_match)(void*, const char* key, size_t length,
-                                   const char* filter, size_t filter_length),
+    uint8_t (*key_may_match)(void*, const char* key, size_t length,
+                             const char* filter, size_t filter_length),
     const char* (*name)(void*));
 LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
 
@@ -222,9 +222,9 @@ LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
 LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create(void);
 LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
 LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
-    leveldb_readoptions_t*, unsigned char);
+    leveldb_readoptions_t*, uint8_t);
 LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*,
-                                                       unsigned char);
+                                                       uint8_t);
 LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
                                                      const leveldb_snapshot_t*);
 
@@ -233,7 +233,7 @@ LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
 LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void);
 LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
 LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
-                                                  unsigned char);
+                                                  uint8_t);
 
 /* Cache */
 
diff --git a/table/block.cc b/table/block.cc
index 05c600f..2fe89ea 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -7,6 +7,7 @@
 #include "table/block.h"
 
 #include <algorithm>
+#include <cstdint>
 #include <vector>
 
 #include "leveldb/comparator.h"
@@ -55,9 +56,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
                                       uint32_t* shared, uint32_t* non_shared,
                                       uint32_t* value_length) {
   if (limit - p < 3) return nullptr;
-  *shared = reinterpret_cast<const unsigned char*>(p)[0];
-  *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
-  *value_length = reinterpret_cast<const unsigned char*>(p)[2];
+  *shared = reinterpret_cast<const uint8_t*>(p)[0];
+  *non_shared = reinterpret_cast<const uint8_t*>(p)[1];
+  *value_length = reinterpret_cast<const uint8_t*>(p)[2];
   if ((*shared | *non_shared | *value_length) < 128) {
     // Fast path: all three values are encoded in one byte each
     p += 3;
diff --git a/util/coding.cc b/util/coding.cc
index 55be020..df3fa10 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -20,7 +20,7 @@ void PutFixed64(std::string* dst, uint64_t value) {
 
 char* EncodeVarint32(char* dst, uint32_t v) {
   // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
   static const int B = 128;
   if (v < (1 << 7)) {
     *(ptr++) = v;
@@ -54,12 +54,12 @@ void PutVarint32(std::string* dst, uint32_t v) {
 
 char* EncodeVarint64(char* dst, uint64_t v) {
   static const int B = 128;
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
   while (v >= B) {
     *(ptr++) = v | B;
     v >>= 7;
   }
-  *(ptr++) = static_cast<unsigned char>(v);
+  *(ptr++) = static_cast<uint8_t>(v);
   return reinterpret_cast<char*>(ptr);
 }
 
@@ -87,7 +87,7 @@ const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value) {
   uint32_t result = 0;
   for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
-    uint32_t byte = *(reinterpret_cast<const unsigned char*>(p));
+    uint32_t byte = *(reinterpret_cast<const uint8_t*>(p));
     p++;
     if (byte & 128) {
       // More bytes are present
@@ -116,7 +116,7 @@ bool GetVarint32(Slice* input, uint32_t* value) {
 const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
   uint64_t result = 0;
   for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
-    uint64_t byte = *(reinterpret_cast<const unsigned char*>(p));
+    uint64_t byte = *(reinterpret_cast<const uint8_t*>(p));
     p++;
     if (byte & 128) {
       // More bytes are present
diff --git a/util/coding.h b/util/coding.h
index 92a961f..1983ae7 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -150,7 +150,7 @@ const char* GetVarint32PtrFallback(const char* p, const char* limit,
 inline const char* GetVarint32Ptr(const char* p, const char* limit,
                                   uint32_t* value) {
   if (p < limit) {
-    uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
+    uint32_t result = *(reinterpret_cast<const uint8_t*>(p));
     if ((result & 128) == 0) {
       *value = result;
       return p + 1;
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index dbd2ba4..18a8494 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -30,7 +30,7 @@ TEST(CRC, StandardResults) {
   }
   ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
 
-  unsigned char data[48] = {
+  uint8_t data[48] = {
       0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
       0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
diff --git a/util/hash.cc b/util/hash.cc
index 67dc134..dd47c11 100644
--- a/util/hash.cc
+++ b/util/hash.cc
@@ -38,13 +38,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
   // Pick up remaining bytes
   switch (limit - data) {
     case 3:
-      h += static_cast<unsigned char>(data[2]) << 16;
+      h += static_cast<uint8_t>(data[2]) << 16;
       FALLTHROUGH_INTENDED;
     case 2:
-      h += static_cast<unsigned char>(data[1]) << 8;
+      h += static_cast<uint8_t>(data[1]) << 8;
       FALLTHROUGH_INTENDED;
     case 1:
-      h += static_cast<unsigned char>(data[0]);
+      h += static_cast<uint8_t>(data[0]);
       h *= m;
       h ^= (h >> r);
       break;
diff --git a/util/hash_test.cc b/util/hash_test.cc
index 8f579cc..21f8171 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -10,11 +10,11 @@ namespace leveldb {
 class HASH {};
 
 TEST(HASH, SignedUnsignedIssue) {
-  const unsigned char data1[1] = {0x62};
-  const unsigned char data2[2] = {0xc3, 0x97};
-  const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
-  const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
-  const unsigned char data5[48] = {
+  const uint8_t data1[1] = {0x62};
+  const uint8_t data2[2] = {0xc3, 0x97};
+  const uint8_t data3[3] = {0xe2, 0x99, 0xa5};
+  const uint8_t data4[4] = {0xe1, 0x80, 0xb9, 0x32};
+  const uint8_t data5[48] = {
       0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
       0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
diff --git a/util/logging.cc b/util/logging.cc
index 1ad8f1c..75e9d03 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -56,14 +56,13 @@ bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
 
   uint64_t value = 0;
 
-  // reinterpret_cast-ing from char* to unsigned char* to avoid signedness.
-  const unsigned char* start =
-      reinterpret_cast<const unsigned char*>(in->data());
+  // reinterpret_cast-ing from char* to uint8_t* to avoid signedness.
+  const uint8_t* start = reinterpret_cast<const uint8_t*>(in->data());
 
-  const unsigned char* end = start + in->size();
-  const unsigned char* current = start;
+  const uint8_t* end = start + in->size();
+  const uint8_t* current = start;
   for (; current != end; ++current) {
-    const unsigned char ch = *current;
+    const uint8_t ch = *current;
     if (ch < '0' || ch > '9') break;
 
     // Overflow check.

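Note on the conversion above: in the C API, booleans stay plain bytes; the patch only tightens the spelling from unsigned char to the fixed-width uint8_t, with 0 still meaning false and any non-zero value true. A minimal sketch of how a caller passes these flags (leveldb_options_create()/leveldb_options_destroy() are the matching constructor/destructor from leveldb/c.h, not shown in the hunks above):

  /* Sketch only: exercises the uint8_t-as-bool convention of the C API. */
  #include "leveldb/c.h"

  int main(void) {
    leveldb_options_t* options = leveldb_options_create();
    leveldb_options_set_create_if_missing(options, 1); /* non-zero == true */
    leveldb_options_set_paranoid_checks(options, 0);   /* 0 == false */

    leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
    leveldb_writeoptions_set_sync(woptions, 1);

    leveldb_writeoptions_destroy(woptions);
    leveldb_options_destroy(options);
    return 0;
  }
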
From 72a38ff7f206b3924ac009a12a1838d6a0bdab03 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 28 May 2019 16:17:44 -0700
Subject: [PATCH 094/181] Replace "> >" with ">>"

PiperOrigin-RevId: 250383036
---
 db/version_edit.h  | 6 +++---
 util/arena_test.cc | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
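
Note: the spaced form "> >" was only ever a workaround for pre-C++11 compilers, which lexed consecutive closing angle brackets as the right-shift operator; C++11 parses nested template argument lists correctly, so the cleanup below is purely cosmetic. A tiny sketch, assuming a C++11 (or later) compiler:

  #include <cstdint>
  #include <utility>
  #include <vector>

  std::vector<std::pair<int, std::uint64_t> > old_style;  // pre-C++11 workaround
  std::vector<std::pair<int, std::uint64_t>> new_style;   // fine since C++11

  int main() { return 0; }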

diff --git a/db/version_edit.h b/db/version_edit.h
index 86b2f22..0de4531 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -83,7 +83,7 @@ class VersionEdit {
  private:
   friend class VersionSet;
 
-  typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;
+  typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;
 
   std::string comparator_;
   uint64_t log_number_;
@@ -96,9 +96,9 @@ class VersionEdit {
   bool has_next_file_number_;
   bool has_last_sequence_;
 
-  std::vector<std::pair<int, InternalKey> > compact_pointers_;
+  std::vector<std::pair<int, InternalKey>> compact_pointers_;
   DeletedFileSet deleted_files_;
-  std::vector<std::pair<int, FileMetaData> > new_files_;
+  std::vector<std::pair<int, FileMetaData>> new_files_;
 };
 
 }  // namespace leveldb
diff --git a/util/arena_test.cc b/util/arena_test.cc
index f34095c..e917228 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -14,7 +14,7 @@ class ArenaTest {};
 TEST(ArenaTest, Empty) { Arena arena; }
 
 TEST(ArenaTest, Simple) {
-  std::vector<std::pair<size_t, char*> > allocated;
+  std::vector<std::pair<size_t, char*>> allocated;
   Arena arena;
   const int N = 100000;
   size_t bytes = 0;

From 6a90bb91ee72642241fdbeefa673f88370c7b245 Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Tue, 11 Jun 2019 19:16:49 +0800
Subject: [PATCH 095/181] use ForEachOverlapping to impl Get

---
 db/version_set.cc | 115 ++++++++++++++++++----------------------------
 1 file changed, 45 insertions(+), 70 deletions(-)
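
Note: the diff below folds the level-by-level search loop of Version::Get() into the existing Version::ForEachOverlapping() helper, which visits candidate files from newest to oldest and stops as soon as the callback returns false. A standalone sketch of the "state object + static callback through void*" shape it adopts (FileMetaData and the driver here are simplified stand-ins, not the real leveldb types):

  #include <cstdio>

  struct FileMetaData { int number; };

  // Simplified driver: call func(arg, level, f) for each candidate file and
  // stop as soon as the callback asks to (returns false).
  void ForEachOverlapping(FileMetaData* files, int n, void* arg,
                          bool (*func)(void*, int, FileMetaData*)) {
    for (int i = 0; i < n; i++) {
      if (!func(arg, /*level=*/0, &files[i])) return;
    }
  }

  struct State {
    int wanted;
    FileMetaData* match = nullptr;

    static bool Match(void* arg, int level, FileMetaData* f) {
      (void)level;  // unused in this sketch
      State* state = reinterpret_cast<State*>(arg);
      if (f->number == state->wanted) {
        state->match = f;
        return false;  // stop the scan (the kFound/kDeleted/kCorrupt cases)
      }
      return true;  // keep searching in other files (the kNotFound case)
    }
  };

  int main() {
    FileMetaData files[] = {{7}, {4}, {9}};
    State state;
    state.wanted = 4;
    ForEachOverlapping(files, 3, &state, &State::Match);
    std::printf("found: %d\n", state.match != nullptr);
    return 0;
  }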

diff --git a/db/version_set.cc b/db/version_set.cc
index b62a2d0..625598d 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -330,94 +330,69 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   const Comparator* ucmp = vset_->icmp_.user_comparator();
   Status s;
 
-  stats->seek_file = nullptr;
-  stats->seek_file_level = -1;
-  FileMetaData* last_file_read = nullptr;
-  int last_file_read_level = -1;
+  struct State {
+    GetStats* stats;
+    const ReadOptions* options;
+    Slice ikey;
+    Slice user_key;
+    const Comparator* ucmp;
+    std::string* value;
 
-  // We can search level-by-level since entries never hop across
-  // levels.  Therefore we are guaranteed that if we find data
-  // in a smaller level, later levels are irrelevant.
-  std::vector<FileMetaData*> tmp;
-  FileMetaData* tmp2;
-  for (int level = 0; level < config::kNumLevels; level++) {
-    size_t num_files = files_[level].size();
-    if (num_files == 0) continue;
+    VersionSet *vset;
+    Status s;
 
-    // Get the list of files to search in this level
-    FileMetaData* const* files = &files_[level][0];
-    if (level == 0) {
-      // Level-0 files may overlap each other.  Find all files that
-      // overlap user_key and process them in order from newest to oldest.
-      tmp.reserve(num_files);
-      for (uint32_t i = 0; i < num_files; i++) {
-        FileMetaData* f = files[i];
-        if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
-            ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
-          tmp.push_back(f);
-        }
-      }
-      if (tmp.empty()) continue;
+    static bool Match(void* arg, int level, FileMetaData* f) {
+      State* state = reinterpret_cast<State*>(arg);
 
-      std::sort(tmp.begin(), tmp.end(), NewestFirst);
-      files = &tmp[0];
-      num_files = tmp.size();
-    } else {
-      // Binary search to find earliest index whose largest key >= ikey.
-      uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
-      if (index >= num_files) {
-        files = nullptr;
-        num_files = 0;
-      } else {
-        tmp2 = files[index];
-        if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
-          // All of "tmp2" is past any data for user_key
-          files = nullptr;
-          num_files = 0;
-        } else {
-          files = &tmp2;
-          num_files = 1;
-        }
-      }
-    }
-
-    for (uint32_t i = 0; i < num_files; ++i) {
-      if (last_file_read != nullptr && stats->seek_file == nullptr) {
+      if (state->stats->seek_file == nullptr) {
         // We have had more than one seek for this read.  Charge the 1st file.
-        stats->seek_file = last_file_read;
-        stats->seek_file_level = last_file_read_level;
+        state->stats->seek_file = f;
+        state->stats->seek_file_level = level;
       }
 
-      FileMetaData* f = files[i];
-      last_file_read = f;
-      last_file_read_level = level;
-
       Saver saver;
       saver.state = kNotFound;
-      saver.ucmp = ucmp;
-      saver.user_key = user_key;
-      saver.value = value;
-      s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
-                                   &saver, SaveValue);
+      saver.ucmp = state->ucmp;
+      saver.user_key = state->user_key;
+      saver.value = state->value;
+
+      Status s = state->vset->table_cache_->Get(*state->options, f->number,
+                                                f->file_size, state->ikey,
+                                                &saver, SaveValue);
       if (!s.ok()) {
-        return s;
+        state->s = s;
+        return false;
       }
       switch (saver.state) {
         case kNotFound:
-          break;  // Keep searching in other files
+          return true; // Keep searching in other files
         case kFound:
-          return s;
+          state->s = s;
+          return false;
         case kDeleted:
-          s = Status::NotFound(Slice());  // Use empty error message for speed
-          return s;
+          return false;
         case kCorrupt:
-          s = Status::Corruption("corrupted key for ", user_key);
-          return s;
+          state->s = Status::Corruption("corrupted key for ", state->user_key);
+          return false;
       }
     }
-  }
+  };
 
-  return Status::NotFound(Slice());  // Use an empty error message for speed
+  stats->seek_file = nullptr;
+  stats->seek_file_level = -1;
+
+  State state;
+  state.s = Status::NotFound(Slice());
+  state.stats = stats;
+  state.ikey = ikey;
+  state.user_key = user_key;
+  state.ucmp = ucmp;
+  state.value = value;
+  state.vset = vset_;
+
+  ForEachOverlapping(user_key, ikey, &state, &State::Match);
+
+  return state.s;
 }
 
 bool Version::UpdateStats(const GetStats& stats) {

From 8fa7a937ee8f38d8869357b0f27f120c5c58f4c9 Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Tue, 11 Jun 2019 20:20:58 +0800
Subject: [PATCH 096/181] fix bug

---
 db/version_set.cc | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
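
Note: the previous patch dropped the last_file_read bookkeeping, so the very first file consulted was charged with a seek even when the lookup touched only that one file. The fix below restores the original rule: a file is charged only once a second file has to be consulted for the same read. A small sketch of that rule, outside leveldb, with ints standing in for FileMetaData pointers:

  #include <cassert>
  #include <vector>

  struct GetStats { int seek_file = -1; };  // -1 stands in for nullptr

  void SimulateRead(const std::vector<int>& files_consulted, GetStats* stats) {
    int last_file_read = -1;
    for (int file : files_consulted) {
      if (last_file_read != -1 && stats->seek_file == -1) {
        // More than one seek for this read: charge the first file.
        stats->seek_file = last_file_read;
      }
      last_file_read = file;
    }
  }

  int main() {
    GetStats one, two;
    SimulateRead({7}, &one);     // answer came from the first file: no charge
    SimulateRead({7, 4}, &two);  // a second file was needed: charge file 7
    assert(one.seek_file == -1);
    assert(two.seek_file == 7);
    return 0;
  }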

diff --git a/db/version_set.cc b/db/version_set.cc
index 625598d..1c2781e 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -328,7 +328,9 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
-  Status s;
+  
+  stats->seek_file = nullptr;
+  stats->seek_file_level = -1;
 
   struct State {
     GetStats* stats;
@@ -337,6 +339,8 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     Slice user_key;
     const Comparator* ucmp;
     std::string* value;
+    FileMetaData *last_file_read;
+    int last_file_level;
 
     VersionSet *vset;
     Status s;
@@ -344,11 +348,13 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     static bool Match(void* arg, int level, FileMetaData* f) {
       State* state = reinterpret_cast<State*>(arg);
 
-      if (state->stats->seek_file == nullptr) {
+      if (state->last_file_read != nullptr && state->stats->seek_file == nullptr) {
         // We have had more than one seek for this read.  Charge the 1st file.
-        state->stats->seek_file = f;
-        state->stats->seek_file_level = level;
+        state->stats->seek_file = state->last_file_read;
+        state->stats->seek_file_level = state->last_file_level;
       }
+      state->last_file_read = f;
+      state->last_file_level = level;
 
       Saver saver;
       saver.state = kNotFound;
@@ -378,18 +384,18 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     }
   };
 
-  stats->seek_file = nullptr;
-  stats->seek_file_level = -1;
-
   State state;
   state.s = Status::NotFound(Slice());
   state.stats = stats;
+  state.last_file_read = nullptr;
+  state.last_file_level = -1;
+
   state.ikey = ikey;
   state.user_key = user_key;
   state.ucmp = ucmp;
   state.value = value;
   state.vset = vset_;
-
+  
   ForEachOverlapping(user_key, ikey, &state, &State::Match);
 
   return state.s;

From 177cd08629883c409f7a01f90f7084bc5518f1ef Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Tue, 11 Jun 2019 20:30:54 +0800
Subject: [PATCH 097/181] format

---
 db/version_set.cc | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 1c2781e..d10108a 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -328,7 +328,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
-  
+
   stats->seek_file = nullptr;
   stats->seek_file_level = -1;
 
@@ -339,16 +339,17 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     Slice user_key;
     const Comparator* ucmp;
     std::string* value;
-    FileMetaData *last_file_read;
+    FileMetaData* last_file_read;
     int last_file_level;
 
-    VersionSet *vset;
+    VersionSet* vset;
     Status s;
 
     static bool Match(void* arg, int level, FileMetaData* f) {
       State* state = reinterpret_cast<State*>(arg);
 
-      if (state->last_file_read != nullptr && state->stats->seek_file == nullptr) {
+      if (state->last_file_read != nullptr &&
+          state->stats->seek_file == nullptr) {
         // We have had more than one seek for this read.  Charge the 1st file.
         state->stats->seek_file = state->last_file_read;
         state->stats->seek_file_level = state->last_file_level;
@@ -371,7 +372,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
       }
       switch (saver.state) {
         case kNotFound:
-          return true; // Keep searching in other files
+          return true;  // Keep searching in other files
         case kFound:
           state->s = s;
           return false;
@@ -395,7 +396,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   state.ucmp = ucmp;
   state.value = value;
   state.vset = vset_;
-  
+
   ForEachOverlapping(user_key, ikey, &state, &State::Match);
 
   return state.s;

From f668239bb262609146496b854e1ec3cea9cd1a83 Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Tue, 11 Jun 2019 20:33:18 +0800
Subject: [PATCH 098/181] remove TODO in Version::ForEachOverlapping

---
 db/version_set.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index d10108a..f63f461 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -281,7 +281,6 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
 
 void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                  bool (*func)(void*, int, FileMetaData*)) {
-  // TODO(sanjay): Change Version::Get() to use this function.
   const Comparator* ucmp = vset_->icmp_.user_comparator();
 
   // Search level-0 in order from newest to oldest.

From 76ca1162768e5c89f1a49946a1f286c702ae27ae Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Wed, 12 Jun 2019 05:58:00 +0800
Subject: [PATCH 099/181] fix bug(uninitialized options pointer in State)

---
 db/version_set.cc | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
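
Note: State is a local aggregate with no constructor, so every member has to be assigned before ForEachOverlapping() runs; the earlier patches in this series never set state.options, leaving an indeterminate pointer that Match() then dereferenced. A minimal illustration of the hazard (the types here are stand-ins, not leveldb code):

  struct ReadOptions { bool verify_checksums = false; };

  struct State {
    const ReadOptions* options;  // indeterminate unless assigned
  };

  int main() {
    ReadOptions read_options;
    State assigned;
    assigned.options = &read_options;  // what the fix below does, in effect

    State zeroed{};  // value-initialization: options == nullptr, so a missed
                     // assignment at least fails predictably
    return zeroed.options == nullptr ? 0 : 1;
  }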

diff --git a/db/version_set.cc b/db/version_set.cc
index f63f461..932b861 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -327,7 +327,6 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
-
   stats->seek_file = nullptr;
   stats->seek_file_level = -1;
 
@@ -339,7 +338,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     const Comparator* ucmp;
     std::string* value;
     FileMetaData* last_file_read;
-    int last_file_level;
+    int last_file_read_level;
 
     VersionSet* vset;
     Status s;
@@ -347,14 +346,15 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     static bool Match(void* arg, int level, FileMetaData* f) {
       State* state = reinterpret_cast<State*>(arg);
 
-      if (state->last_file_read != nullptr &&
-          state->stats->seek_file == nullptr) {
+      if (state->stats->seek_file == nullptr &&
+          state->last_file_read != nullptr) {
         // We have had more than one seek for this read.  Charge the 1st file.
         state->stats->seek_file = state->last_file_read;
-        state->stats->seek_file_level = state->last_file_level;
+        state->stats->seek_file_level = state->last_file_read_level;
       }
+
       state->last_file_read = f;
-      state->last_file_level = level;
+      state->last_file_read_level = level;
 
       Saver saver;
       saver.state = kNotFound;
@@ -388,8 +388,9 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
   state.s = Status::NotFound(Slice());
   state.stats = stats;
   state.last_file_read = nullptr;
-  state.last_file_level = -1;
+  state.last_file_read_level = -1;
 
+  state.options = &options;
   state.ikey = ikey;
   state.user_key = user_key;
   state.ucmp = ucmp;

From 107a75b62c19cce901ce10619b63c4b7acc9a0be Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Wed, 12 Jun 2019 07:05:00 +0800
Subject: [PATCH 100/181] cache Saver in State object

---
 db/version_set.cc | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)
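
Note: with the Saver and a found flag cached inside State, Get() now returns state.s only when some file produced a definitive answer (found, error, or corruption) and otherwise falls back to Status::NotFound(). Callers keep distinguishing the outcomes through the returned Status, roughly as in this sketch (Status::ok(), IsNotFound() and ToString() are part of leveldb's public Status interface):

  #include <cstdio>
  #include <string>

  #include "leveldb/slice.h"
  #include "leveldb/status.h"

  void ReportLookup(const leveldb::Status& s, const std::string& value) {
    if (s.ok()) {
      std::printf("found: %s\n", value.c_str());         // saver.state was kFound
    } else if (s.IsNotFound()) {
      std::printf("missing or deleted\n");               // kNotFound / kDeleted
    } else {
      std::printf("error: %s\n", s.ToString().c_str());  // I/O error or kCorrupt
    }
  }

  int main() {
    ReportLookup(leveldb::Status::OK(), "v1");
    ReportLookup(leveldb::Status::NotFound(leveldb::Slice()), "");
    return 0;
  }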

diff --git a/db/version_set.cc b/db/version_set.cc
index 932b861..3ddddf3 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -324,13 +324,11 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
 
 Status Version::Get(const ReadOptions& options, const LookupKey& k,
                     std::string* value, GetStats* stats) {
-  Slice ikey = k.internal_key();
-  Slice user_key = k.user_key();
-  const Comparator* ucmp = vset_->icmp_.user_comparator();
   stats->seek_file = nullptr;
   stats->seek_file_level = -1;
 
   struct State {
+    Saver saver;
     GetStats* stats;
     const ReadOptions* options;
     Slice ikey;
@@ -342,6 +340,7 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
 
     VersionSet* vset;
     Status s;
+    bool found;
 
     static bool Match(void* arg, int level, FileMetaData* f) {
       State* state = reinterpret_cast<State*>(arg);
@@ -356,50 +355,50 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
       state->last_file_read = f;
       state->last_file_read_level = level;
 
-      Saver saver;
-      saver.state = kNotFound;
-      saver.ucmp = state->ucmp;
-      saver.user_key = state->user_key;
-      saver.value = state->value;
-
-      Status s = state->vset->table_cache_->Get(*state->options, f->number,
+      state->s = state->vset->table_cache_->Get(*state->options, f->number,
                                                 f->file_size, state->ikey,
-                                                &saver, SaveValue);
-      if (!s.ok()) {
-        state->s = s;
+                                                &state->saver, SaveValue);
+      if (!state->s.ok()) {
+        state->found = true;
         return false;
       }
-      switch (saver.state) {
+      switch (state->saver.state) {
         case kNotFound:
           return true;  // Keep searching in other files
         case kFound:
-          state->s = s;
+          state->found = true;
           return false;
         case kDeleted:
           return false;
         case kCorrupt:
           state->s = Status::Corruption("corrupted key for ", state->user_key);
+          state->found = true;
           return false;
       }
     }
   };
 
   State state;
-  state.s = Status::NotFound(Slice());
+  state.found = false;
   state.stats = stats;
   state.last_file_read = nullptr;
   state.last_file_read_level = -1;
 
   state.options = &options;
-  state.ikey = ikey;
-  state.user_key = user_key;
-  state.ucmp = ucmp;
+  state.ikey = k.internal_key();
+  state.user_key = k.user_key();
+  state.ucmp = vset_->icmp_.user_comparator();
   state.value = value;
   state.vset = vset_;
 
-  ForEachOverlapping(user_key, ikey, &state, &State::Match);
+  state.saver.state = kNotFound;
+  state.saver.ucmp = state.ucmp;
+  state.saver.user_key = state.user_key;
+  state.saver.value = state.value;
 
-  return state.s;
+  ForEachOverlapping(state.user_key, state.ikey, &state, &State::Match);
+
+  return state.found ? state.s : Status::NotFound(Slice());
 }
 
 bool Version::UpdateStats(const GetStats& stats) {

From 69061b464ab1da287da9b7ffec1ed911b754403b Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 12 Jun 2019 09:47:38 -0700
Subject: [PATCH 101/181] Disable exceptions and RTTI in CMake configuration.

PiperOrigin-RevId: 252842234
---
 CMakeLists.txt | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)
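
Note: building with exceptions and RTTI off works because leveldb reports failures through leveldb::Status return values rather than by throwing, as the surrounding patches illustrate. A minimal sketch of that convention (DoWork is a hypothetical function, not a leveldb API):

  #include "leveldb/status.h"

  leveldb::Status DoWork(bool fail) {
    if (fail) {
      return leveldb::Status::IOError("DoWork", "simulated failure");
    }
    return leveldb::Status::OK();
  }

  int main() {
    leveldb::Status s = DoWork(true);
    return s.ok() ? 0 : 1;
  }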

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1ea33ab..13ebbc9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,9 +49,31 @@ check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
 check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
 check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)
 
-include(CheckCXXSourceCompiles)
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+  # Disable C++ exceptions.
+  string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+  add_definitions(-D_HAS_EXCEPTIONS=0)
 
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+  # Disable RTTI.
+  string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+  # Enable strict prototype warnings for C code in clang and gcc.
+  if(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+  endif(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+
+  # Disable C++ exceptions.
+  string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+  # Disable RTTI.
+  string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+include(CheckCXXSourceCompiles)
 
 # Test whether -Wthread-safety is available. See
 # https://clang.llvm.org/docs/ThreadSafetyAnalysis.html

From e0d5f83a4f80060fe5b5d80025f0ad049bca430e Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 12 Jun 2019 14:05:14 -0700
Subject: [PATCH 102/181] Align EnvPosix and EnvWindows.

Fixes #695.

PiperOrigin-RevId: 252895299
---
 util/env_posix.cc   |  20 +-
 util/env_windows.cc | 523 ++++++++++++++++++++++++++------------------
 2 files changed, 316 insertions(+), 227 deletions(-)
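
Note: besides renaming parameters to match EnvPosix, the diff below gives WindowsEnv the same Schedule()/background-thread shape: a mutex-guarded std::queue of work items, a condition variable signaled when the queue becomes non-empty, and a single detached worker that drains it. A standalone sketch of that shape using the standard library (the real code uses port::Mutex, port::CondVar, and GUARDED_BY annotations):

  #include <chrono>
  #include <condition_variable>
  #include <cstdio>
  #include <mutex>
  #include <queue>
  #include <thread>

  class BackgroundQueue {
   public:
    void Schedule(void (*function)(void*), void* arg) {
      std::lock_guard<std::mutex> lock(mutex_);
      if (!started_) {  // start the worker on first use
        started_ = true;
        std::thread(&BackgroundQueue::ThreadMain, this).detach();
      }
      if (queue_.empty()) cv_.notify_one();  // worker may be waiting for work
      queue_.push(Item{function, arg});
    }

   private:
    struct Item {
      void (*function)(void*);
      void* arg;
    };

    void ThreadMain() {
      while (true) {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        Item item = queue_.front();
        queue_.pop();
        lock.unlock();
        item.function(item.arg);  // run the work item outside the lock
      }
    }

    std::mutex mutex_;
    std::condition_variable cv_;
    bool started_ = false;
    std::queue<Item> queue_;
  };

  int main() {
    BackgroundQueue queue;
    queue.Schedule([](void*) { std::printf("background work ran\n"); }, nullptr);
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    return 0;
  }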

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 420e709..00ca9ae 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -45,7 +45,7 @@ int g_open_read_only_file_limit = -1;
 // Up to 1000 mmap regions for 64-bit binaries; none for 32-bit.
 constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
 
-// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit.
+// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit().
 int g_mmap_limit = kDefaultMmapLimit;
 
 // Common flags defined for all posix open operations
@@ -491,7 +491,8 @@ class PosixEnv : public Env {
  public:
   PosixEnv();
   ~PosixEnv() override {
-    static char msg[] = "PosixEnv singleton destroyed. Unsupported behavior!\n";
+    static const char msg[] =
+        "PosixEnv singleton destroyed. Unsupported behavior!\n";
     std::fwrite(msg, 1, sizeof(msg), stderr);
     std::abort();
   }
@@ -663,7 +664,10 @@ class PosixEnv : public Env {
                 void* background_work_arg) override;
 
   void StartThread(void (*thread_main)(void* thread_main_arg),
-                   void* thread_main_arg) override;
+                   void* thread_main_arg) override {
+    std::thread new_thread(thread_main, thread_main_arg);
+    new_thread.detach();
+  }
 
   Status GetTestDirectory(std::string* result) override {
     const char* env = std::getenv("TEST_TMPDIR");
@@ -708,7 +712,9 @@ class PosixEnv : public Env {
     return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
   }
 
-  void SleepForMicroseconds(int micros) override { ::usleep(micros); }
+  void SleepForMicroseconds(int micros) override {
+    std::this_thread::sleep_for(std::chrono::microseconds(micros));
+  }
 
  private:
   void BackgroundThreadMain();
@@ -869,12 +875,6 @@ using PosixDefaultEnv = SingletonEnv<PosixEnv>;
 
 }  // namespace
 
-void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
-                           void* thread_main_arg) {
-  std::thread new_thread(thread_main, thread_main_arg);
-  new_thread.detach();
-}
-
 void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
   PosixDefaultEnv::AssertEnvNotInitialized();
   g_open_read_only_file_limit = limit;
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 09e3df6..2dd7794 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -13,9 +13,13 @@
 #include <atomic>
 #include <chrono>
 #include <condition_variable>
-#include <deque>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
 #include <memory>
 #include <mutex>
+#include <queue>
 #include <sstream>
 #include <string>
 #include <vector>
@@ -40,9 +44,9 @@ namespace {
 constexpr const size_t kWritableFileBufferSize = 65536;
 
 // Up to 1000 mmaps for 64-bit binaries; none for 32-bit.
-constexpr int kDefaultMmapLimit = sizeof(void*) >= 8 ? 1000 : 0;
+constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
 
-// Modified by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
+// Can be set by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
 int g_mmap_limit = kDefaultMmapLimit;
 
 std::string GetWindowsErrorMessage(DWORD error_code) {
@@ -71,9 +75,12 @@ Status WindowsError(const std::string& context, DWORD error_code) {
 class ScopedHandle {
  public:
   ScopedHandle(HANDLE handle) : handle_(handle) {}
+  ScopedHandle(const ScopedHandle&) = delete;
   ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
   ~ScopedHandle() { Close(); }
 
+  ScopedHandle& operator=(const ScopedHandle&) = delete;
+
   ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
     if (this != &rhs) handle_ = rhs.Release();
     return *this;
@@ -142,44 +149,43 @@ class Limiter {
 
 class WindowsSequentialFile : public SequentialFile {
  public:
-  WindowsSequentialFile(std::string fname, ScopedHandle file)
-      : filename_(fname), file_(std::move(file)) {}
+  WindowsSequentialFile(std::string filename, ScopedHandle handle)
+      : handle_(std::move(handle)), filename_(std::move(filename)) {}
   ~WindowsSequentialFile() override {}
 
   Status Read(size_t n, Slice* result, char* scratch) override {
-    Status s;
     DWORD bytes_read;
     // DWORD is 32-bit, but size_t could technically be larger. However leveldb
     // files are limited to leveldb::Options::max_file_size which is clamped to
     // 1<<30 or 1 GiB.
     assert(n <= std::numeric_limits<DWORD>::max());
-    if (!::ReadFile(file_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
                     nullptr)) {
-      s = WindowsError(filename_, ::GetLastError());
-    } else {
-      *result = Slice(scratch, bytes_read);
+      return WindowsError(filename_, ::GetLastError());
     }
-    return s;
+
+    *result = Slice(scratch, bytes_read);
+    return Status::OK();
   }
 
   Status Skip(uint64_t n) override {
     LARGE_INTEGER distance;
     distance.QuadPart = n;
-    if (!::SetFilePointerEx(file_.get(), distance, nullptr, FILE_CURRENT)) {
+    if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) {
       return WindowsError(filename_, ::GetLastError());
     }
     return Status::OK();
   }
 
  private:
-  std::string filename_;
-  ScopedHandle file_;
+  const ScopedHandle handle_;
+  const std::string filename_;
 };
 
 class WindowsRandomAccessFile : public RandomAccessFile {
  public:
-  WindowsRandomAccessFile(std::string fname, ScopedHandle handle)
-      : filename_(fname), handle_(std::move(handle)) {}
+  WindowsRandomAccessFile(std::string filename, ScopedHandle handle)
+      : handle_(std::move(handle)), filename_(std::move(filename)) {}
 
   ~WindowsRandomAccessFile() override = default;
 
@@ -204,107 +210,116 @@ class WindowsRandomAccessFile : public RandomAccessFile {
   }
 
  private:
-  std::string filename_;
-  ScopedHandle handle_;
+  const ScopedHandle handle_;
+  const std::string filename_;
 };
 
 class WindowsMmapReadableFile : public RandomAccessFile {
  public:
   // base[0,length-1] contains the mmapped contents of the file.
-  WindowsMmapReadableFile(std::string fname, void* base, size_t length,
-                          Limiter* limiter)
-      : filename_(std::move(fname)),
-        mmapped_region_(base),
+  WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+                          Limiter* mmap_limiter)
+      : mmap_base_(mmap_base),
         length_(length),
-        limiter_(limiter) {}
+        mmap_limiter_(mmap_limiter),
+        filename_(std::move(filename)) {}
 
   ~WindowsMmapReadableFile() override {
-    ::UnmapViewOfFile(mmapped_region_);
-    limiter_->Release();
+    ::UnmapViewOfFile(mmap_base_);
+    mmap_limiter_->Release();
   }
 
   Status Read(uint64_t offset, size_t n, Slice* result,
               char* scratch) const override {
-    Status s;
     if (offset + n > length_) {
       *result = Slice();
-      s = WindowsError(filename_, ERROR_INVALID_PARAMETER);
-    } else {
-      *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+      return WindowsError(filename_, ERROR_INVALID_PARAMETER);
     }
-    return s;
+
+    *result = Slice(mmap_base_ + offset, n);
+    return Status::OK();
   }
 
  private:
-  std::string filename_;
-  void* mmapped_region_;
-  size_t length_;
-  Limiter* limiter_;
+  char* const mmap_base_;
+  const size_t length_;
+  Limiter* const mmap_limiter_;
+  const std::string filename_;
 };
 
 class WindowsWritableFile : public WritableFile {
  public:
-  WindowsWritableFile(std::string fname, ScopedHandle handle)
-      : filename_(std::move(fname)), handle_(std::move(handle)), pos_(0) {}
+  WindowsWritableFile(std::string filename, ScopedHandle handle)
+      : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {}
 
   ~WindowsWritableFile() override = default;
 
   Status Append(const Slice& data) override {
-    size_t n = data.size();
-    const char* p = data.data();
+    size_t write_size = data.size();
+    const char* write_data = data.data();
 
     // Fit as much as possible into buffer.
-    size_t copy = std::min(n, kWritableFileBufferSize - pos_);
-    memcpy(buf_ + pos_, p, copy);
-    p += copy;
-    n -= copy;
-    pos_ += copy;
-    if (n == 0) {
+    size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+    std::memcpy(buf_ + pos_, write_data, copy_size);
+    write_data += copy_size;
+    write_size -= copy_size;
+    pos_ += copy_size;
+    if (write_size == 0) {
       return Status::OK();
     }
 
     // Can't fit in buffer, so need to do at least one write.
-    Status s = FlushBuffered();
-    if (!s.ok()) {
-      return s;
+    Status status = FlushBuffer();
+    if (!status.ok()) {
+      return status;
     }
 
     // Small writes go to buffer, large writes are written directly.
-    if (n < kWritableFileBufferSize) {
-      memcpy(buf_, p, n);
-      pos_ = n;
+    if (write_size < kWritableFileBufferSize) {
+      std::memcpy(buf_, write_data, write_size);
+      pos_ = write_size;
       return Status::OK();
     }
-    return WriteRaw(p, n);
+    return WriteUnbuffered(write_data, write_size);
   }
 
   Status Close() override {
-    Status result = FlushBuffered();
-    if (!handle_.Close() && result.ok()) {
-      result = WindowsError(filename_, ::GetLastError());
+    Status status = FlushBuffer();
+    if (!handle_.Close() && status.ok()) {
+      status = WindowsError(filename_, ::GetLastError());
     }
-    return result;
+    return status;
   }
 
-  Status Flush() override { return FlushBuffered(); }
+  Status Flush() override { return FlushBuffer(); }
 
   Status Sync() override {
-    // On Windows no need to sync parent directory. It's metadata will be
-    // updated via the creation of the new file, without an explicit sync.
-    return FlushBuffered();
+    // On Windows no need to sync parent directory. Its metadata will be updated
+    // via the creation of the new file, without an explicit sync.
+
+    Status status = FlushBuffer();
+    if (!status.ok()) {
+      return status;
+    }
+
+    if (!::FlushFileBuffers(handle_.get())) {
+      return Status::IOError(filename_,
+                             GetWindowsErrorMessage(::GetLastError()));
+    }
+    return Status::OK();
   }
 
  private:
-  Status FlushBuffered() {
-    Status s = WriteRaw(buf_, pos_);
+  Status FlushBuffer() {
+    Status status = WriteUnbuffered(buf_, pos_);
     pos_ = 0;
-    return s;
+    return status;
   }
 
-  Status WriteRaw(const char* p, size_t n) {
+  Status WriteUnbuffered(const char* data, size_t size) {
     DWORD bytes_written;
-    if (!::WriteFile(handle_.get(), p, static_cast<DWORD>(n), &bytes_written,
-                     nullptr)) {
+    if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size),
+                     &bytes_written, nullptr)) {
       return Status::IOError(filename_,
                              GetWindowsErrorMessage(::GetLastError()));
     }
@@ -312,10 +327,11 @@ class WindowsWritableFile : public WritableFile {
   }
 
   // buf_[0, pos_-1] contains data to be written to handle_.
-  const std::string filename_;
-  ScopedHandle handle_;
   char buf_[kWritableFileBufferSize];
   size_t pos_;
+
+  ScopedHandle handle_;
+  const std::string filename_;
 };
 
 // Lock or unlock the entire file as specified by |lock|. Returns true
@@ -337,124 +353,132 @@ bool LockOrUnlock(HANDLE handle, bool lock) {
 
 class WindowsFileLock : public FileLock {
  public:
-  WindowsFileLock(ScopedHandle handle, std::string name)
-      : handle_(std::move(handle)), name_(std::move(name)) {}
+  WindowsFileLock(ScopedHandle handle, std::string filename)
+      : handle_(std::move(handle)), filename_(std::move(filename)) {}
 
-  ScopedHandle& handle() { return handle_; }
-  const std::string& name() const { return name_; }
+  const ScopedHandle& handle() const { return handle_; }
+  const std::string& filename() const { return filename_; }
 
  private:
-  ScopedHandle handle_;
-  std::string name_;
+  const ScopedHandle handle_;
+  const std::string filename_;
 };
 
 class WindowsEnv : public Env {
  public:
   WindowsEnv();
   ~WindowsEnv() override {
-    static char msg[] = "Destroying Env::Default()\n";
-    fwrite(msg, 1, sizeof(msg), stderr);
-    abort();
+    static const char msg[] =
+        "WindowsEnv singleton destroyed. Unsupported behavior!\n";
+    std::fwrite(msg, 1, sizeof(msg), stderr);
+    std::abort();
   }
 
-  Status NewSequentialFile(const std::string& fname,
+  Status NewSequentialFile(const std::string& filename,
                            SequentialFile** result) override {
     *result = nullptr;
     DWORD desired_access = GENERIC_READ;
     DWORD share_mode = FILE_SHARE_READ;
-    ScopedHandle handle =
-        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
-                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+    ScopedHandle handle = ::CreateFileA(
+        filename.c_str(), desired_access, share_mode,
+        /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
+        /*hTemplateFile=*/nullptr);
     if (!handle.is_valid()) {
-      return WindowsError(fname, ::GetLastError());
+      return WindowsError(filename, ::GetLastError());
     }
-    *result = new WindowsSequentialFile(fname, std::move(handle));
+
+    *result = new WindowsSequentialFile(filename, std::move(handle));
     return Status::OK();
   }
 
-  Status NewRandomAccessFile(const std::string& fname,
+  Status NewRandomAccessFile(const std::string& filename,
                              RandomAccessFile** result) override {
     *result = nullptr;
     DWORD desired_access = GENERIC_READ;
     DWORD share_mode = FILE_SHARE_READ;
-    DWORD file_flags = FILE_ATTRIBUTE_READONLY;
-
     ScopedHandle handle =
-        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
-                      OPEN_EXISTING, file_flags, nullptr);
+        ::CreateFileA(filename.c_str(), desired_access, share_mode,
+                      /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING,
+                      FILE_ATTRIBUTE_READONLY,
+                      /*hTemplateFile=*/nullptr);
     if (!handle.is_valid()) {
-      return WindowsError(fname, ::GetLastError());
+      return WindowsError(filename, ::GetLastError());
     }
     if (!mmap_limiter_.Acquire()) {
-      *result = new WindowsRandomAccessFile(fname, std::move(handle));
+      *result = new WindowsRandomAccessFile(filename, std::move(handle));
       return Status::OK();
     }
 
     LARGE_INTEGER file_size;
+    Status status;
     if (!::GetFileSizeEx(handle.get(), &file_size)) {
-      return WindowsError(fname, ::GetLastError());
+      mmap_limiter_.Release();
+      return WindowsError(filename, ::GetLastError());
     }
 
     ScopedHandle mapping =
         ::CreateFileMappingA(handle.get(),
                              /*security attributes=*/nullptr, PAGE_READONLY,
                              /*dwMaximumSizeHigh=*/0,
-                             /*dwMaximumSizeLow=*/0, nullptr);
+                             /*dwMaximumSizeLow=*/0,
+                             /*lpName=*/nullptr);
     if (mapping.is_valid()) {
-      void* base = MapViewOfFile(mapping.get(), FILE_MAP_READ, 0, 0, 0);
-      if (base) {
+      void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ,
+                                        /*dwFileOffsetHigh=*/0,
+                                        /*dwFileOffsetLow=*/0,
+                                        /*dwNumberOfBytesToMap=*/0);
+      if (mmap_base) {
         *result = new WindowsMmapReadableFile(
-            fname, base, static_cast<size_t>(file_size.QuadPart),
-            &mmap_limiter_);
+            filename, reinterpret_cast<char*>(mmap_base),
+            static_cast<size_t>(file_size.QuadPart), &mmap_limiter_);
         return Status::OK();
       }
     }
-    Status s = WindowsError(fname, ::GetLastError());
-
-    if (!s.ok()) {
-      mmap_limiter_.Release();
-    }
-    return s;
+    mmap_limiter_.Release();
+    return WindowsError(filename, ::GetLastError());
   }
 
-  Status NewWritableFile(const std::string& fname,
+  Status NewWritableFile(const std::string& filename,
                          WritableFile** result) override {
     DWORD desired_access = GENERIC_WRITE;
-    DWORD share_mode = 0;
-
-    ScopedHandle handle =
-        ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
-                      CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+    DWORD share_mode = 0;  // Exclusive access.
+    ScopedHandle handle = ::CreateFileA(
+        filename.c_str(), desired_access, share_mode,
+        /*lpSecurityAttributes=*/nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+        /*hTemplateFile=*/nullptr);
     if (!handle.is_valid()) {
       *result = nullptr;
-      return WindowsError(fname, ::GetLastError());
+      return WindowsError(filename, ::GetLastError());
     }
 
-    *result = new WindowsWritableFile(fname, std::move(handle));
+    *result = new WindowsWritableFile(filename, std::move(handle));
     return Status::OK();
   }
 
-  Status NewAppendableFile(const std::string& fname,
+  Status NewAppendableFile(const std::string& filename,
                            WritableFile** result) override {
-    ScopedHandle handle =
-        ::CreateFileA(fname.c_str(), FILE_APPEND_DATA, 0, nullptr, OPEN_ALWAYS,
-                      FILE_ATTRIBUTE_NORMAL, nullptr);
+    DWORD desired_access = FILE_APPEND_DATA;
+    DWORD share_mode = 0;  // Exclusive access.
+    ScopedHandle handle = ::CreateFileA(
+        filename.c_str(), desired_access, share_mode,
+        /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+        /*hTemplateFile=*/nullptr);
     if (!handle.is_valid()) {
       *result = nullptr;
-      return WindowsError(fname, ::GetLastError());
+      return WindowsError(filename, ::GetLastError());
     }
 
-    *result = new WindowsWritableFile(fname, std::move(handle));
+    *result = new WindowsWritableFile(filename, std::move(handle));
     return Status::OK();
   }
 
-  bool FileExists(const std::string& fname) override {
-    return GetFileAttributesA(fname.c_str()) != INVALID_FILE_ATTRIBUTES;
+  bool FileExists(const std::string& filename) override {
+    return GetFileAttributesA(filename.c_str()) != INVALID_FILE_ATTRIBUTES;
   }
 
-  Status GetChildren(const std::string& dir,
+  Status GetChildren(const std::string& directory_path,
                      std::vector<std::string>* result) override {
-    const std::string find_pattern = dir + "\\*";
+    const std::string find_pattern = directory_path + "\\*";
     WIN32_FIND_DATAA find_data;
     HANDLE dir_handle = ::FindFirstFileA(find_pattern.c_str(), &find_data);
     if (dir_handle == INVALID_HANDLE_VALUE) {
@@ -462,7 +486,7 @@ class WindowsEnv : public Env {
       if (last_error == ERROR_FILE_NOT_FOUND) {
         return Status::OK();
       }
-      return WindowsError(dir, last_error);
+      return WindowsError(directory_path, last_error);
     }
     do {
       char base_name[_MAX_FNAME];
@@ -476,105 +500,109 @@ class WindowsEnv : public Env {
     DWORD last_error = ::GetLastError();
     ::FindClose(dir_handle);
     if (last_error != ERROR_NO_MORE_FILES) {
-      return WindowsError(dir, last_error);
+      return WindowsError(directory_path, last_error);
     }
     return Status::OK();
   }
 
-  Status DeleteFile(const std::string& fname) override {
-    if (!::DeleteFileA(fname.c_str())) {
-      return WindowsError(fname, ::GetLastError());
+  Status DeleteFile(const std::string& filename) override {
+    if (!::DeleteFileA(filename.c_str())) {
+      return WindowsError(filename, ::GetLastError());
     }
     return Status::OK();
   }
 
-  Status CreateDir(const std::string& name) override {
-    if (!::CreateDirectoryA(name.c_str(), nullptr)) {
-      return WindowsError(name, ::GetLastError());
+  Status CreateDir(const std::string& dirname) override {
+    if (!::CreateDirectoryA(dirname.c_str(), nullptr)) {
+      return WindowsError(dirname, ::GetLastError());
     }
     return Status::OK();
   }
 
-  Status DeleteDir(const std::string& name) override {
-    if (!::RemoveDirectoryA(name.c_str())) {
-      return WindowsError(name, ::GetLastError());
+  Status DeleteDir(const std::string& dirname) override {
+    if (!::RemoveDirectoryA(dirname.c_str())) {
+      return WindowsError(dirname, ::GetLastError());
     }
     return Status::OK();
   }
 
-  Status GetFileSize(const std::string& fname, uint64_t* size) override {
-    WIN32_FILE_ATTRIBUTE_DATA attrs;
-    if (!::GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) {
-      return WindowsError(fname, ::GetLastError());
+  Status GetFileSize(const std::string& filename, uint64_t* size) override {
+    WIN32_FILE_ATTRIBUTE_DATA file_attributes;
+    if (!::GetFileAttributesExA(filename.c_str(), GetFileExInfoStandard,
+                                &file_attributes)) {
+      return WindowsError(filename, ::GetLastError());
     }
     ULARGE_INTEGER file_size;
-    file_size.HighPart = attrs.nFileSizeHigh;
-    file_size.LowPart = attrs.nFileSizeLow;
+    file_size.HighPart = file_attributes.nFileSizeHigh;
+    file_size.LowPart = file_attributes.nFileSizeLow;
     *size = file_size.QuadPart;
     return Status::OK();
   }
 
-  Status RenameFile(const std::string& src,
-                    const std::string& target) override {
-    // Try a simple move first.  It will only succeed when |to_path| doesn't
-    // already exist.
-    if (::MoveFileA(src.c_str(), target.c_str())) {
+  Status RenameFile(const std::string& from, const std::string& to) override {
+    // Try a simple move first. It will only succeed when |to| doesn't already
+    // exist.
+    if (::MoveFileA(from.c_str(), to.c_str())) {
       return Status::OK();
     }
     DWORD move_error = ::GetLastError();
 
     // Try the full-blown replace if the move fails, as ReplaceFile will only
-    // succeed when |to_path| does exist. When writing to a network share, we
-    // may not be able to change the ACLs. Ignore ACL errors then
+    // succeed when |to| does exist. When writing to a network share, we may not
+    // be able to change the ACLs. Ignore ACL errors then
     // (REPLACEFILE_IGNORE_MERGE_ERRORS).
-    if (::ReplaceFileA(target.c_str(), src.c_str(), nullptr,
-                       REPLACEFILE_IGNORE_MERGE_ERRORS, nullptr, nullptr)) {
+    if (::ReplaceFileA(to.c_str(), from.c_str(), /*lpBackupFileName=*/nullptr,
+                       REPLACEFILE_IGNORE_MERGE_ERRORS,
+                       /*lpExclude=*/nullptr, /*lpReserved=*/nullptr)) {
       return Status::OK();
     }
     DWORD replace_error = ::GetLastError();
-    // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely
-    // that |to_path| does not exist. In this case, the more relevant error
-    // comes from the call to MoveFile.
+    // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that
+    // |to| does not exist. In this case, the more relevant error comes from the
+    // call to MoveFile.
     if (replace_error == ERROR_FILE_NOT_FOUND ||
         replace_error == ERROR_PATH_NOT_FOUND) {
-      return WindowsError(src, move_error);
+      return WindowsError(from, move_error);
     } else {
-      return WindowsError(src, replace_error);
+      return WindowsError(from, replace_error);
     }
   }
 
-  Status LockFile(const std::string& fname, FileLock** lock) override {
+  Status LockFile(const std::string& filename, FileLock** lock) override {
     *lock = nullptr;
     Status result;
     ScopedHandle handle = ::CreateFileA(
-        fname.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
+        filename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
         /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
         nullptr);
     if (!handle.is_valid()) {
-      result = WindowsError(fname, ::GetLastError());
+      result = WindowsError(filename, ::GetLastError());
     } else if (!LockOrUnlock(handle.get(), true)) {
-      result = WindowsError("lock " + fname, ::GetLastError());
+      result = WindowsError("lock " + filename, ::GetLastError());
     } else {
-      *lock = new WindowsFileLock(std::move(handle), std::move(fname));
+      *lock = new WindowsFileLock(std::move(handle), filename);
     }
     return result;
   }
 
   Status UnlockFile(FileLock* lock) override {
-    std::unique_ptr<WindowsFileLock> my_lock(
-        reinterpret_cast<WindowsFileLock*>(lock));
-    Status result;
-    if (!LockOrUnlock(my_lock->handle().get(), false)) {
-      result = WindowsError("unlock", ::GetLastError());
+    WindowsFileLock* windows_file_lock =
+        reinterpret_cast<WindowsFileLock*>(lock);
+    if (!LockOrUnlock(windows_file_lock->handle().get(), false)) {
+      return WindowsError("unlock " + windows_file_lock->filename(),
+                          ::GetLastError());
     }
-    return result;
+    delete windows_file_lock;
+    return Status::OK();
   }
 
-  void Schedule(void (*function)(void*), void* arg) override;
+  void Schedule(void (*background_work_function)(void* background_work_arg),
+                void* background_work_arg) override;
 
-  void StartThread(void (*function)(void* arg), void* arg) override {
-    std::thread t(function, arg);
-    t.detach();
+  void StartThread(void (*thread_main)(void* thread_main_arg),
+                   void* thread_main_arg) override {
+    std::thread new_thread(thread_main, thread_main_arg);
+    new_thread.detach();
   }
 
   Status GetTestDirectory(std::string* result) override {
@@ -601,7 +629,7 @@ class WindowsEnv : public Env {
     std::FILE* fp = std::fopen(filename.c_str(), "w");
     if (fp == nullptr) {
       *result = nullptr;
-      return WindowsError("NewLogger", ::GetLastError());
+      return WindowsError(filename, ::GetLastError());
     } else {
       *result = new WindowsLogger(fp);
       return Status::OK();
@@ -626,86 +654,147 @@ class WindowsEnv : public Env {
   }
 
  private:
-  // Entry per Schedule() call
-  struct BGItem {
-    void* arg;
-    void (*function)(void*);
+  void BackgroundThreadMain();
+
+  static void BackgroundThreadEntryPoint(WindowsEnv* env) {
+    env->BackgroundThreadMain();
+  }
+
+  // Stores the work item data in a Schedule() call.
+  //
+  // Instances are constructed on the thread calling Schedule() and used on the
+  // background thread.
+  //
+  // This structure is thread-safe because it is immutable.
+  struct BackgroundWorkItem {
+    explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+        : function(function), arg(arg) {}
+
+    void (*const function)(void*);
+    void* const arg;
   };
 
-  // BGThread() is the body of the background thread
-  void BGThread();
+  port::Mutex background_work_mutex_;
+  port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+  bool started_background_thread_ GUARDED_BY(background_work_mutex_);
 
-  std::mutex mu_;
-  std::condition_variable bgsignal_;
-  bool started_bgthread_;
-  std::deque<BGItem> queue_;
-  Limiter mmap_limiter_;
+  std::queue<BackgroundWorkItem> background_work_queue_
+      GUARDED_BY(background_work_mutex_);
+
+  Limiter mmap_limiter_;  // Thread-safe.
 };
 
 // Return the maximum number of concurrent mmaps.
-int MaxMmaps() {
-  if (g_mmap_limit >= 0) {
-    return g_mmap_limit;
-  }
-  // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
-  g_mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
-  return g_mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
 
 WindowsEnv::WindowsEnv()
-    : started_bgthread_(false), mmap_limiter_(MaxMmaps()) {}
+    : background_work_cv_(&background_work_mutex_),
+      started_background_thread_(false),
+      mmap_limiter_(MaxMmaps()) {}
 
-void WindowsEnv::Schedule(void (*function)(void*), void* arg) {
-  std::lock_guard<std::mutex> guard(mu_);
+void WindowsEnv::Schedule(
+    void (*background_work_function)(void* background_work_arg),
+    void* background_work_arg) {
+  background_work_mutex_.Lock();
 
-  // Start background thread if necessary
-  if (!started_bgthread_) {
-    started_bgthread_ = true;
-    std::thread t(&WindowsEnv::BGThread, this);
-    t.detach();
+  // Start the background thread, if we haven't done so already.
+  if (!started_background_thread_) {
+    started_background_thread_ = true;
+    std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this);
+    background_thread.detach();
   }
 
-  // If the queue is currently empty, the background thread may currently be
-  // waiting.
-  if (queue_.empty()) {
-    bgsignal_.notify_one();
+  // If the queue is empty, the background thread may be waiting for work.
+  if (background_work_queue_.empty()) {
+    background_work_cv_.Signal();
   }
 
-  // Add to priority queue
-  queue_.push_back(BGItem());
-  queue_.back().function = function;
-  queue_.back().arg = arg;
+  background_work_queue_.emplace(background_work_function, background_work_arg);
+  background_work_mutex_.Unlock();
 }
 
-void WindowsEnv::BGThread() {
+void WindowsEnv::BackgroundThreadMain() {
   while (true) {
-    // Wait until there is an item that is ready to run
-    std::unique_lock<std::mutex> lk(mu_);
-    bgsignal_.wait(lk, [this] { return !queue_.empty(); });
+    background_work_mutex_.Lock();
 
-    void (*function)(void*) = queue_.front().function;
-    void* arg = queue_.front().arg;
-    queue_.pop_front();
+    // Wait until there is work to be done.
+    while (background_work_queue_.empty()) {
+      background_work_cv_.Wait();
+    }
 
-    lk.unlock();
-    (*function)(arg);
+    assert(!background_work_queue_.empty());
+    auto background_work_function = background_work_queue_.front().function;
+    void* background_work_arg = background_work_queue_.front().arg;
+    background_work_queue_.pop();
+
+    background_work_mutex_.Unlock();
+    background_work_function(background_work_arg);
   }
 }
 
+// Wraps an Env instance whose destructor is never called.
+//
+// Intended usage:
+//   using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+//   void ConfigurePosixEnv(int param) {
+//     PlatformSingletonEnv::AssertEnvNotInitialized();
+//     // set global configuration flags.
+//   }
+//   Env* Env::Default() {
+//     static PlatformSingletonEnv default_env;
+//     return default_env.env();
+//   }
+template <typename EnvType>
+class SingletonEnv {
+ public:
+  SingletonEnv() {
+#if !defined(NDEBUG)
+    env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif  // !defined(NDEBUG)
+    static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+                  "env_storage_ will not fit the Env");
+    static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+                  "env_storage_ does not meet the Env's alignment needs");
+    new (&env_storage_) EnvType();
+  }
+  ~SingletonEnv() = default;
+
+  SingletonEnv(const SingletonEnv&) = delete;
+  SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+  Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+  static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+    assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif  // !defined(NDEBUG)
+  }
+
+ private:
+  typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+      env_storage_;
+#if !defined(NDEBUG)
+  static std::atomic<bool> env_initialized_;
+#endif  // !defined(NDEBUG)
+};
+
+#if !defined(NDEBUG)
+template <typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif  // !defined(NDEBUG)
+
+using WindowsDefaultEnv = SingletonEnv<WindowsEnv>;
+
 }  // namespace
 
-static std::once_flag once;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new WindowsEnv(); }
-
 void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
-  assert(default_env == nullptr);
+  WindowsDefaultEnv::AssertEnvNotInitialized();
   g_mmap_limit = limit;
 }
 
 Env* Env::Default() {
-  std::call_once(once, InitDefaultEnv);
-  return default_env;
+  static WindowsDefaultEnv env_container;
+  return env_container.env();
 }
 
 }  // namespace leveldb
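
For reference, a minimal standalone sketch of the never-destroyed-singleton technique that `SingletonEnv` uses above: reserve aligned raw storage, construct the object into it with placement new, and never run its destructor. The `Config` type and `SharedConfig()` function are hypothetical stand-ins, not leveldb code.

```cpp
#include <new>
#include <type_traits>

// Hypothetical configuration type standing in for the wrapped Env.
class Config {
 public:
  Config() : verbose_(false) {}
  bool verbose() const { return verbose_; }

 private:
  bool verbose_;
};

// Returns a Config that is constructed on first use and never destroyed, so
// it stays usable even while other static objects are being torn down.
Config* SharedConfig() {
  // Raw storage with Config's size and alignment. The storage object has a
  // trivial destructor, so the Config placed inside it is never destroyed.
  static std::aligned_storage<sizeof(Config), alignof(Config)>::type storage;
  static Config* config = new (&storage) Config();  // placement new
  return config;
}

int main() { return SharedConfig()->verbose() ? 1 : 0; }
```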

From 9ee91ac747ddf26f484d54f9aa474ccc4a2e0359 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 12 Jun 2019 15:28:22 -0700
Subject: [PATCH 103/181] Ending sentences with periods in README.md.

This change was submitted in https://github.com/google/leveldb/pull/575
by @prajwalchalla.

This fixes issue #523.

PiperOrigin-RevId: 252912613
---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 0b660ae..0a312ab 100644
--- a/README.md
+++ b/README.md
@@ -195,7 +195,7 @@ internal APIs may be changed without warning.
 
 Guide to header files:
 
-* **include/db.h**: Main interface to the DB: Start here
+* **include/db.h**: Main interface to the DB: Start here.
 
 * **include/options.h**: Control over the behavior of an entire database,
 and also control over the behavior of individual reads and writes.
@@ -203,7 +203,7 @@ and also control over the behavior of individual reads and writes.
 * **include/comparator.h**: Abstraction for user-specified comparison function.
 If you want just bytewise comparison of keys, you can use the default
 comparator, but clients can write their own comparator implementations if they
-want custom ordering (e.g. to handle different character encodings, etc.)
+want custom ordering (e.g. to handle different character encodings, etc.).
 
 * **include/iterator.h**: Interface for iterating over data. You can get
 an iterator from a DB object.
@@ -219,7 +219,7 @@ and is used to report success and various kinds of errors.
 
 * **include/env.h**:
 Abstraction of the OS environment.  A posix implementation of this interface is
-in util/env_posix.cc
+in util/env_posix.cc.
 
 * **include/table.h, include/table_builder.h**: Lower-level modules that most
-clients probably won't use directly
+clients probably won't use directly.

From 046216a7ca6fb17a40cf8aa5598d90c825212a3d Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 12 Jun 2019 16:21:55 -0700
Subject: [PATCH 104/181] Add "leveldb" subdirectory to public include paths.

The documentation (README.md and index.md) referred to the
public headers using an incorrect path - fixing.

PiperOrigin-RevId: 252922925
---
 README.md    | 20 ++++++++++----------
 doc/index.md |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 0a312ab..dadfd56 100644
--- a/README.md
+++ b/README.md
@@ -189,37 +189,37 @@ uncompressed blocks in memory, the read performance improves again:
 See [doc/index.md](doc/index.md) for more explanation. See
 [doc/impl.md](doc/impl.md) for a brief overview of the implementation.
 
-The public interface is in include/*.h.  Callers should not include or
+The public interface is in include/leveldb/*.h.  Callers should not include or
 rely on the details of any other header files in this package.  Those
 internal APIs may be changed without warning.
 
 Guide to header files:
 
-* **include/db.h**: Main interface to the DB: Start here.
+* **include/leveldb/db.h**: Main interface to the DB: Start here.
 
-* **include/options.h**: Control over the behavior of an entire database,
+* **include/leveldb/options.h**: Control over the behavior of an entire database,
 and also control over the behavior of individual reads and writes.
 
-* **include/comparator.h**: Abstraction for user-specified comparison function.
+* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function.
 If you want just bytewise comparison of keys, you can use the default
 comparator, but clients can write their own comparator implementations if they
 want custom ordering (e.g. to handle different character encodings, etc.).
 
-* **include/iterator.h**: Interface for iterating over data. You can get
+* **include/leveldb/iterator.h**: Interface for iterating over data. You can get
 an iterator from a DB object.
 
-* **include/write_batch.h**: Interface for atomically applying multiple
+* **include/leveldb/write_batch.h**: Interface for atomically applying multiple
 updates to a database.
 
-* **include/slice.h**: A simple module for maintaining a pointer and a
+* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a
 length into some other byte array.
 
-* **include/status.h**: Status is returned from many of the public interfaces
+* **include/leveldb/status.h**: Status is returned from many of the public interfaces
 and is used to report success and various kinds of errors.
 
-* **include/env.h**:
+* **include/leveldb/env.h**:
 Abstraction of the OS environment.  A posix implementation of this interface is
 in util/env_posix.cc.
 
-* **include/table.h, include/table_builder.h**: Lower-level modules that most
+* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most
 clients probably won't use directly.
diff --git a/doc/index.md b/doc/index.md
index ea4609d..3d9a258 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -307,7 +307,7 @@ version numbers found in the keys to decide how to interpret them.
 ## Performance
 
 Performance can be tuned by changing the default values of the types defined in
-`include/options.h`.
+`include/leveldb/options.h`.
 
 ### Block size
 

From 53e280b56866ac4c90a9f5fcfe02ebdfd4a19832 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Thu, 13 Jun 2019 14:59:06 -0700
Subject: [PATCH 105/181] Simplify unlocking in DeleteObsoleteFiles.

A recent change (4cb80b7ddce6f) to DBImpl::DeleteObsoleteFiles
unlocked DBImpl::mutex_ while deleting files to allow for
greater concurrency. This change improves on the prior in
a few areas:

1. The table is evicted from the table cache before unlocking
   the mutex. This should only improve performance.
2. This implementation is slightly simpler, but at the cost of
   a bit more memory usage.
3. A comment adds more detail as to why the mutex is being
   unlocked and why it is safe to do so.

PiperOrigin-RevId: 253111645
---
 db/db_impl.cc | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 7695d0b..4754ba3 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -229,32 +229,27 @@ void DBImpl::DeleteObsoleteFiles() {
     return;
   }
 
-  const uint64_t log_number = versions_->LogNumber();
-  const uint64_t prev_log_number = versions_->PrevLogNumber();
-  const uint64_t manifest_file_number = versions_->ManifestFileNumber();
-
   // Make a set of all of the live files
   std::set<uint64_t> live = pending_outputs_;
   versions_->AddLiveFiles(&live);
 
   std::vector<std::string> filenames;
   env_->GetChildren(dbname_, &filenames);  // Ignoring errors on purpose
-
-  // Unlock while deleting obsolete files
-  mutex_.Unlock();
   uint64_t number;
   FileType type;
-  for (size_t i = 0; i < filenames.size(); i++) {
-    if (ParseFileName(filenames[i], &number, &type)) {
+  std::vector<std::string> files_to_delete;
+  for (std::string& filename : filenames) {
+    if (ParseFileName(filename, &number, &type)) {
       bool keep = true;
       switch (type) {
         case kLogFile:
-          keep = ((number >= log_number) || (number == prev_log_number));
+          keep = ((number >= versions_->LogNumber()) ||
+                  (number == versions_->PrevLogNumber()));
           break;
         case kDescriptorFile:
           // Keep my manifest file, and any newer incarnations'
           // (in case there is a race that allows other incarnations)
-          keep = (number >= manifest_file_number);
+          keep = (number >= versions_->ManifestFileNumber());
           break;
         case kTableFile:
           keep = (live.find(number) != live.end());
@@ -272,15 +267,23 @@ void DBImpl::DeleteObsoleteFiles() {
       }
 
       if (!keep) {
+        files_to_delete.push_back(std::move(filename));
         if (type == kTableFile) {
           table_cache_->Evict(number);
         }
         Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
             static_cast<unsigned long long>(number));
-        env_->DeleteFile(dbname_ + "/" + filenames[i]);
       }
     }
   }
+
+  // While deleting all files, unblock other threads. All files being deleted
+  // have unique names which will not collide with newly created files and
+  // are therefore safe to delete while allowing other threads to proceed.
+  mutex_.Unlock();
+  for (const std::string& filename : files_to_delete) {
+    env_->DeleteFile(dbname_ + "/" + filename);
+  }
   mutex_.Lock();
 }
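
The locking pattern from this patch, reduced to a minimal standalone sketch: decide which files to delete while holding the mutex, then release it for the slow filesystem calls. `RemoveStaleFiles`, `IsObsolete`, and the use of `std::mutex`/`std::remove` are illustrative stand-ins, not leveldb's `port::Mutex`/`Env` API.

```cpp
#include <cstdio>
#include <mutex>
#include <string>
#include <vector>

std::mutex mu;
std::vector<std::string> all_files;  // Shared state, guarded by mu.

bool IsObsolete(const std::string& name) {
  return name.rfind("old_", 0) == 0;  // True if the name starts with "old_".
}

void RemoveStaleFiles() {
  std::vector<std::string> files_to_delete;
  {
    std::lock_guard<std::mutex> lock(mu);
    // Choose the victims while holding the lock so the decision is
    // consistent with the shared state.
    for (const std::string& name : all_files) {
      if (IsObsolete(name)) files_to_delete.push_back(name);
    }
  }
  // Do the slow I/O without the lock. The chosen names are unique and cannot
  // collide with files that other threads create in the meantime.
  for (const std::string& name : files_to_delete) {
    std::remove(name.c_str());
  }
}

int main() {
  all_files = {"old_000007.log", "MANIFEST-000010"};
  RemoveStaleFiles();
}
```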
 

From 5e921896eedf87b0fb06bc8a1fd0991b9ac64131 Mon Sep 17 00:00:00 2001
From: neal-zhu <13126959424@163.com>
Date: Wed, 28 Aug 2019 23:43:34 +0800
Subject: [PATCH 106/181] drop fields in State that are duplicates of fields in
 Saver and fix typo

---
 db/version_set.cc | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 3ddddf3..fd5e3ab 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -332,9 +332,6 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
     GetStats* stats;
     const ReadOptions* options;
     Slice ikey;
-    Slice user_key;
-    const Comparator* ucmp;
-    std::string* value;
     FileMetaData* last_file_read;
     int last_file_read_level;
 
@@ -364,14 +361,15 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
       }
       switch (state->saver.state) {
         case kNotFound:
-          return true;  // Keep saerching in other files
+          return true;  // Keep searching in other files
         case kFound:
           state->found = true;
           return false;
         case kDeleted:
           return false;
         case kCorrupt:
-          state->s = Status::Corruption("corrupted key for ", state->user_key);
+          state->s =
+              Status::Corruption("corrupted key for ", state->saver.user_key);
           state->found = true;
           return false;
       }
@@ -386,17 +384,14 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
 
   state.options = &options;
   state.ikey = k.internal_key();
-  state.user_key = k.user_key();
-  state.ucmp = vset_->icmp_.user_comparator();
-  state.value = value;
   state.vset = vset_;
 
   state.saver.state = kNotFound;
-  state.saver.ucmp = state.ucmp;
-  state.saver.user_key = state.user_key;
-  state.saver.value = state.value;
+  state.saver.ucmp = vset_->icmp_.user_comparator();
+  state.saver.user_key = k.user_key();
+  state.saver.value = value;
 
-  ForEachOverlapping(state.user_key, state.ikey, &state, &State::Match);
+  ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match);
 
   return state.found ? state.s : Status::NotFound(Slice());
 }

From 60db170a43a373d734c5b9f19693d36c75251c39 Mon Sep 17 00:00:00 2001
From: Sanjay Ghemawat <sanjay@google.com>
Date: Tue, 10 Sep 2019 11:09:31 -0700
Subject: [PATCH 107/181] Fix tsan problem in env_test.

PiperOrigin-RevId: 268265314
---
 util/env_test.cc | 102 ++++++++++++++++++++++++++---------------------
 1 file changed, 57 insertions(+), 45 deletions(-)

diff --git a/util/env_test.cc b/util/env_test.cc
index 9e2ad1e..7db03fc 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -5,7 +5,6 @@
 #include "leveldb/env.h"
 
 #include <algorithm>
-#include <atomic>
 
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -24,16 +23,6 @@ class EnvTest {
   Env* env_;
 };
 
-namespace {
-
-static void SetAtomicBool(void* atomic_bool_ptr) {
-  std::atomic<bool>* atomic_bool =
-      reinterpret_cast<std::atomic<bool>*>(atomic_bool_ptr);
-  atomic_bool->store(true, std::memory_order_relaxed);
-}
-
-}  // namespace
-
 TEST(EnvTest, ReadWrite) {
   Random rnd(test::RandomSeed());
 
@@ -82,45 +71,73 @@ TEST(EnvTest, ReadWrite) {
 }
 
 TEST(EnvTest, RunImmediately) {
-  std::atomic<bool> called(false);
-  env_->Schedule(&SetAtomicBool, &called);
-  env_->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(called.load(std::memory_order_relaxed));
-}
-
-TEST(EnvTest, RunMany) {
-  std::atomic<int> last_id(0);
-
-  struct Callback {
-    std::atomic<int>* const last_id_ptr_;  // Pointer to shared state.
-    const int id_;  // Order# for the execution of this callback.
-
-    Callback(std::atomic<int>* last_id_ptr, int id)
-        : last_id_ptr_(last_id_ptr), id_(id) {}
+  struct RunState {
+    port::Mutex mu;
+    port::CondVar cvar{&mu};
+    bool called = false;
 
     static void Run(void* arg) {
-      Callback* callback = reinterpret_cast<Callback*>(arg);
-      int current_id = callback->last_id_ptr_->load(std::memory_order_relaxed);
-      ASSERT_EQ(callback->id_ - 1, current_id);
-      callback->last_id_ptr_->store(callback->id_, std::memory_order_relaxed);
+      RunState* state = reinterpret_cast<RunState*>(arg);
+      MutexLock l(&state->mu);
+      ASSERT_EQ(state->called, false);
+      state->called = true;
+      state->cvar.Signal();
     }
   };
 
-  Callback callback1(&last_id, 1);
-  Callback callback2(&last_id, 2);
-  Callback callback3(&last_id, 3);
-  Callback callback4(&last_id, 4);
+  RunState state;
+  env_->Schedule(&RunState::Run, &state);
+
+  MutexLock l(&state.mu);
+  while (!state.called) {
+    state.cvar.Wait();
+  }
+}
+
+TEST(EnvTest, RunMany) {
+  struct RunState {
+    port::Mutex mu;
+    port::CondVar cvar{&mu};
+    int last_id = 0;
+  };
+
+  struct Callback {
+    RunState* state_;  // Pointer to shared state.
+    const int id_;  // Order# for the execution of this callback.
+
+    Callback(RunState* s, int id) : state_(s), id_(id) {}
+
+    static void Run(void* arg) {
+      Callback* callback = reinterpret_cast<Callback*>(arg);
+      RunState* state = callback->state_;
+
+      MutexLock l(&state->mu);
+      ASSERT_EQ(state->last_id, callback->id_ - 1);
+      state->last_id = callback->id_;
+      state->cvar.Signal();
+    }
+  };
+
+  RunState state;
+  Callback callback1(&state, 1);
+  Callback callback2(&state, 2);
+  Callback callback3(&state, 3);
+  Callback callback4(&state, 4);
   env_->Schedule(&Callback::Run, &callback1);
   env_->Schedule(&Callback::Run, &callback2);
   env_->Schedule(&Callback::Run, &callback3);
   env_->Schedule(&Callback::Run, &callback4);
 
-  env_->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(4, last_id.load(std::memory_order_relaxed));
+  MutexLock l(&state.mu);
+  while (state.last_id != 4) {
+    state.cvar.Wait();
+  }
 }
 
 struct State {
   port::Mutex mu;
+  port::CondVar cvar{&mu};
+
   int val GUARDED_BY(mu);
   int num_running GUARDED_BY(mu);
 
@@ -132,6 +149,7 @@ static void ThreadBody(void* arg) {
   s->mu.Lock();
   s->val += 1;
   s->num_running -= 1;
+  s->cvar.Signal();
   s->mu.Unlock();
 }
 
@@ -140,17 +158,11 @@ TEST(EnvTest, StartThread) {
   for (int i = 0; i < 3; i++) {
     env_->StartThread(&ThreadBody, &state);
   }
-  while (true) {
-    state.mu.Lock();
-    int num = state.num_running;
-    state.mu.Unlock();
-    if (num == 0) {
-      break;
-    }
-    env_->SleepForMicroseconds(kDelayMicros);
-  }
 
   MutexLock l(&state.mu);
+  while (state.num_running != 0) {
+    state.cvar.Wait();
+  }
   ASSERT_EQ(state.val, 3);
 }
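
The fix above replaces sleep-and-poll synchronization, which is timing-dependent and which ThreadSanitizer flags, with a condition-variable wait. A minimal standalone version of the same idea, using `std::mutex`/`std::condition_variable` rather than leveldb's `port::` wrappers:

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

struct RunState {
  std::mutex mu;
  std::condition_variable cv;
  bool called = false;
};

void BackgroundWork(RunState* state) {
  std::lock_guard<std::mutex> lock(state->mu);
  state->called = true;
  state->cv.notify_one();
}

int main() {
  RunState state;
  std::thread worker(BackgroundWork, &state);

  // Block until the worker signals completion instead of sleeping for an
  // arbitrary delay and reading a flag without synchronization.
  std::unique_lock<std::mutex> lock(state.mu);
  state.cv.wait(lock, [&state] { return state.called; });
  lock.unlock();

  worker.join();
  return 0;
}
```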
 

From 45ee61579c1eb3accd6c88c922ec468dd61beea8 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sun, 29 Sep 2019 20:33:37 -0700
Subject: [PATCH 108/181] Update Travis CI configuration.

* Use Ubuntu 18.04 and LLVM 9 on Travis.
* Fix bash conditionals: [ a == b ] should be [ a = b ].

PiperOrigin-RevId: 271898719
---
 .travis.yml | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7c9eba2..b4acf13 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,8 +3,8 @@
 # This file can be validated on: http://lint.travis-ci.org/
 
 language: cpp
-dist: xenial
-osx_image: xcode10.2
+dist: bionic
+osx_image: xcode10.3
 
 compiler:
 - gcc
@@ -20,10 +20,11 @@ env:
 addons:
   apt:
     sources:
-    - llvm-toolchain-xenial-8
-    - ubuntu-toolchain-r-test
+    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+      key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+    - sourceline: 'ppa:ubuntu-toolchain-r/test'
     packages:
-    - clang-8
+    - clang-9
     - cmake
     - gcc-9
     - g++-9
@@ -38,8 +39,8 @@ addons:
     - crc32c
     - gcc@9
     - gperftools
-    - kyotocabinet
-    - llvm@8
+    - kyoto-cabinet
+    - llvm@9
     - ninja
     - snappy
     - sqlite3
@@ -48,7 +49,7 @@ addons:
 install:
 # The following Homebrew packages aren't linked by default, and need to be
 # prepended to the path explicitly.
-- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
     export PATH="$(brew --prefix llvm)/bin:$PATH";
   fi
 # /usr/bin/gcc points to an older compiler on both Linux and macOS.
@@ -58,8 +59,8 @@ install:
 # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
 # below don't work on macOS. Fortunately, the path change above makes the
 # default values (clang and clang++) resolve to the correct compiler on macOS.
-- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
-    if [ "$CXX" = "clang++" ]; then export CXX="clang++-8" CC="clang-8"; fi;
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
+    if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
   fi
 - echo ${CC}
 - echo ${CXX}

From 370d532a00581ca79c87af7d7811e56de0ca52a8 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Tue, 1 Oct 2019 13:00:35 -0700
Subject: [PATCH 109/181] Using CMake's check_cxx_compiler_flag to check
 support for -Wthread-safety.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously used check_cxx_source_compiles to attempt a
build to determine support for clang thread safety checks.

This change is to support static analysis of the leveldb source by
lgtm.com (using Semmle). It failed to build with the following error:

```
[2019-07-04 22:29:58] [build] c++: error: unrecognized command line option ‘-Wthread-safety’; did you mean ‘-fthread-jumps’?
[2019-07-04 22:30:02] [build] make[2]: *** [CMakeFiles/leveldb.dir/build.make:66: CMakeFiles/leveldb.dir/db/builder.cc.o] Error 1
```

PiperOrigin-RevId: 272275528
---
 CMakeLists.txt | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 13ebbc9..a65afbf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -73,25 +73,12 @@ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
 endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
 
-include(CheckCXXSourceCompiles)
-
 # Test whether -Wthread-safety is available. See
 # https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
-# -Werror is necessary because unknown attributes only generate warnings.
-set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
-list(APPEND CMAKE_REQUIRED_FLAGS -Werror -Wthread-safety)
-check_cxx_source_compiles("
-struct __attribute__((lockable)) Lock {
-  void Acquire() __attribute__((exclusive_lock_function()));
-  void Release() __attribute__((unlock_function()));
-};
-struct ThreadSafeType {
-  Lock lock_;
-  int data_ __attribute__((guarded_by(lock_)));
-};
-int main() { return 0; }
-"  HAVE_CLANG_THREAD_SAFETY)
-set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
+include(CheckCXXCompilerFlag)
+check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+
+include(CheckCXXSourceCompiles)
 
 # Test whether C++17 __has_include is available.
 check_cxx_source_compiles("

From f933ad16934098460622bbc62d17761802486393 Mon Sep 17 00:00:00 2001
From: Henry Lee <lee0906@hotmail.com>
Date: Tue, 15 Oct 2019 10:48:33 +1100
Subject: [PATCH 110/181] Remove unused variable kDelayMicros in env_test.cc

---
 util/env_test.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/util/env_test.cc b/util/env_test.cc
index 7db03fc..2023e35 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -14,8 +14,6 @@
 
 namespace leveldb {
 
-static const int kDelayMicros = 100000;
-
 class EnvTest {
  public:
   EnvTest() : env_(Env::Default()) {}

From f2dae4e74ad3fb5f19722e4df29deddb85ff0872 Mon Sep 17 00:00:00 2001
From: Liao Tonglang <liaotonglang@gmail.com>
Date: Tue, 22 Oct 2019 18:46:43 +0800
Subject: [PATCH 111/181] fix typo in comment of LRUHandle

LRUHandle has no member "next_"; fix the comment to say "next" instead.
---
 util/cache.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/cache.cc b/util/cache.cc
index 12de306..a9b3e6c 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -53,7 +53,7 @@ struct LRUHandle {
   char key_data[1];  // Beginning of key
 
   Slice key() const {
-    // next_ is only equal to this if the LRU handle is the list head of an
+    // next is only equal to this if the LRU handle is the list head of an
     // empty list. List heads never have meaningful keys.
     assert(next != this);
 

From 657ba514298a726c7533f3106d3778062b59d75f Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 2 Oct 2019 09:35:37 -0700
Subject: [PATCH 112/181] Added return in Version::Get::State::Match to quiet
 warning.

Added unreached return at the end of Version::Get::State::Match
to stop this _incorrect_ warning:

    version_set.cc:376:5: warning: control reaches end of
    non-void function [-Wreturn-type]

This warning was being emitted when building with clang 6.0.1-10
and also emitted by lgtm.com when statically analyzing leveldb even
though all SaverState enumeration values were handled.

PiperOrigin-RevId: 272455474
---
 db/version_set.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/db/version_set.cc b/db/version_set.cc
index fd5e3ab..cd07346 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -373,6 +373,10 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
           state->found = true;
           return false;
       }
+
+      // Not reached. Added to avoid false compilation warnings of
+      // "control reaches end of non-void function".
+      return false;
     }
   };
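
A minimal reproduction of the warning and of the workaround applied above; the `SaverState`-style enum and `ShouldKeepSearching` are hypothetical, and the exact diagnostic text varies by compiler.

```cpp
enum class SaverState { kNotFound, kFound, kDeleted, kCorrupt };

bool ShouldKeepSearching(SaverState state) {
  // Every enumerator is handled, but some compilers cannot prove the switch
  // is exhaustive (the underlying value could be out of range), so they warn
  // that control reaches the end of a non-void function.
  switch (state) {
    case SaverState::kNotFound:
      return true;  // Keep searching in other files.
    case SaverState::kFound:
    case SaverState::kDeleted:
    case SaverState::kCorrupt:
      return false;
  }

  // Not reached. Present only to silence the false warning.
  return false;
}

int main() { return ShouldKeepSearching(SaverState::kFound) ? 1 : 0; }
```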
 

From 95d0ba1cb046bfd76619b8b80e14ee1b2897d219 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Mon, 28 Oct 2019 10:19:33 -0700
Subject: [PATCH 113/181] Renamed local variable in DBImpl::Write.

The local variable `updates` in DBImpl::Write was hiding the
`updates` parameter. Renamed to avoid this conflict.

PiperOrigin-RevId: 277089971
---
 db/db_impl.cc | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 4754ba3..95e2bb4 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1213,9 +1213,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
   uint64_t last_sequence = versions_->LastSequence();
   Writer* last_writer = &w;
   if (status.ok() && updates != nullptr) {  // nullptr batch is for compactions
-    WriteBatch* updates = BuildBatchGroup(&last_writer);
-    WriteBatchInternal::SetSequence(updates, last_sequence + 1);
-    last_sequence += WriteBatchInternal::Count(updates);
+    WriteBatch* write_batch = BuildBatchGroup(&last_writer);
+    WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
+    last_sequence += WriteBatchInternal::Count(write_batch);
 
     // Add to log and apply to memtable.  We can release the lock
     // during this phase since &w is currently responsible for logging
@@ -1223,7 +1223,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
     // into mem_.
     {
       mutex_.Unlock();
-      status = log_->AddRecord(WriteBatchInternal::Contents(updates));
+      status = log_->AddRecord(WriteBatchInternal::Contents(write_batch));
       bool sync_error = false;
       if (status.ok() && options.sync) {
         status = logfile_->Sync();
@@ -1232,7 +1232,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
         }
       }
       if (status.ok()) {
-        status = WriteBatchInternal::InsertInto(updates, mem_);
+        status = WriteBatchInternal::InsertInto(write_batch, mem_);
       }
       mutex_.Lock();
       if (sync_error) {
@@ -1242,7 +1242,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
         RecordBackgroundError(status);
       }
     }
-    if (updates == tmp_batch_) tmp_batch_->Clear();
+    if (write_batch == tmp_batch_) tmp_batch_->Clear();
 
     versions_->SetLastSequence(last_sequence);
   }

From cf4d9ab23de7ec36b8e00536b7450f02c639cd87 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sun, 3 Nov 2019 21:38:38 -0800
Subject: [PATCH 114/181] Test CMake installation on Travis.

PiperOrigin-RevId: 278300591
---
 .travis.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index b4acf13..42cbe64 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -70,6 +70,7 @@ install:
 before_script:
 - mkdir -p build && cd build
 - cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+    -DCMAKE_INSTALL_PREFIX=$HOME/.local
 - cmake --build .
 - cd ..
 
@@ -78,3 +79,4 @@ script:
 - "if [ -f build/db_bench ] ; then build/db_bench ; fi"
 - "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
 - "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
+- cd build && cmake --build . --target install

From 5abdf4c019e51fce59d34c21b13bf4e0a948828a Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 4 Nov 2019 11:38:53 -0800
Subject: [PATCH 115/181] Fix installed target definition.

Using CMAKE_INSTALL_INCLUDEDIR before including GNUInstallDirs results
in a broken installation when CMAKE_INSTALL_PREFIX is a non-standard
directory.

Inspired from https://github.com/google/crc32c/pull/39

PiperOrigin-RevId: 278427974
---
 CMakeLists.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a65afbf..7ccda94 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -106,6 +106,9 @@ if(BUILD_SHARED_LIBS)
   add_compile_options(-fvisibility=hidden)
 endif(BUILD_SHARED_LIBS)
 
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
 add_library(leveldb "")
 target_sources(leveldb
   PRIVATE
@@ -417,7 +420,6 @@ int main() {
 endif(LEVELDB_BUILD_BENCHMARKS)
 
 if(LEVELDB_INSTALL)
-  include(GNUInstallDirs)
   install(TARGETS leveldb
     EXPORT leveldbTargets
     RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}

From 0c40829872a9f00f38e11dc370ff8adb3e19f25b Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 5 Nov 2019 15:15:03 -0800
Subject: [PATCH 116/181] Remove redundant PROJECT_SOURCE_DIR usage from CMake
 config.

Inspired by https://github.com/google/crc32c/pull/32

PiperOrigin-RevId: 278718726
---
 CMakeLists.txt | 278 ++++++++++++++++++++++++-------------------------
 1 file changed, 139 insertions(+), 139 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7ccda94..1cb4625 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -92,13 +92,13 @@ set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
 set(LEVELDB_PORT_CONFIG_DIR "include/port")
 
 configure_file(
-  "${PROJECT_SOURCE_DIR}/port/port_config.h.in"
+  "port/port_config.h.in"
   "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
 )
 
 include_directories(
   "${PROJECT_BINARY_DIR}/include"
-  "${PROJECT_SOURCE_DIR}"
+  "."
 )
 
 if(BUILD_SHARED_LIBS)
@@ -113,75 +113,75 @@ add_library(leveldb "")
 target_sources(leveldb
   PRIVATE
     "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
-    "${PROJECT_SOURCE_DIR}/db/builder.cc"
-    "${PROJECT_SOURCE_DIR}/db/builder.h"
-    "${PROJECT_SOURCE_DIR}/db/c.cc"
-    "${PROJECT_SOURCE_DIR}/db/db_impl.cc"
-    "${PROJECT_SOURCE_DIR}/db/db_impl.h"
-    "${PROJECT_SOURCE_DIR}/db/db_iter.cc"
-    "${PROJECT_SOURCE_DIR}/db/db_iter.h"
-    "${PROJECT_SOURCE_DIR}/db/dbformat.cc"
-    "${PROJECT_SOURCE_DIR}/db/dbformat.h"
-    "${PROJECT_SOURCE_DIR}/db/dumpfile.cc"
-    "${PROJECT_SOURCE_DIR}/db/filename.cc"
-    "${PROJECT_SOURCE_DIR}/db/filename.h"
-    "${PROJECT_SOURCE_DIR}/db/log_format.h"
-    "${PROJECT_SOURCE_DIR}/db/log_reader.cc"
-    "${PROJECT_SOURCE_DIR}/db/log_reader.h"
-    "${PROJECT_SOURCE_DIR}/db/log_writer.cc"
-    "${PROJECT_SOURCE_DIR}/db/log_writer.h"
-    "${PROJECT_SOURCE_DIR}/db/memtable.cc"
-    "${PROJECT_SOURCE_DIR}/db/memtable.h"
-    "${PROJECT_SOURCE_DIR}/db/repair.cc"
-    "${PROJECT_SOURCE_DIR}/db/skiplist.h"
-    "${PROJECT_SOURCE_DIR}/db/snapshot.h"
-    "${PROJECT_SOURCE_DIR}/db/table_cache.cc"
-    "${PROJECT_SOURCE_DIR}/db/table_cache.h"
-    "${PROJECT_SOURCE_DIR}/db/version_edit.cc"
-    "${PROJECT_SOURCE_DIR}/db/version_edit.h"
-    "${PROJECT_SOURCE_DIR}/db/version_set.cc"
-    "${PROJECT_SOURCE_DIR}/db/version_set.h"
-    "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
-    "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
-    "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
-    "${PROJECT_SOURCE_DIR}/port/port.h"
-    "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
-    "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
-    "${PROJECT_SOURCE_DIR}/table/block_builder.h"
-    "${PROJECT_SOURCE_DIR}/table/block.cc"
-    "${PROJECT_SOURCE_DIR}/table/block.h"
-    "${PROJECT_SOURCE_DIR}/table/filter_block.cc"
-    "${PROJECT_SOURCE_DIR}/table/filter_block.h"
-    "${PROJECT_SOURCE_DIR}/table/format.cc"
-    "${PROJECT_SOURCE_DIR}/table/format.h"
-    "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h"
-    "${PROJECT_SOURCE_DIR}/table/iterator.cc"
-    "${PROJECT_SOURCE_DIR}/table/merger.cc"
-    "${PROJECT_SOURCE_DIR}/table/merger.h"
-    "${PROJECT_SOURCE_DIR}/table/table_builder.cc"
-    "${PROJECT_SOURCE_DIR}/table/table.cc"
-    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc"
-    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h"
-    "${PROJECT_SOURCE_DIR}/util/arena.cc"
-    "${PROJECT_SOURCE_DIR}/util/arena.h"
-    "${PROJECT_SOURCE_DIR}/util/bloom.cc"
-    "${PROJECT_SOURCE_DIR}/util/cache.cc"
-    "${PROJECT_SOURCE_DIR}/util/coding.cc"
-    "${PROJECT_SOURCE_DIR}/util/coding.h"
-    "${PROJECT_SOURCE_DIR}/util/comparator.cc"
-    "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
-    "${PROJECT_SOURCE_DIR}/util/crc32c.h"
-    "${PROJECT_SOURCE_DIR}/util/env.cc"
-    "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
-    "${PROJECT_SOURCE_DIR}/util/hash.cc"
-    "${PROJECT_SOURCE_DIR}/util/hash.h"
-    "${PROJECT_SOURCE_DIR}/util/logging.cc"
-    "${PROJECT_SOURCE_DIR}/util/logging.h"
-    "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
-    "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
-    "${PROJECT_SOURCE_DIR}/util/options.cc"
-    "${PROJECT_SOURCE_DIR}/util/random.h"
-    "${PROJECT_SOURCE_DIR}/util/status.cc"
+    "db/builder.cc"
+    "db/builder.h"
+    "db/c.cc"
+    "db/db_impl.cc"
+    "db/db_impl.h"
+    "db/db_iter.cc"
+    "db/db_iter.h"
+    "db/dbformat.cc"
+    "db/dbformat.h"
+    "db/dumpfile.cc"
+    "db/filename.cc"
+    "db/filename.h"
+    "db/log_format.h"
+    "db/log_reader.cc"
+    "db/log_reader.h"
+    "db/log_writer.cc"
+    "db/log_writer.h"
+    "db/memtable.cc"
+    "db/memtable.h"
+    "db/repair.cc"
+    "db/skiplist.h"
+    "db/snapshot.h"
+    "db/table_cache.cc"
+    "db/table_cache.h"
+    "db/version_edit.cc"
+    "db/version_edit.h"
+    "db/version_set.cc"
+    "db/version_set.h"
+    "db/write_batch_internal.h"
+    "db/write_batch.cc"
+    "port/port_stdcxx.h"
+    "port/port.h"
+    "port/thread_annotations.h"
+    "table/block_builder.cc"
+    "table/block_builder.h"
+    "table/block.cc"
+    "table/block.h"
+    "table/filter_block.cc"
+    "table/filter_block.h"
+    "table/format.cc"
+    "table/format.h"
+    "table/iterator_wrapper.h"
+    "table/iterator.cc"
+    "table/merger.cc"
+    "table/merger.h"
+    "table/table_builder.cc"
+    "table/table.cc"
+    "table/two_level_iterator.cc"
+    "table/two_level_iterator.h"
+    "util/arena.cc"
+    "util/arena.h"
+    "util/bloom.cc"
+    "util/cache.cc"
+    "util/coding.cc"
+    "util/coding.h"
+    "util/comparator.cc"
+    "util/crc32c.cc"
+    "util/crc32c.h"
+    "util/env.cc"
+    "util/filter_policy.cc"
+    "util/hash.cc"
+    "util/hash.h"
+    "util/logging.cc"
+    "util/logging.h"
+    "util/mutexlock.h"
+    "util/no_destructor.h"
+    "util/options.cc"
+    "util/random.h"
+    "util/status.cc"
 
   # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
   $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
@@ -205,22 +205,22 @@ target_sources(leveldb
 if (WIN32)
   target_sources(leveldb
     PRIVATE
-      "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
-      "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
+      "util/env_windows.cc"
+      "util/windows_logger.h"
   )
 else (WIN32)
   target_sources(leveldb
     PRIVATE
-      "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
-      "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
+      "util/env_posix.cc"
+      "util/posix_logger.h"
   )
 endif (WIN32)
 
 # MemEnv is not part of the interface and could be pulled to a separate library.
 target_sources(leveldb
   PRIVATE
-    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
-    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
+    "helpers/memenv/memenv.cc"
+    "helpers/memenv/memenv.h"
 )
 
 target_include_directories(leveldb
@@ -275,7 +275,7 @@ find_package(Threads REQUIRED)
 target_link_libraries(leveldb Threads::Threads)
 
 add_executable(leveldbutil
-  "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
+  "db/leveldbutil.cc"
 )
 target_link_libraries(leveldbutil leveldb)
 
@@ -289,10 +289,10 @@ if(LEVELDB_BUILD_TESTS)
     target_sources("${test_target_name}"
       PRIVATE
         "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
-        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
-        "${PROJECT_SOURCE_DIR}/util/testharness.h"
-        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
-        "${PROJECT_SOURCE_DIR}/util/testutil.h"
+        "util/testharness.cc"
+        "util/testharness.h"
+        "util/testutil.cc"
+        "util/testutil.h"
 
         "${test_file}"
     )
@@ -311,49 +311,49 @@ if(LEVELDB_BUILD_TESTS)
     add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
   endfunction(leveldb_test)
 
-  leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c")
-  leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc")
+  leveldb_test("db/c_test.c")
+  leveldb_test("db/fault_injection_test.cc")
 
-  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
-  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
-  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")
+  leveldb_test("issues/issue178_test.cc")
+  leveldb_test("issues/issue200_test.cc")
+  leveldb_test("issues/issue320_test.cc")
 
-  leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
-  leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
-  leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")
+  leveldb_test("util/env_test.cc")
+  leveldb_test("util/status_test.cc")
+  leveldb_test("util/no_destructor_test.cc")
 
   if(NOT BUILD_SHARED_LIBS)
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc")
+    leveldb_test("db/autocompact_test.cc")
+    leveldb_test("db/corruption_test.cc")
+    leveldb_test("db/db_test.cc")
+    leveldb_test("db/dbformat_test.cc")
+    leveldb_test("db/filename_test.cc")
+    leveldb_test("db/log_test.cc")
+    leveldb_test("db/recovery_test.cc")
+    leveldb_test("db/skiplist_test.cc")
+    leveldb_test("db/version_edit_test.cc")
+    leveldb_test("db/version_set_test.cc")
+    leveldb_test("db/write_batch_test.cc")
 
-    leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc")
+    leveldb_test("helpers/memenv/memenv_test.cc")
 
-    leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc")
+    leveldb_test("table/filter_block_test.cc")
+    leveldb_test("table/table_test.cc")
 
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc")
-    leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")
+    leveldb_test("util/arena_test.cc")
+    leveldb_test("util/bloom_test.cc")
+    leveldb_test("util/cache_test.cc")
+    leveldb_test("util/coding_test.cc")
+    leveldb_test("util/crc32c_test.cc")
+    leveldb_test("util/hash_test.cc")
+    leveldb_test("util/logging_test.cc")
 
     # TODO(costan): This test also uses
-    #               "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
+    #               "util/env_{posix|windows}_test_helper.h"
     if (WIN32)
-      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
+      leveldb_test("util/env_windows_test.cc")
     else (WIN32)
-      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
+      leveldb_test("util/env_posix_test.cc")
     endif (WIN32)
   endif(NOT BUILD_SHARED_LIBS)
 endif(LEVELDB_BUILD_TESTS)
@@ -366,12 +366,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
     target_sources("${bench_target_name}"
       PRIVATE
         "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
-        "${PROJECT_SOURCE_DIR}/util/histogram.cc"
-        "${PROJECT_SOURCE_DIR}/util/histogram.h"
-        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
-        "${PROJECT_SOURCE_DIR}/util/testharness.h"
-        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
-        "${PROJECT_SOURCE_DIR}/util/testutil.h"
+        "util/histogram.cc"
+        "util/histogram.h"
+        "util/testharness.cc"
+        "util/testharness.h"
+        "util/testutil.cc"
+        "util/testutil.h"
 
         "${bench_file}"
     )
@@ -389,12 +389,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
   endfunction(leveldb_benchmark)
 
   if(NOT BUILD_SHARED_LIBS)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc")
+    leveldb_benchmark("benchmarks/db_bench.cc")
   endif(NOT BUILD_SHARED_LIBS)
 
   check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
   if(HAVE_SQLITE3)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc")
+    leveldb_benchmark("benchmarks/db_bench_sqlite3.cc")
     target_link_libraries(db_bench_sqlite3 sqlite3)
   endif(HAVE_SQLITE3)
 
@@ -414,7 +414,7 @@ int main() {
   "  HAVE_KYOTOCABINET)
   set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
   if(HAVE_KYOTOCABINET)
-    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc")
+    leveldb_benchmark("benchmarks/db_bench_tree_db.cc")
     target_link_libraries(db_bench_tree_db kyotocabinet)
   endif(HAVE_KYOTOCABINET)
 endif(LEVELDB_BUILD_BENCHMARKS)
@@ -428,21 +428,21 @@ if(LEVELDB_INSTALL)
   )
   install(
     FILES
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
-      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+      "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
   )
 
@@ -458,7 +458,7 @@ if(LEVELDB_INSTALL)
   )
   install(
     FILES
-      "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake"
+      "cmake/leveldbConfig.cmake"
       "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
     DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
   )

From 41c8d839149134a3a6c8908f185437f536a47211 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Thu, 7 Nov 2019 21:39:02 -0800
Subject: [PATCH 117/181] Align CMake configuration with related projects.

PiperOrigin-RevId: 279238007
---
 CMakeLists.txt               | 19 ++++++++++++-------
 cmake/leveldbConfig.cmake    |  1 -
 cmake/leveldbConfig.cmake.in |  9 +++++++++
 3 files changed, 21 insertions(+), 8 deletions(-)
 delete mode 100644 cmake/leveldbConfig.cmake
 create mode 100644 cmake/leveldbConfig.cmake.in

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1cb4625..2ad1c6c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -443,23 +443,28 @@ if(LEVELDB_INSTALL)
       "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
       "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
       "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
-    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/leveldb"
   )
 
   include(CMakePackageConfigHelpers)
+  configure_package_config_file(
+    "cmake/${PROJECT_NAME}Config.cmake.in"
+    "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+    INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+  )
   write_basic_package_version_file(
-      "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
-      COMPATIBILITY SameMajorVersion
+    "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+    COMPATIBILITY SameMajorVersion
   )
   install(
     EXPORT leveldbTargets
     NAMESPACE leveldb::
-    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )
   install(
     FILES
-      "cmake/leveldbConfig.cmake"
-      "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
-    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+      "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+      "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
   )
 endif(LEVELDB_INSTALL)
diff --git a/cmake/leveldbConfig.cmake b/cmake/leveldbConfig.cmake
deleted file mode 100644
index eea6e5c..0000000
--- a/cmake/leveldbConfig.cmake
+++ /dev/null
@@ -1 +0,0 @@
-include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
diff --git a/cmake/leveldbConfig.cmake.in b/cmake/leveldbConfig.cmake.in
new file mode 100644
index 0000000..2572728
--- /dev/null
+++ b/cmake/leveldbConfig.cmake.in
@@ -0,0 +1,9 @@
+# Copyright 2019 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
+
+check_required_components(leveldb)
\ No newline at end of file

From ed72a3496ed01e1c6a28f743258623a58f6867ee Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sun, 10 Nov 2019 18:03:43 -0800
Subject: [PATCH 118/181] Allow different C/C++ standards when this is used as
 a subproject.

Inspired by https://github.com/google/snappy/pull/85

PiperOrigin-RevId: 279649967
---
 CMakeLists.txt | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2ad1c6c..e5c614c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,15 +6,21 @@ cmake_minimum_required(VERSION 3.9)
 # Keep the version below in sync with the one in db.h
 project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
 
-# This project can use C11, but will gracefully decay down to C89.
-set(CMAKE_C_STANDARD 11)
-set(CMAKE_C_STANDARD_REQUIRED OFF)
-set(CMAKE_C_EXTENSIONS OFF)
+# C standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_C_STANDARD)
+  # This project can use C11, but will gracefully decay down to C89.
+  set(CMAKE_C_STANDARD 11)
+  set(CMAKE_C_STANDARD_REQUIRED OFF)
+  set(CMAKE_C_EXTENSIONS OFF)
+endif(NOT CMAKE_C_STANDARD)
 
-# This project requires C++11.
-set(CMAKE_CXX_STANDARD 11)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
+# C++ standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_CXX_STANDARD)
+  # This project requires C++11.
+  set(CMAKE_CXX_STANDARD 11)
+  set(CMAKE_CXX_STANDARD_REQUIRED ON)
+  set(CMAKE_CXX_EXTENSIONS OFF)
+endif(NOT CMAKE_CXX_STANDARD)
 
 if (WIN32)
   set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)

From 2c9c80bd539ca5aad5ea864ee6dd81c1ee3eb91e Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 11 Nov 2019 11:58:55 -0800
Subject: [PATCH 119/181] Move CI to Visual Studio 2019.

PiperOrigin-RevId: 279785825
---
 .appveyor.yml | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/.appveyor.yml b/.appveyor.yml
index c24b17e..448f183 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -8,9 +8,9 @@ environment:
   matrix:
     # AppVeyor currently has no custom job name feature.
     # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
-    - JOB: Visual Studio 2017
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
-      CMAKE_GENERATOR: Visual Studio 15 2017
+    - JOB: Visual Studio 2019
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
+      CMAKE_GENERATOR: Visual Studio 16 2019
 
 platform:
   - x86
@@ -24,9 +24,10 @@ build_script:
   - git submodule update --init --recursive
   - mkdir build
   - cd build
-  - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+  - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32")
+      else (set CMAKE_GENERATOR_PLATFORM="%platform%")
   - cmake --version
-  - cmake .. -G "%CMAKE_GENERATOR%"
+  - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%"
       -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
   - cmake --build . --config "%CONFIGURATION%"
   - cd ..

From 1c58902bdcc8d129f3883606bbd8e59085b48878 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Thu, 21 Nov 2019 13:09:53 -0800
Subject: [PATCH 120/181] Switch testing harness to googletest.

PiperOrigin-RevId: 281815695
---
 .gitmodules                   |   3 +
 CMakeLists.txt                |  29 ++-
 README.md                     |   6 +
 db/autocompact_test.cc        |  25 +-
 db/corruption_test.cc         |  72 +++---
 db/db_test.cc                 | 418 +++++++++++++++++-----------------
 db/dbformat_test.cc           |  10 +-
 db/fault_injection_test.cc    |  40 ++--
 db/filename_test.cc           |   9 +-
 db/log_test.cc                |  87 +++----
 db/recovery_test.cc           |  66 +++---
 db/skiplist_test.cc           |  10 +-
 db/version_edit_test.cc       |  10 +-
 db/version_set_test.cc        |  38 ++--
 db/write_batch_test.cc        |  14 +-
 helpers/memenv/memenv_test.cc | 135 +++++------
 issues/issue178_test.cc       |  20 +-
 issues/issue200_test.cc       |  26 ++-
 issues/issue320_test.cc       |  19 +-
 table/filter_block_test.cc    |  15 +-
 table/table_test.cc           |  39 ++--
 util/arena_test.cc            |   9 +-
 util/bloom_test.cc            |  16 +-
 util/cache_test.cc            |  29 +--
 util/coding_test.cc           |  12 +-
 util/crc32c_test.cc           |  10 +-
 util/env_posix_test.cc        |  83 +++----
 util/env_test.cc              |  73 +++---
 util/env_windows_test.cc      |  19 +-
 util/hash_test.cc             |  10 +-
 util/logging_test.cc          |  12 +-
 util/no_destructor_test.cc    |  12 +-
 util/status_test.cc           |  10 +-
 util/testharness.cc           |  81 -------
 util/testharness.h            | 141 ------------
 util/testutil.cc              |   2 +
 util/testutil.h               |  16 ++
 37 files changed, 763 insertions(+), 863 deletions(-)
 create mode 100644 .gitmodules
 delete mode 100644 util/testharness.cc
 delete mode 100644 util/testharness.h

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..5a4e85a
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "third_party/googletest"]
+	path = third_party/googletest
+	url = https://github.com/google/googletest.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e5c614c..be41ba4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -84,6 +84,10 @@ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
 include(CheckCXXCompilerFlag)
 check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
 
+# Used by googletest.
+check_cxx_compiler_flag(-Wno-missing-field-initializers
+                        LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
 include(CheckCXXSourceCompiles)
 
 # Test whether C++17 __has_include is available.
@@ -288,6 +292,23 @@ target_link_libraries(leveldbutil leveldb)
 if(LEVELDB_BUILD_TESTS)
   enable_testing()
 
+  # Prevent overriding the parent project's compiler/linker settings on Windows.
+  set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+  set(install_gtest OFF)
+  set(install_gmock OFF)
+  set(build_gmock ON)
+
+  # This project is tested using GoogleTest.
+  add_subdirectory("third_party/googletest")
+
+  # GoogleTest triggers a missing field initializers warning.
+  if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+    set_property(TARGET gtest
+        APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+    set_property(TARGET gmock
+        APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+  endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
   function(leveldb_test test_file)
     get_filename_component(test_target_name "${test_file}" NAME_WE)
 
@@ -295,14 +316,12 @@ if(LEVELDB_BUILD_TESTS)
     target_sources("${test_target_name}"
       PRIVATE
         "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
-        "util/testharness.cc"
-        "util/testharness.h"
         "util/testutil.cc"
         "util/testutil.h"
 
         "${test_file}"
     )
-    target_link_libraries("${test_target_name}" leveldb)
+    target_link_libraries("${test_target_name}" leveldb gmock gtest)
     target_compile_definitions("${test_target_name}"
       PRIVATE
         ${LEVELDB_PLATFORM_NAME}=1
@@ -374,14 +393,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
         "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
         "util/histogram.cc"
         "util/histogram.h"
-        "util/testharness.cc"
-        "util/testharness.h"
         "util/testutil.cc"
         "util/testutil.h"
 
         "${bench_file}"
     )
-    target_link_libraries("${bench_target_name}" leveldb)
+    target_link_libraries("${bench_target_name}" leveldb gmock gtest)
     target_compile_definitions("${bench_target_name}"
       PRIVATE
         ${LEVELDB_PLATFORM_NAME}=1
diff --git a/README.md b/README.md
index dadfd56..28d29c1 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,12 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
   * Only a single process (possibly multi-threaded) can access a particular database at a time.
   * There is no client-server support builtin to the library.  An application that needs such support will have to wrap their own server around the library.
 
+# Getting the Source
+
+```bash
+git clone --recurse-submodules https://github.com/google/leveldb.git
+```
+
 # Building
 
 This project supports [CMake](https://cmake.org/) out of the box.
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index e6c97a0..d4caf71 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -2,24 +2,24 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
 
-class AutoCompactTest {
+class AutoCompactTest : public testing::Test {
  public:
   AutoCompactTest() {
-    dbname_ = test::TmpDir() + "/autocompact_test";
+    dbname_ = testing::TempDir() + "autocompact_test";
     tiny_cache_ = NewLRUCache(100);
     options_.block_cache = tiny_cache_;
     DestroyDB(dbname_, options_);
     options_.create_if_missing = true;
     options_.compression = kNoCompression;
-    ASSERT_OK(DB::Open(options_, dbname_, &db_));
+    EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_));
   }
 
   ~AutoCompactTest() {
@@ -62,15 +62,15 @@ void AutoCompactTest::DoReads(int n) {
 
   // Fill database
   for (int i = 0; i < kCount; i++) {
-    ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
+    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value));
   }
-  ASSERT_OK(dbi->TEST_CompactMemTable());
+  ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
 
   // Delete everything
   for (int i = 0; i < kCount; i++) {
-    ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
+    ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i)));
   }
-  ASSERT_OK(dbi->TEST_CompactMemTable());
+  ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
 
   // Get initial measurement of the space we will be reading.
   const int64_t initial_size = Size(Key(0), Key(n));
@@ -103,10 +103,13 @@ void AutoCompactTest::DoReads(int n) {
   ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
 }
 
-TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
+TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
 
-TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
+TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
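
The same mechanical conversion repeats through the rest of the test files below: fixtures derive from testing::Test, TEST becomes TEST_F, ASSERT_OK becomes ASSERT_LEVELDB_OK (or EXPECT_LEVELDB_OK inside constructors and value-returning helpers), test::TmpDir() + "/name" becomes testing::TempDir() + "name" (TempDir() already ends in a path separator), and every converted file gains an explicit main(). A minimal sketch of that shape, assuming the ASSERT_LEVELDB_OK/EXPECT_LEVELDB_OK macros are the ones this patch adds to util/testutil.h (their definition is not shown in these hunks, so the gMock-based shape below is only a plausible guess) and with include paths simplified:

```c++
#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "leveldb/status.h"

namespace leveldb {
namespace test {
// Matches any object whose ok() method returns true, e.g. leveldb::Status.
MATCHER(IsOK, "") { return arg.ok(); }
}  // namespace test
}  // namespace leveldb

// Assumed replacements for the old ASSERT_OK from the removed
// util/testharness.h.
#define ASSERT_LEVELDB_OK(expression) \
  ASSERT_THAT(expression, leveldb::test::IsOK())
#define EXPECT_LEVELDB_OK(expression) \
  EXPECT_THAT(expression, leveldb::test::IsOK())

// Fixtures now derive from testing::Test, and tests switch from TEST to
// TEST_F so they can keep using the fixture's members.
class ExampleTest : public testing::Test {
 protected:
  // testing::TempDir() already ends in a path separator, which is why the
  // converted tests drop the leading "/" from the directory suffix.
  std::string dbname_ = testing::TempDir() + "example_test";
};

TEST_F(ExampleTest, StatusMacro) {
  ASSERT_FALSE(dbname_.empty());
  ASSERT_LEVELDB_OK(leveldb::Status::OK());
}

// Each converted test file now provides an explicit main().
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```
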
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 42f5237..4d20946 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -4,6 +4,7 @@
 
 #include <sys/types.h>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
@@ -13,14 +14,13 @@
 #include "leveldb/table.h"
 #include "leveldb/write_batch.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
 
 static const int kValueSize = 1000;
 
-class CorruptionTest {
+class CorruptionTest : public testing::Test {
  public:
   CorruptionTest()
       : db_(nullptr),
@@ -46,12 +46,12 @@ class CorruptionTest {
     return DB::Open(options_, dbname_, &db_);
   }
 
-  void Reopen() { ASSERT_OK(TryReopen()); }
+  void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); }
 
   void RepairDB() {
     delete db_;
     db_ = nullptr;
-    ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
+    ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
   }
 
   void Build(int n) {
@@ -68,7 +68,7 @@ class CorruptionTest {
       if (i == n - 1) {
         options.sync = true;
       }
-      ASSERT_OK(db_->Write(options, &batch));
+      ASSERT_LEVELDB_OK(db_->Write(options, &batch));
     }
   }
 
@@ -112,7 +112,7 @@ class CorruptionTest {
   void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
     // Pick file to corrupt
     std::vector<std::string> filenames;
-    ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
+    ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames));
     uint64_t number;
     FileType type;
     std::string fname;
@@ -127,7 +127,7 @@ class CorruptionTest {
     ASSERT_TRUE(!fname.empty()) << filetype;
 
     uint64_t file_size;
-    ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
+    ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size));
 
     if (offset < 0) {
       // Relative to end of file; make it absolute
@@ -189,7 +189,7 @@ class CorruptionTest {
   Cache* tiny_cache_;
 };
 
-TEST(CorruptionTest, Recovery) {
+TEST_F(CorruptionTest, Recovery) {
   Build(100);
   Check(100, 100);
   Corrupt(kLogFile, 19, 1);  // WriteBatch tag for first record
@@ -200,13 +200,13 @@ TEST(CorruptionTest, Recovery) {
   Check(36, 36);
 }
 
-TEST(CorruptionTest, RecoverWriteError) {
+TEST_F(CorruptionTest, RecoverWriteError) {
   env_.writable_file_error_ = true;
   Status s = TryReopen();
   ASSERT_TRUE(!s.ok());
 }
 
-TEST(CorruptionTest, NewFileErrorDuringWrite) {
+TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
   // Do enough writing to force minor compaction
   env_.writable_file_error_ = true;
   const int num = 3 + (Options().write_buffer_size / kValueSize);
@@ -223,7 +223,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) {
   Reopen();
 }
 
-TEST(CorruptionTest, TableFile) {
+TEST_F(CorruptionTest, TableFile) {
   Build(100);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
@@ -234,7 +234,7 @@ TEST(CorruptionTest, TableFile) {
   Check(90, 99);
 }
 
-TEST(CorruptionTest, TableFileRepair) {
+TEST_F(CorruptionTest, TableFileRepair) {
   options_.block_size = 2 * kValueSize;  // Limit scope of corruption
   options_.paranoid_checks = true;
   Reopen();
@@ -250,7 +250,7 @@ TEST(CorruptionTest, TableFileRepair) {
   Check(95, 99);
 }
 
-TEST(CorruptionTest, TableFileIndexData) {
+TEST_F(CorruptionTest, TableFileIndexData) {
   Build(10000);  // Enough to build multiple Tables
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
@@ -260,36 +260,36 @@ TEST(CorruptionTest, TableFileIndexData) {
   Check(5000, 9999);
 }
 
-TEST(CorruptionTest, MissingDescriptor) {
+TEST_F(CorruptionTest, MissingDescriptor) {
   Build(1000);
   RepairDB();
   Reopen();
   Check(1000, 1000);
 }
 
-TEST(CorruptionTest, SequenceNumberRecovery) {
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5"));
+TEST_F(CorruptionTest, SequenceNumberRecovery) {
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3"));
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4"));
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5"));
   RepairDB();
   Reopen();
   std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
   ASSERT_EQ("v5", v);
   // Write something.  If sequence number was not recovered properly,
   // it will be hidden by an earlier write.
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6"));
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6"));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
   ASSERT_EQ("v6", v);
   Reopen();
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
   ASSERT_EQ("v6", v);
 }
 
-TEST(CorruptionTest, CorruptedDescriptor) {
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
+TEST_F(CorruptionTest, CorruptedDescriptor) {
+  ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello"));
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
   dbi->TEST_CompactRange(0, nullptr, nullptr);
@@ -301,11 +301,11 @@ TEST(CorruptionTest, CorruptedDescriptor) {
   RepairDB();
   Reopen();
   std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
   ASSERT_EQ("hello", v);
 }
 
-TEST(CorruptionTest, CompactionInputError) {
+TEST_F(CorruptionTest, CompactionInputError) {
   Build(10);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
@@ -320,7 +320,7 @@ TEST(CorruptionTest, CompactionInputError) {
   Check(10000, 10000);
 }
 
-TEST(CorruptionTest, CompactionInputErrorParanoid) {
+TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
   options_.paranoid_checks = true;
   options_.write_buffer_size = 512 << 10;
   Reopen();
@@ -341,22 +341,26 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
   ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
 }
 
-TEST(CorruptionTest, UnrelatedKeys) {
+TEST_F(CorruptionTest, UnrelatedKeys) {
   Build(10);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
   dbi->TEST_CompactMemTable();
   Corrupt(kTableFile, 100, 1);
 
   std::string tmp1, tmp2;
-  ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
+  ASSERT_LEVELDB_OK(
+      db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
   std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
   ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
   dbi->TEST_CompactMemTable();
-  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+  ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
   ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/db_test.cc b/db/db_test.cc
index 9a8faf1..e8e3495 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -7,6 +7,7 @@
 #include <atomic>
 #include <string>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
@@ -20,7 +21,6 @@
 #include "util/hash.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
@@ -226,7 +226,7 @@ class SpecialEnv : public EnvWrapper {
   }
 };
 
-class DBTest {
+class DBTest : public testing::Test {
  public:
   std::string dbname_;
   SpecialEnv* env_;
@@ -236,7 +236,7 @@ class DBTest {
 
   DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
     filter_policy_ = NewBloomFilterPolicy(10);
-    dbname_ = test::TmpDir() + "/db_test";
+    dbname_ = testing::TempDir() + "db_test";
     DestroyDB(dbname_, Options());
     db_ = nullptr;
     Reopen();
@@ -283,7 +283,9 @@ class DBTest {
 
   DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
 
-  void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
+  void Reopen(Options* options = nullptr) {
+    ASSERT_LEVELDB_OK(TryReopen(options));
+  }
 
   void Close() {
     delete db_;
@@ -294,7 +296,7 @@ class DBTest {
     delete db_;
     db_ = nullptr;
     DestroyDB(dbname_, Options());
-    ASSERT_OK(TryReopen(options));
+    ASSERT_LEVELDB_OK(TryReopen(options));
   }
 
   Status TryReopen(Options* options) {
@@ -348,11 +350,11 @@ class DBTest {
     // Check reverse iteration results are the reverse of forward results
     size_t matched = 0;
     for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
-      ASSERT_LT(matched, forward.size());
-      ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
+      EXPECT_LT(matched, forward.size());
+      EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
       matched++;
     }
-    ASSERT_EQ(matched, forward.size());
+    EXPECT_EQ(matched, forward.size());
 
     delete iter;
     return result;
@@ -402,7 +404,7 @@ class DBTest {
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    ASSERT_TRUE(db_->GetProperty(
+    EXPECT_TRUE(db_->GetProperty(
         "leveldb.num-files-at-level" + NumberToString(level), &property));
     return std::stoi(property);
   }
@@ -497,12 +499,12 @@ class DBTest {
 
   bool DeleteAnSSTFile() {
     std::vector<std::string> filenames;
-    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
     uint64_t number;
     FileType type;
     for (size_t i = 0; i < filenames.size(); i++) {
       if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
-        ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number)));
+        EXPECT_LEVELDB_OK(env_->DeleteFile(TableFileName(dbname_, number)));
         return true;
       }
     }
@@ -512,7 +514,7 @@ class DBTest {
   // Returns number of files renamed.
   int RenameLDBToSST() {
     std::vector<std::string> filenames;
-    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
     uint64_t number;
     FileType type;
     int files_renamed = 0;
@@ -520,7 +522,7 @@ class DBTest {
       if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
         const std::string from = TableFileName(dbname_, number);
         const std::string to = SSTTableFileName(dbname_, number);
-        ASSERT_OK(env_->RenameFile(from, to));
+        EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
         files_renamed++;
       }
     }
@@ -535,63 +537,63 @@ class DBTest {
   int option_config_;
 };
 
-TEST(DBTest, Empty) {
+TEST_F(DBTest, Empty) {
   do {
     ASSERT_TRUE(db_ != nullptr);
     ASSERT_EQ("NOT_FOUND", Get("foo"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, EmptyKey) {
+TEST_F(DBTest, EmptyKey) {
   do {
-    ASSERT_OK(Put("", "v1"));
+    ASSERT_LEVELDB_OK(Put("", "v1"));
     ASSERT_EQ("v1", Get(""));
-    ASSERT_OK(Put("", "v2"));
+    ASSERT_LEVELDB_OK(Put("", "v2"));
     ASSERT_EQ("v2", Get(""));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, EmptyValue) {
+TEST_F(DBTest, EmptyValue) {
   do {
-    ASSERT_OK(Put("key", "v1"));
+    ASSERT_LEVELDB_OK(Put("key", "v1"));
     ASSERT_EQ("v1", Get("key"));
-    ASSERT_OK(Put("key", ""));
+    ASSERT_LEVELDB_OK(Put("key", ""));
     ASSERT_EQ("", Get("key"));
-    ASSERT_OK(Put("key", "v2"));
+    ASSERT_LEVELDB_OK(Put("key", "v2"));
     ASSERT_EQ("v2", Get("key"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, ReadWrite) {
+TEST_F(DBTest, ReadWrite) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
-    ASSERT_OK(Put("bar", "v2"));
-    ASSERT_OK(Put("foo", "v3"));
+    ASSERT_LEVELDB_OK(Put("bar", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v3"));
     ASSERT_EQ("v3", Get("foo"));
     ASSERT_EQ("v2", Get("bar"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, PutDeleteGet) {
+TEST_F(DBTest, PutDeleteGet) {
   do {
-    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
+    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
-    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
+    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
     ASSERT_EQ("v2", Get("foo"));
-    ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
+    ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
     ASSERT_EQ("NOT_FOUND", Get("foo"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetFromImmutableLayer) {
+TEST_F(DBTest, GetFromImmutableLayer) {
   do {
     Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 100000;  // Small write buffer
     Reopen(&options);
 
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
 
     // Block sync calls.
@@ -604,17 +606,17 @@ TEST(DBTest, GetFromImmutableLayer) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetFromVersions) {
+TEST_F(DBTest, GetFromVersions) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v1", Get("foo"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetMemUsage) {
+TEST_F(DBTest, GetMemUsage) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     std::string val;
     ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
     int mem_usage = std::stoi(val);
@@ -623,14 +625,14 @@ TEST(DBTest, GetMemUsage) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetSnapshot) {
+TEST_F(DBTest, GetSnapshot) {
   do {
     // Try with both a short key and a long key
     for (int i = 0; i < 2; i++) {
       std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
-      ASSERT_OK(Put(key, "v1"));
+      ASSERT_LEVELDB_OK(Put(key, "v1"));
       const Snapshot* s1 = db_->GetSnapshot();
-      ASSERT_OK(Put(key, "v2"));
+      ASSERT_LEVELDB_OK(Put(key, "v2"));
       ASSERT_EQ("v2", Get(key));
       ASSERT_EQ("v1", Get(key, s1));
       dbfull()->TEST_CompactMemTable();
@@ -641,16 +643,16 @@ TEST(DBTest, GetSnapshot) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetIdenticalSnapshots) {
+TEST_F(DBTest, GetIdenticalSnapshots) {
   do {
     // Try with both a short key and a long key
     for (int i = 0; i < 2; i++) {
       std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
-      ASSERT_OK(Put(key, "v1"));
+      ASSERT_LEVELDB_OK(Put(key, "v1"));
       const Snapshot* s1 = db_->GetSnapshot();
       const Snapshot* s2 = db_->GetSnapshot();
       const Snapshot* s3 = db_->GetSnapshot();
-      ASSERT_OK(Put(key, "v2"));
+      ASSERT_LEVELDB_OK(Put(key, "v2"));
       ASSERT_EQ("v2", Get(key));
       ASSERT_EQ("v1", Get(key, s1));
       ASSERT_EQ("v1", Get(key, s2));
@@ -666,13 +668,13 @@ TEST(DBTest, GetIdenticalSnapshots) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, IterateOverEmptySnapshot) {
+TEST_F(DBTest, IterateOverEmptySnapshot) {
   do {
     const Snapshot* snapshot = db_->GetSnapshot();
     ReadOptions read_options;
     read_options.snapshot = snapshot;
-    ASSERT_OK(Put("foo", "v1"));
-    ASSERT_OK(Put("foo", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v2"));
 
     Iterator* iterator1 = db_->NewIterator(read_options);
     iterator1->SeekToFirst();
@@ -690,41 +692,41 @@ TEST(DBTest, IterateOverEmptySnapshot) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetLevel0Ordering) {
+TEST_F(DBTest, GetLevel0Ordering) {
   do {
     // Check that we process level-0 files in correct order.  The code
     // below generates two level-0 files where the earlier one comes
     // before the later one in the level-0 file list since the earlier
     // one has a smaller "smallest" key.
-    ASSERT_OK(Put("bar", "b"));
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("bar", "b"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     dbfull()->TEST_CompactMemTable();
-    ASSERT_OK(Put("foo", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v2"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v2", Get("foo"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetOrderedByLevels) {
+TEST_F(DBTest, GetOrderedByLevels) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
     Compact("a", "z");
     ASSERT_EQ("v1", Get("foo"));
-    ASSERT_OK(Put("foo", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v2"));
     ASSERT_EQ("v2", Get("foo"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("v2", Get("foo"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetPicksCorrectFile) {
+TEST_F(DBTest, GetPicksCorrectFile) {
   do {
     // Arrange to have multiple files in a non-level-0 level.
-    ASSERT_OK(Put("a", "va"));
+    ASSERT_LEVELDB_OK(Put("a", "va"));
     Compact("a", "b");
-    ASSERT_OK(Put("x", "vx"));
+    ASSERT_LEVELDB_OK(Put("x", "vx"));
     Compact("x", "y");
-    ASSERT_OK(Put("f", "vf"));
+    ASSERT_LEVELDB_OK(Put("f", "vf"));
     Compact("f", "g");
     ASSERT_EQ("va", Get("a"));
     ASSERT_EQ("vf", Get("f"));
@@ -732,7 +734,7 @@ TEST(DBTest, GetPicksCorrectFile) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, GetEncountersEmptyLevel) {
+TEST_F(DBTest, GetEncountersEmptyLevel) {
   do {
     // Arrange for the following to happen:
     //   * sstable A in level 0
@@ -770,7 +772,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, IterEmpty) {
+TEST_F(DBTest, IterEmpty) {
   Iterator* iter = db_->NewIterator(ReadOptions());
 
   iter->SeekToFirst();
@@ -785,8 +787,8 @@ TEST(DBTest, IterEmpty) {
   delete iter;
 }
 
-TEST(DBTest, IterSingle) {
-  ASSERT_OK(Put("a", "va"));
+TEST_F(DBTest, IterSingle) {
+  ASSERT_LEVELDB_OK(Put("a", "va"));
   Iterator* iter = db_->NewIterator(ReadOptions());
 
   iter->SeekToFirst();
@@ -823,10 +825,10 @@ TEST(DBTest, IterSingle) {
   delete iter;
 }
 
-TEST(DBTest, IterMulti) {
-  ASSERT_OK(Put("a", "va"));
-  ASSERT_OK(Put("b", "vb"));
-  ASSERT_OK(Put("c", "vc"));
+TEST_F(DBTest, IterMulti) {
+  ASSERT_LEVELDB_OK(Put("a", "va"));
+  ASSERT_LEVELDB_OK(Put("b", "vb"));
+  ASSERT_LEVELDB_OK(Put("c", "vc"));
   Iterator* iter = db_->NewIterator(ReadOptions());
 
   iter->SeekToFirst();
@@ -881,11 +883,11 @@ TEST(DBTest, IterMulti) {
   ASSERT_EQ(IterStatus(iter), "b->vb");
 
   // Make sure iter stays at snapshot
-  ASSERT_OK(Put("a", "va2"));
-  ASSERT_OK(Put("a2", "va3"));
-  ASSERT_OK(Put("b", "vb2"));
-  ASSERT_OK(Put("c", "vc2"));
-  ASSERT_OK(Delete("b"));
+  ASSERT_LEVELDB_OK(Put("a", "va2"));
+  ASSERT_LEVELDB_OK(Put("a2", "va3"));
+  ASSERT_LEVELDB_OK(Put("b", "vb2"));
+  ASSERT_LEVELDB_OK(Put("c", "vc2"));
+  ASSERT_LEVELDB_OK(Delete("b"));
   iter->SeekToFirst();
   ASSERT_EQ(IterStatus(iter), "a->va");
   iter->Next();
@@ -906,12 +908,12 @@ TEST(DBTest, IterMulti) {
   delete iter;
 }
 
-TEST(DBTest, IterSmallAndLargeMix) {
-  ASSERT_OK(Put("a", "va"));
-  ASSERT_OK(Put("b", std::string(100000, 'b')));
-  ASSERT_OK(Put("c", "vc"));
-  ASSERT_OK(Put("d", std::string(100000, 'd')));
-  ASSERT_OK(Put("e", std::string(100000, 'e')));
+TEST_F(DBTest, IterSmallAndLargeMix) {
+  ASSERT_LEVELDB_OK(Put("a", "va"));
+  ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
+  ASSERT_LEVELDB_OK(Put("c", "vc"));
+  ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
+  ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));
 
   Iterator* iter = db_->NewIterator(ReadOptions());
 
@@ -944,12 +946,12 @@ TEST(DBTest, IterSmallAndLargeMix) {
   delete iter;
 }
 
-TEST(DBTest, IterMultiWithDelete) {
+TEST_F(DBTest, IterMultiWithDelete) {
   do {
-    ASSERT_OK(Put("a", "va"));
-    ASSERT_OK(Put("b", "vb"));
-    ASSERT_OK(Put("c", "vc"));
-    ASSERT_OK(Delete("b"));
+    ASSERT_LEVELDB_OK(Put("a", "va"));
+    ASSERT_LEVELDB_OK(Put("b", "vb"));
+    ASSERT_LEVELDB_OK(Put("c", "vc"));
+    ASSERT_LEVELDB_OK(Delete("b"));
     ASSERT_EQ("NOT_FOUND", Get("b"));
 
     Iterator* iter = db_->NewIterator(ReadOptions());
@@ -961,35 +963,35 @@ TEST(DBTest, IterMultiWithDelete) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, Recover) {
+TEST_F(DBTest, Recover) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
-    ASSERT_OK(Put("baz", "v5"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("baz", "v5"));
 
     Reopen();
     ASSERT_EQ("v1", Get("foo"));
 
     ASSERT_EQ("v1", Get("foo"));
     ASSERT_EQ("v5", Get("baz"));
-    ASSERT_OK(Put("bar", "v2"));
-    ASSERT_OK(Put("foo", "v3"));
+    ASSERT_LEVELDB_OK(Put("bar", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v3"));
 
     Reopen();
     ASSERT_EQ("v3", Get("foo"));
-    ASSERT_OK(Put("foo", "v4"));
+    ASSERT_LEVELDB_OK(Put("foo", "v4"));
     ASSERT_EQ("v4", Get("foo"));
     ASSERT_EQ("v2", Get("bar"));
     ASSERT_EQ("v5", Get("baz"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, RecoveryWithEmptyLog) {
+TEST_F(DBTest, RecoveryWithEmptyLog) {
   do {
-    ASSERT_OK(Put("foo", "v1"));
-    ASSERT_OK(Put("foo", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));
+    ASSERT_LEVELDB_OK(Put("foo", "v2"));
     Reopen();
     Reopen();
-    ASSERT_OK(Put("foo", "v3"));
+    ASSERT_LEVELDB_OK(Put("foo", "v3"));
     Reopen();
     ASSERT_EQ("v3", Get("foo"));
   } while (ChangeOptions());
@@ -997,7 +999,7 @@ TEST(DBTest, RecoveryWithEmptyLog) {
 
 // Check that writes done during a memtable compaction are recovered
 // if the database is shutdown during the memtable compaction.
-TEST(DBTest, RecoverDuringMemtableCompaction) {
+TEST_F(DBTest, RecoverDuringMemtableCompaction) {
   do {
     Options options = CurrentOptions();
     options.env = env_;
@@ -1005,10 +1007,12 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
     Reopen(&options);
 
     // Trigger a long memtable compaction and reopen the database during it
-    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
-    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
-    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
-    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file
+    ASSERT_LEVELDB_OK(Put("foo", "v1"));  // Goes to 1st log file
+    ASSERT_LEVELDB_OK(
+        Put("big1", std::string(10000000, 'x')));  // Fills memtable
+    ASSERT_LEVELDB_OK(
+        Put("big2", std::string(1000, 'y')));  // Triggers compaction
+    ASSERT_LEVELDB_OK(Put("bar", "v2"));       // Goes to new log file
 
     Reopen(&options);
     ASSERT_EQ("v1", Get("foo"));
@@ -1024,7 +1028,7 @@ static std::string Key(int i) {
   return std::string(buf);
 }
 
-TEST(DBTest, MinorCompactionsHappen) {
+TEST_F(DBTest, MinorCompactionsHappen) {
   Options options = CurrentOptions();
   options.write_buffer_size = 10000;
   Reopen(&options);
@@ -1033,7 +1037,7 @@ TEST(DBTest, MinorCompactionsHappen) {
 
   int starting_num_tables = TotalTableFiles();
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
+    ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
   }
   int ending_num_tables = TotalTableFiles();
   ASSERT_GT(ending_num_tables, starting_num_tables);
@@ -1049,14 +1053,14 @@ TEST(DBTest, MinorCompactionsHappen) {
   }
 }
 
-TEST(DBTest, RecoverWithLargeLog) {
+TEST_F(DBTest, RecoverWithLargeLog) {
   {
     Options options = CurrentOptions();
     Reopen(&options);
-    ASSERT_OK(Put("big1", std::string(200000, '1')));
-    ASSERT_OK(Put("big2", std::string(200000, '2')));
-    ASSERT_OK(Put("small3", std::string(10, '3')));
-    ASSERT_OK(Put("small4", std::string(10, '4')));
+    ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
+    ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
+    ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
+    ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   }
 
@@ -1073,7 +1077,7 @@ TEST(DBTest, RecoverWithLargeLog) {
   ASSERT_GT(NumTableFilesAtLevel(0), 1);
 }
 
-TEST(DBTest, CompactionsGenerateMultipleFiles) {
+TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
   Options options = CurrentOptions();
   options.write_buffer_size = 100000000;  // Large write buffer
   Reopen(&options);
@@ -1085,7 +1089,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
   std::vector<std::string> values;
   for (int i = 0; i < 80; i++) {
     values.push_back(RandomString(&rnd, 100000));
-    ASSERT_OK(Put(Key(i), values[i]));
+    ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
   }
 
   // Reopening moves updates to level-0
@@ -1099,7 +1103,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
   }
 }
 
-TEST(DBTest, RepeatedWritesToSameKey) {
+TEST_F(DBTest, RepeatedWritesToSameKey) {
   Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000;  // Small write buffer
@@ -1118,7 +1122,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
   }
 }
 
-TEST(DBTest, SparseMerge) {
+TEST_F(DBTest, SparseMerge) {
   Options options = CurrentOptions();
   options.compression = kNoCompression;
   Reopen(&options);
@@ -1168,7 +1172,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   return result;
 }
 
-TEST(DBTest, ApproximateSizes) {
+TEST_F(DBTest, ApproximateSizes) {
   do {
     Options options = CurrentOptions();
     options.write_buffer_size = 100000000;  // Large write buffer
@@ -1186,7 +1190,7 @@ TEST(DBTest, ApproximateSizes) {
     static const int S2 = 105000;  // Allow some expansion from metadata
     Random rnd(301);
     for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
+      ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
     }
 
     // 0 because GetApproximateSizes() does not account for memtable space
@@ -1227,7 +1231,7 @@ TEST(DBTest, ApproximateSizes) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
+TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
   do {
     Options options = CurrentOptions();
     options.compression = kNoCompression;
@@ -1235,18 +1239,18 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
 
     Random rnd(301);
     std::string big1 = RandomString(&rnd, 100000);
-    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(Key(2), big1));
-    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(Key(4), big1));
-    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
-    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
+    ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
+    ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
+    ASSERT_LEVELDB_OK(Put(Key(2), big1));
+    ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
+    ASSERT_LEVELDB_OK(Put(Key(4), big1));
+    ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
+    ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
+    ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));
 
     if (options.reuse_logs) {
       // Need to force a memtable compaction since recovery does not do so.
-      ASSERT_OK(dbfull()->TEST_CompactMemTable());
+      ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
     }
 
     // Check sizes across recovery by reopening a few times
@@ -1270,7 +1274,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, IteratorPinsRef) {
+TEST_F(DBTest, IteratorPinsRef) {
   Put("foo", "hello");
 
   // Get iterator that will yield the current contents of the DB.
@@ -1279,7 +1283,8 @@ TEST(DBTest, IteratorPinsRef) {
   // Write to force compactions
   Put("foo", "newvalue1");
   for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
+    ASSERT_LEVELDB_OK(
+        Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
   }
   Put("foo", "newvalue2");
 
@@ -1292,7 +1297,7 @@ TEST(DBTest, IteratorPinsRef) {
   delete iter;
 }
 
-TEST(DBTest, Snapshot) {
+TEST_F(DBTest, Snapshot) {
   do {
     Put("foo", "v1");
     const Snapshot* s1 = db_->GetSnapshot();
@@ -1321,7 +1326,7 @@ TEST(DBTest, Snapshot) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, HiddenValuesAreRemoved) {
+TEST_F(DBTest, HiddenValuesAreRemoved) {
   do {
     Random rnd(301);
     FillLevels("a", "z");
@@ -1333,7 +1338,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
     Put("foo", "tiny");
     Put("pastfoo2", "v2");  // Advance sequence number one more
 
-    ASSERT_OK(dbfull()->TEST_CompactMemTable());
+    ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
     ASSERT_GT(NumTableFilesAtLevel(0), 0);
 
     ASSERT_EQ(big, Get("foo", snapshot));
@@ -1352,9 +1357,9 @@ TEST(DBTest, HiddenValuesAreRemoved) {
   } while (ChangeOptions());
 }
 
-TEST(DBTest, DeletionMarkers1) {
+TEST_F(DBTest, DeletionMarkers1) {
   Put("foo", "v1");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());
+  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
@@ -1368,7 +1373,7 @@ TEST(DBTest, DeletionMarkers1) {
   Delete("foo");
   Put("foo", "v2");
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
+  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
   Slice z("z");
   dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
@@ -1381,9 +1386,9 @@ TEST(DBTest, DeletionMarkers1) {
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
 }
 
-TEST(DBTest, DeletionMarkers2) {
+TEST_F(DBTest, DeletionMarkers2) {
   Put("foo", "v1");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());
+  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
@@ -1396,7 +1401,7 @@ TEST(DBTest, DeletionMarkers2) {
 
   Delete("foo");
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
+  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
   dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
   // DEL kept: "last" file overlaps
@@ -1407,17 +1412,17 @@ TEST(DBTest, DeletionMarkers2) {
   ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
 }
 
-TEST(DBTest, OverlapInLevel0) {
+TEST_F(DBTest, OverlapInLevel0) {
   do {
     ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
 
     // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
     // 0.
-    ASSERT_OK(Put("100", "v100"));
-    ASSERT_OK(Put("999", "v999"));
+    ASSERT_LEVELDB_OK(Put("100", "v100"));
+    ASSERT_LEVELDB_OK(Put("999", "v999"));
     dbfull()->TEST_CompactMemTable();
-    ASSERT_OK(Delete("100"));
-    ASSERT_OK(Delete("999"));
+    ASSERT_LEVELDB_OK(Delete("100"));
+    ASSERT_LEVELDB_OK(Delete("999"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("0,1,1", FilesPerLevel());
 
@@ -1425,12 +1430,12 @@ TEST(DBTest, OverlapInLevel0) {
     //  files[0]  200 .. 900
     //  files[1]  300 .. 500
     // Note that files are sorted by smallest key.
-    ASSERT_OK(Put("300", "v300"));
-    ASSERT_OK(Put("500", "v500"));
+    ASSERT_LEVELDB_OK(Put("300", "v300"));
+    ASSERT_LEVELDB_OK(Put("500", "v500"));
     dbfull()->TEST_CompactMemTable();
-    ASSERT_OK(Put("200", "v200"));
-    ASSERT_OK(Put("600", "v600"));
-    ASSERT_OK(Put("900", "v900"));
+    ASSERT_LEVELDB_OK(Put("200", "v200"));
+    ASSERT_LEVELDB_OK(Put("600", "v600"));
+    ASSERT_LEVELDB_OK(Put("900", "v900"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("2,1,1", FilesPerLevel());
 
@@ -1442,23 +1447,23 @@ TEST(DBTest, OverlapInLevel0) {
     // Do a memtable compaction.  Before bug-fix, the compaction would
     // not detect the overlap with level-0 files and would incorrectly place
     // the deletion in a deeper level.
-    ASSERT_OK(Delete("600"));
+    ASSERT_LEVELDB_OK(Delete("600"));
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("3", FilesPerLevel());
     ASSERT_EQ("NOT_FOUND", Get("600"));
   } while (ChangeOptions());
 }
 
-TEST(DBTest, L0_CompactionBug_Issue44_a) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
   Reopen();
-  ASSERT_OK(Put("b", "v"));
+  ASSERT_LEVELDB_OK(Put("b", "v"));
   Reopen();
-  ASSERT_OK(Delete("b"));
-  ASSERT_OK(Delete("a"));
+  ASSERT_LEVELDB_OK(Delete("b"));
+  ASSERT_LEVELDB_OK(Delete("a"));
   Reopen();
-  ASSERT_OK(Delete("a"));
+  ASSERT_LEVELDB_OK(Delete("a"));
   Reopen();
-  ASSERT_OK(Put("a", "v"));
+  ASSERT_LEVELDB_OK(Put("a", "v"));
   Reopen();
   Reopen();
   ASSERT_EQ("(a->v)", Contents());
@@ -1466,7 +1471,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
   ASSERT_EQ("(a->v)", Contents());
 }
 
-TEST(DBTest, L0_CompactionBug_Issue44_b) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
   Reopen();
   Put("", "");
   Reopen();
@@ -1492,16 +1497,16 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
   ASSERT_EQ("(->)(c->cv)", Contents());
 }
 
-TEST(DBTest, Fflush_Issue474) {
+TEST_F(DBTest, Fflush_Issue474) {
   static const int kNum = 100000;
   Random rnd(test::RandomSeed());
   for (int i = 0; i < kNum; i++) {
     fflush(nullptr);
-    ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+    ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
   }
 }
 
-TEST(DBTest, ComparatorCheck) {
+TEST_F(DBTest, ComparatorCheck) {
   class NewComparator : public Comparator {
    public:
     const char* Name() const override { return "leveldb.NewComparator"; }
@@ -1524,7 +1529,7 @@ TEST(DBTest, ComparatorCheck) {
       << s.ToString();
 }
 
-TEST(DBTest, CustomComparator) {
+TEST_F(DBTest, CustomComparator) {
   class NumberComparator : public Comparator {
    public:
     const char* Name() const override { return "test.NumberComparator"; }
@@ -1542,11 +1547,11 @@ TEST(DBTest, CustomComparator) {
    private:
     static int ToNumber(const Slice& x) {
       // Check that there are no extra characters.
-      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
+      EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
           << EscapeString(x);
       int val;
       char ignored;
-      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
+      EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
           << EscapeString(x);
       return val;
     }
@@ -1558,8 +1563,8 @@ TEST(DBTest, CustomComparator) {
   new_options.filter_policy = nullptr;   // Cannot use bloom filters
   new_options.write_buffer_size = 1000;  // Compact more often
   DestroyAndReopen(&new_options);
-  ASSERT_OK(Put("[10]", "ten"));
-  ASSERT_OK(Put("[0x14]", "twenty"));
+  ASSERT_LEVELDB_OK(Put("[10]", "ten"));
+  ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
   for (int i = 0; i < 2; i++) {
     ASSERT_EQ("ten", Get("[10]"));
     ASSERT_EQ("ten", Get("[0xa]"));
@@ -1574,13 +1579,13 @@ TEST(DBTest, CustomComparator) {
     for (int i = 0; i < 1000; i++) {
       char buf[100];
       snprintf(buf, sizeof(buf), "[%d]", i * 10);
-      ASSERT_OK(Put(buf, buf));
+      ASSERT_LEVELDB_OK(Put(buf, buf));
     }
     Compact("[0]", "[1000000]");
   }
 }
 
-TEST(DBTest, ManualCompaction) {
+TEST_F(DBTest, ManualCompaction) {
   ASSERT_EQ(config::kMaxMemCompactLevel, 2)
       << "Need to update this test to match kMaxMemCompactLevel";
 
@@ -1614,8 +1619,8 @@ TEST(DBTest, ManualCompaction) {
   ASSERT_EQ("0,0,1", FilesPerLevel());
 }
 
-TEST(DBTest, DBOpen_Options) {
-  std::string dbname = test::TmpDir() + "/db_options_test";
+TEST_F(DBTest, DBOpen_Options) {
+  std::string dbname = testing::TempDir() + "db_options_test";
   DestroyDB(dbname, Options());
 
   // Does not exist, and create_if_missing == false: error
@@ -1629,7 +1634,7 @@ TEST(DBTest, DBOpen_Options) {
   // Does not exist, and create_if_missing == true: OK
   opts.create_if_missing = true;
   s = DB::Open(opts, dbname, &db);
-  ASSERT_OK(s);
+  ASSERT_LEVELDB_OK(s);
   ASSERT_TRUE(db != nullptr);
 
   delete db;
@@ -1646,15 +1651,15 @@ TEST(DBTest, DBOpen_Options) {
   opts.create_if_missing = true;
   opts.error_if_exists = false;
   s = DB::Open(opts, dbname, &db);
-  ASSERT_OK(s);
+  ASSERT_LEVELDB_OK(s);
   ASSERT_TRUE(db != nullptr);
 
   delete db;
   db = nullptr;
 }
 
-TEST(DBTest, DestroyEmptyDir) {
-  std::string dbname = test::TmpDir() + "/db_empty_dir";
+TEST_F(DBTest, DestroyEmptyDir) {
+  std::string dbname = testing::TempDir() + "db_empty_dir";
   TestEnv env(Env::Default());
   env.DeleteDir(dbname);
   ASSERT_TRUE(!env.FileExists(dbname));
@@ -1662,34 +1667,34 @@ TEST(DBTest, DestroyEmptyDir) {
   Options opts;
   opts.env = &env;
 
-  ASSERT_OK(env.CreateDir(dbname));
+  ASSERT_LEVELDB_OK(env.CreateDir(dbname));
   ASSERT_TRUE(env.FileExists(dbname));
   std::vector<std::string> children;
-  ASSERT_OK(env.GetChildren(dbname, &children));
+  ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
   // The stock Env's do not filter out '.' and '..' special files.
   ASSERT_EQ(2, children.size());
-  ASSERT_OK(DestroyDB(dbname, opts));
+  ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
   ASSERT_TRUE(!env.FileExists(dbname));
 
   // Should also be destroyed if Env is filtering out dot files.
   env.SetIgnoreDotFiles(true);
-  ASSERT_OK(env.CreateDir(dbname));
+  ASSERT_LEVELDB_OK(env.CreateDir(dbname));
   ASSERT_TRUE(env.FileExists(dbname));
-  ASSERT_OK(env.GetChildren(dbname, &children));
+  ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
   ASSERT_EQ(0, children.size());
-  ASSERT_OK(DestroyDB(dbname, opts));
+  ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
   ASSERT_TRUE(!env.FileExists(dbname));
 }
 
-TEST(DBTest, DestroyOpenDB) {
-  std::string dbname = test::TmpDir() + "/open_db_dir";
+TEST_F(DBTest, DestroyOpenDB) {
+  std::string dbname = testing::TempDir() + "open_db_dir";
   env_->DeleteDir(dbname);
   ASSERT_TRUE(!env_->FileExists(dbname));
 
   Options opts;
   opts.create_if_missing = true;
   DB* db = nullptr;
-  ASSERT_OK(DB::Open(opts, dbname, &db));
+  ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
   ASSERT_TRUE(db != nullptr);
 
   // Must fail to destroy an open db.
@@ -1701,23 +1706,23 @@ TEST(DBTest, DestroyOpenDB) {
   db = nullptr;
 
   // Should succeed destroying a closed db.
-  ASSERT_OK(DestroyDB(dbname, Options()));
+  ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
   ASSERT_TRUE(!env_->FileExists(dbname));
 }
 
-TEST(DBTest, Locking) {
+TEST_F(DBTest, Locking) {
   DB* db2 = nullptr;
   Status s = DB::Open(CurrentOptions(), dbname_, &db2);
   ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
 }
 
 // Check that number of files does not grow when we are out of space
-TEST(DBTest, NoSpace) {
+TEST_F(DBTest, NoSpace) {
   Options options = CurrentOptions();
   options.env = env_;
   Reopen(&options);
 
-  ASSERT_OK(Put("foo", "v1"));
+  ASSERT_LEVELDB_OK(Put("foo", "v1"));
   ASSERT_EQ("v1", Get("foo"));
   Compact("a", "z");
   const int num_files = CountFiles();
@@ -1732,12 +1737,12 @@ TEST(DBTest, NoSpace) {
   ASSERT_LT(CountFiles(), num_files + 3);
 }
 
-TEST(DBTest, NonWritableFileSystem) {
+TEST_F(DBTest, NonWritableFileSystem) {
   Options options = CurrentOptions();
   options.write_buffer_size = 1000;
   options.env = env_;
   Reopen(&options);
-  ASSERT_OK(Put("foo", "v1"));
+  ASSERT_LEVELDB_OK(Put("foo", "v1"));
   // Force errors for new files.
   env_->non_writable_.store(true, std::memory_order_release);
   std::string big(100000, 'x');
@@ -1753,7 +1758,7 @@ TEST(DBTest, NonWritableFileSystem) {
   env_->non_writable_.store(false, std::memory_order_release);
 }
 
-TEST(DBTest, WriteSyncError) {
+TEST_F(DBTest, WriteSyncError) {
   // Check that log sync errors cause the DB to disallow future writes.
 
   // (a) Cause log sync calls to fail
@@ -1764,7 +1769,7 @@ TEST(DBTest, WriteSyncError) {
 
   // (b) Normal write should succeed
   WriteOptions w;
-  ASSERT_OK(db_->Put(w, "k1", "v1"));
+  ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
   ASSERT_EQ("v1", Get("k1"));
 
   // (c) Do a sync write; should fail
@@ -1784,7 +1789,7 @@ TEST(DBTest, WriteSyncError) {
   ASSERT_EQ("NOT_FOUND", Get("k3"));
 }
 
-TEST(DBTest, ManifestWriteError) {
+TEST_F(DBTest, ManifestWriteError) {
   // Test for the following problem:
   // (a) Compaction produces file F
   // (b) Log record containing F is written to MANIFEST file, but Sync() fails
@@ -1803,7 +1808,7 @@ TEST(DBTest, ManifestWriteError) {
     options.create_if_missing = true;
     options.error_if_exists = false;
     DestroyAndReopen(&options);
-    ASSERT_OK(Put("foo", "bar"));
+    ASSERT_LEVELDB_OK(Put("foo", "bar"));
     ASSERT_EQ("bar", Get("foo"));
 
     // Memtable compaction (will succeed)
@@ -1824,8 +1829,8 @@ TEST(DBTest, ManifestWriteError) {
   }
 }
 
-TEST(DBTest, MissingSSTFile) {
-  ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, MissingSSTFile) {
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   ASSERT_EQ("bar", Get("foo"));
 
   // Dump the memtable to disk.
@@ -1841,8 +1846,8 @@ TEST(DBTest, MissingSSTFile) {
   ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
 }
 
-TEST(DBTest, StillReadSST) {
-  ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, StillReadSST) {
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   ASSERT_EQ("bar", Get("foo"));
 
   // Dump the memtable to disk.
@@ -1857,18 +1862,18 @@ TEST(DBTest, StillReadSST) {
   ASSERT_EQ("bar", Get("foo"));
 }
 
-TEST(DBTest, FilesDeletedAfterCompaction) {
-  ASSERT_OK(Put("foo", "v2"));
+TEST_F(DBTest, FilesDeletedAfterCompaction) {
+  ASSERT_LEVELDB_OK(Put("foo", "v2"));
   Compact("a", "z");
   const int num_files = CountFiles();
   for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Put("foo", "v2"));
+    ASSERT_LEVELDB_OK(Put("foo", "v2"));
     Compact("a", "z");
   }
   ASSERT_EQ(CountFiles(), num_files);
 }
 
-TEST(DBTest, BloomFilter) {
+TEST_F(DBTest, BloomFilter) {
   env_->count_random_reads_ = true;
   Options options = CurrentOptions();
   options.env = env_;
@@ -1879,11 +1884,11 @@ TEST(DBTest, BloomFilter) {
   // Populate multiple layers
   const int N = 10000;
   for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), Key(i)));
+    ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
   }
   Compact("a", "z");
   for (int i = 0; i < N; i += 100) {
-    ASSERT_OK(Put(Key(i), Key(i)));
+    ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
   }
   dbfull()->TEST_CompactMemTable();
 
@@ -1955,7 +1960,7 @@ static void MTThreadBody(void* arg) {
       // We add some padding for force compactions.
       snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
                static_cast<int>(counter));
-      ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
+      ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
     } else {
       // Read a value and verify that it matches the pattern written above.
       Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
@@ -1963,7 +1968,7 @@ static void MTThreadBody(void* arg) {
         // Key has not yet been written
       } else {
         // Check that the writer thread counter is >= the counter in the value
-        ASSERT_OK(s);
+        ASSERT_LEVELDB_OK(s);
         int k, w, c;
         ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
         ASSERT_EQ(k, key);
@@ -1980,7 +1985,7 @@ static void MTThreadBody(void* arg) {
 
 }  // namespace
 
-TEST(DBTest, MultiThreaded) {
+TEST_F(DBTest, MultiThreaded) {
   do {
     // Initialize state
     MTState mt;
@@ -2158,7 +2163,7 @@ static bool CompareIterators(int step, DB* model, DB* db,
   return ok;
 }
 
-TEST(DBTest, Randomized) {
+TEST_F(DBTest, Randomized) {
   Random rnd(test::RandomSeed());
   do {
     ModelDB model(CurrentOptions());
@@ -2176,13 +2181,13 @@ TEST(DBTest, Randomized) {
         k = RandomKey(&rnd);
         v = RandomString(
             &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
-        ASSERT_OK(model.Put(WriteOptions(), k, v));
-        ASSERT_OK(db_->Put(WriteOptions(), k, v));
+        ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
+        ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));
 
       } else if (p < 90) {  // Delete
         k = RandomKey(&rnd);
-        ASSERT_OK(model.Delete(WriteOptions(), k));
-        ASSERT_OK(db_->Delete(WriteOptions(), k));
+        ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
+        ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));
 
       } else {  // Multi-element batch
         WriteBatch b;
@@ -2201,8 +2206,8 @@ TEST(DBTest, Randomized) {
             b.Delete(k);
           }
         }
-        ASSERT_OK(model.Write(WriteOptions(), &b));
-        ASSERT_OK(db_->Write(WriteOptions(), &b));
+        ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
+        ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
       }
 
       if ((step % 100) == 0) {
@@ -2233,14 +2238,14 @@ std::string MakeKey(unsigned int num) {
 }
 
 void BM_LogAndApply(int iters, int num_base_files) {
-  std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
+  std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
   DestroyDB(dbname, Options());
 
   DB* db = nullptr;
   Options opts;
   opts.create_if_missing = true;
   Status s = DB::Open(opts, dbname, &db);
-  ASSERT_OK(s);
+  ASSERT_LEVELDB_OK(s);
   ASSERT_TRUE(db != nullptr);
 
   delete db;
@@ -2255,7 +2260,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
   Options options;
   VersionSet vset(dbname, &options, nullptr, &cmp);
   bool save_manifest;
-  ASSERT_OK(vset.Recover(&save_manifest));
+  ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
   VersionEdit vbase;
   uint64_t fnum = 1;
   for (int i = 0; i < num_base_files; i++) {
@@ -2263,7 +2268,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
     InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
   }
-  ASSERT_OK(vset.LogAndApply(&vbase, &mu));
+  ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
 
   uint64_t start_micros = env->NowMicros();
 
@@ -2295,5 +2300,6 @@ int main(int argc, char** argv) {
     return 0;
   }
 
-  return leveldb::test::RunAllTests();
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
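
One detail worth noting in db_test.cc above: assertions inside value-returning helpers (DeleteAnSSTFile, RenameLDBToSST, NumTableFilesAtLevel) and inside fixture constructors switch to EXPECT_* rather than ASSERT_*. GoogleTest's fatal ASSERT_* macros expand to a bare `return;`, so they do not compile in a function that returns a value, and in a constructor they would only abort construction rather than the test. A small sketch of that rule, using a hypothetical helper name:

```c++
#include "gtest/gtest.h"

// Value-returning helper: must use EXPECT_*; a failing ASSERT_* would try to
// `return;` from a function that returns bool and would not compile.
bool FileLooksValid(int size) {
  EXPECT_GT(size, 0) << "failure is recorded, but the helper keeps running";
  return size > 0;
}

// A test body returns void, so a fatal ASSERT_* is fine here and aborts the
// test on failure.
TEST(AssertVsExpectSketch, HelperRules) {
  ASSERT_TRUE(FileLooksValid(4096));
}

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```
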
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 1209369..ca49e0a 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -3,8 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "db/dbformat.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
@@ -41,8 +42,6 @@ static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
   ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
 }
 
-class FormatTest {};
-
 TEST(FormatTest, InternalKey_EncodeDecode) {
   const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
   const uint64_t seq[] = {1,
@@ -128,4 +127,7 @@ TEST(FormatTest, InternalKeyDebugString) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 5b31bb8..80b8f12 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -9,6 +9,7 @@
 #include <map>
 #include <set>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
@@ -22,7 +23,6 @@
 #include "port/thread_annotations.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
@@ -300,7 +300,7 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
 
 Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
   Status s = EnvWrapper::DeleteFile(f);
-  ASSERT_OK(s);
+  EXPECT_LEVELDB_OK(s);
   if (s.ok()) {
     UntrackFile(f);
   }
@@ -361,7 +361,7 @@ Status FileState::DropUnsyncedData() const {
   return Truncate(filename_, sync_pos);
 }
 
-class FaultInjectionTest {
+class FaultInjectionTest : public testing::Test {
  public:
   enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR };
   enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES };
@@ -376,7 +376,7 @@ class FaultInjectionTest {
       : env_(new FaultInjectionTestEnv),
         tiny_cache_(NewLRUCache(100)),
         db_(nullptr) {
-    dbname_ = test::TmpDir() + "/fault_test";
+    dbname_ = testing::TempDir() + "fault_test";
     DestroyDB(dbname_, Options());  // Destroy any db from earlier run
     options_.reuse_logs = true;
     options_.env = env_;
@@ -402,7 +402,7 @@ class FaultInjectionTest {
       batch.Clear();
       batch.Put(key, Value(i, &value_space));
       WriteOptions options;
-      ASSERT_OK(db_->Write(options, &batch));
+      ASSERT_LEVELDB_OK(db_->Write(options, &batch));
     }
   }
 
@@ -424,7 +424,7 @@ class FaultInjectionTest {
       s = ReadValue(i, &val);
       if (expected == VAL_EXPECT_NO_ERROR) {
         if (s.ok()) {
-          ASSERT_EQ(value_space, val);
+          EXPECT_EQ(value_space, val);
         }
       } else if (s.ok()) {
         fprintf(stderr, "Expected an error at %d, but was OK\n", i);
@@ -465,7 +465,7 @@ class FaultInjectionTest {
   void DeleteAllData() {
     Iterator* iter = db_->NewIterator(ReadOptions());
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
+      ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), iter->key()));
     }
 
     delete iter;
@@ -474,10 +474,10 @@ class FaultInjectionTest {
   void ResetDBState(ResetMethod reset_method) {
     switch (reset_method) {
       case RESET_DROP_UNSYNCED_DATA:
-        ASSERT_OK(env_->DropUnsyncedFileData());
+        ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData());
         break;
       case RESET_DELETE_UNSYNCED_FILES:
-        ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
+        ASSERT_LEVELDB_OK(env_->DeleteFilesCreatedAfterLastDirSync());
         break;
       default:
         assert(false);
@@ -496,10 +496,11 @@ class FaultInjectionTest {
     env_->SetFilesystemActive(false);
     CloseDB();
     ResetDBState(reset_method);
-    ASSERT_OK(OpenDB());
-    ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
-                     FaultInjectionTest::VAL_EXPECT_ERROR));
+    ASSERT_LEVELDB_OK(OpenDB());
+    ASSERT_LEVELDB_OK(
+        Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
+    ASSERT_LEVELDB_OK(Verify(num_pre_sync, num_post_sync,
+                             FaultInjectionTest::VAL_EXPECT_ERROR));
   }
 
   void NoWriteTestPreFault() {}
@@ -507,12 +508,12 @@ class FaultInjectionTest {
   void NoWriteTestReopenWithFault(ResetMethod reset_method) {
     CloseDB();
     ResetDBState(reset_method);
-    ASSERT_OK(OpenDB());
+    ASSERT_LEVELDB_OK(OpenDB());
   }
 
   void DoTest() {
     Random rnd(0);
-    ASSERT_OK(OpenDB());
+    ASSERT_LEVELDB_OK(OpenDB());
     for (size_t idx = 0; idx < kNumIterations; idx++) {
       int num_pre_sync = rnd.Uniform(kMaxNumValues);
       int num_post_sync = rnd.Uniform(kMaxNumValues);
@@ -536,16 +537,19 @@ class FaultInjectionTest {
   }
 };
 
-TEST(FaultInjectionTest, FaultTestNoLogReuse) {
+TEST_F(FaultInjectionTest, FaultTestNoLogReuse) {
   ReuseLogs(false);
   DoTest();
 }
 
-TEST(FaultInjectionTest, FaultTestWithLogReuse) {
+TEST_F(FaultInjectionTest, FaultTestWithLogReuse) {
   ReuseLogs(true);
   DoTest();
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 952f320..ad0bc73 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -4,15 +4,13 @@
 
 #include "db/filename.h"
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/dbformat.h"
 #include "port/port.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
-class FileNameTest {};
-
 TEST(FileNameTest, Parse) {
   Slice db;
   FileType type;
@@ -128,4 +126,7 @@ TEST(FileNameTest, Construction) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/log_test.cc b/db/log_test.cc
index 0e31648..680f267 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
 #include "leveldb/env.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
 #include "util/random.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 namespace log {
@@ -36,7 +36,7 @@ static std::string RandomSkewedString(int i, Random* rnd) {
   return BigString(NumberString(i), rnd->Skewed(17));
 }
 
-class LogTest {
+class LogTest : public testing::Test {
  public:
   LogTest()
       : reading_(false),
@@ -177,7 +177,7 @@ class LogTest {
     StringSource() : force_error_(false), returned_partial_(false) {}
 
     Status Read(size_t n, Slice* result, char* scratch) override {
-      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+      EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
 
       if (force_error_) {
         force_error_ = false;
@@ -258,9 +258,9 @@ uint64_t LogTest::initial_offset_last_record_offsets_[] = {
 int LogTest::num_initial_offset_records_ =
     sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
 
-TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
+TEST_F(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
 
-TEST(LogTest, ReadWrite) {
+TEST_F(LogTest, ReadWrite) {
   Write("foo");
   Write("bar");
   Write("");
@@ -273,7 +273,7 @@ TEST(LogTest, ReadWrite) {
   ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
 }
 
-TEST(LogTest, ManyBlocks) {
+TEST_F(LogTest, ManyBlocks) {
   for (int i = 0; i < 100000; i++) {
     Write(NumberString(i));
   }
@@ -283,7 +283,7 @@ TEST(LogTest, ManyBlocks) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, Fragmentation) {
+TEST_F(LogTest, Fragmentation) {
   Write("small");
   Write(BigString("medium", 50000));
   Write(BigString("large", 100000));
@@ -293,7 +293,7 @@ TEST(LogTest, Fragmentation) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, MarginalTrailer) {
+TEST_F(LogTest, MarginalTrailer) {
   // Make a trailer that is exactly the same length as an empty record.
   const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
@@ -306,7 +306,7 @@ TEST(LogTest, MarginalTrailer) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, MarginalTrailer2) {
+TEST_F(LogTest, MarginalTrailer2) {
   // Make a trailer that is exactly the same length as an empty record.
   const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
@@ -319,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
   ASSERT_EQ("", ReportMessage());
 }
 
-TEST(LogTest, ShortTrailer) {
+TEST_F(LogTest, ShortTrailer) {
   const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@@ -331,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, AlignedEof) {
+TEST_F(LogTest, AlignedEof) {
   const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@@ -339,7 +339,7 @@ TEST(LogTest, AlignedEof) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, OpenForAppend) {
+TEST_F(LogTest, OpenForAppend) {
   Write("hello");
   ReopenForAppend();
   Write("world");
@@ -348,7 +348,7 @@ TEST(LogTest, OpenForAppend) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, RandomRead) {
+TEST_F(LogTest, RandomRead) {
   const int N = 500;
   Random write_rnd(301);
   for (int i = 0; i < N; i++) {
@@ -363,7 +363,7 @@ TEST(LogTest, RandomRead) {
 
 // Tests of all the error paths in log_reader.cc follow:
 
-TEST(LogTest, ReadError) {
+TEST_F(LogTest, ReadError) {
   Write("foo");
   ForceError();
   ASSERT_EQ("EOF", Read());
@@ -371,7 +371,7 @@ TEST(LogTest, ReadError) {
   ASSERT_EQ("OK", MatchError("read error"));
 }
 
-TEST(LogTest, BadRecordType) {
+TEST_F(LogTest, BadRecordType) {
   Write("foo");
   // Type is stored in header[6]
   IncrementByte(6, 100);
@@ -381,7 +381,7 @@ TEST(LogTest, BadRecordType) {
   ASSERT_EQ("OK", MatchError("unknown record type"));
 }
 
-TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
+TEST_F(LogTest, TruncatedTrailingRecordIsIgnored) {
   Write("foo");
   ShrinkSize(4);  // Drop all payload as well as a header byte
   ASSERT_EQ("EOF", Read());
@@ -390,7 +390,7 @@ TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
   ASSERT_EQ("", ReportMessage());
 }
 
-TEST(LogTest, BadLength) {
+TEST_F(LogTest, BadLength) {
   const int kPayloadSize = kBlockSize - kHeaderSize;
   Write(BigString("bar", kPayloadSize));
   Write("foo");
@@ -401,7 +401,7 @@ TEST(LogTest, BadLength) {
   ASSERT_EQ("OK", MatchError("bad record length"));
 }
 
-TEST(LogTest, BadLengthAtEndIsIgnored) {
+TEST_F(LogTest, BadLengthAtEndIsIgnored) {
   Write("foo");
   ShrinkSize(1);
   ASSERT_EQ("EOF", Read());
@@ -409,7 +409,7 @@ TEST(LogTest, BadLengthAtEndIsIgnored) {
   ASSERT_EQ("", ReportMessage());
 }
 
-TEST(LogTest, ChecksumMismatch) {
+TEST_F(LogTest, ChecksumMismatch) {
   Write("foo");
   IncrementByte(0, 10);
   ASSERT_EQ("EOF", Read());
@@ -417,7 +417,7 @@ TEST(LogTest, ChecksumMismatch) {
   ASSERT_EQ("OK", MatchError("checksum mismatch"));
 }
 
-TEST(LogTest, UnexpectedMiddleType) {
+TEST_F(LogTest, UnexpectedMiddleType) {
   Write("foo");
   SetByte(6, kMiddleType);
   FixChecksum(0, 3);
@@ -426,7 +426,7 @@ TEST(LogTest, UnexpectedMiddleType) {
   ASSERT_EQ("OK", MatchError("missing start"));
 }
 
-TEST(LogTest, UnexpectedLastType) {
+TEST_F(LogTest, UnexpectedLastType) {
   Write("foo");
   SetByte(6, kLastType);
   FixChecksum(0, 3);
@@ -435,7 +435,7 @@ TEST(LogTest, UnexpectedLastType) {
   ASSERT_EQ("OK", MatchError("missing start"));
 }
 
-TEST(LogTest, UnexpectedFullType) {
+TEST_F(LogTest, UnexpectedFullType) {
   Write("foo");
   Write("bar");
   SetByte(6, kFirstType);
@@ -446,7 +446,7 @@ TEST(LogTest, UnexpectedFullType) {
   ASSERT_EQ("OK", MatchError("partial record without end"));
 }
 
-TEST(LogTest, UnexpectedFirstType) {
+TEST_F(LogTest, UnexpectedFirstType) {
   Write("foo");
   Write(BigString("bar", 100000));
   SetByte(6, kFirstType);
@@ -457,7 +457,7 @@ TEST(LogTest, UnexpectedFirstType) {
   ASSERT_EQ("OK", MatchError("partial record without end"));
 }
 
-TEST(LogTest, MissingLastIsIgnored) {
+TEST_F(LogTest, MissingLastIsIgnored) {
   Write(BigString("bar", kBlockSize));
   // Remove the LAST block, including header.
   ShrinkSize(14);
@@ -466,7 +466,7 @@ TEST(LogTest, MissingLastIsIgnored) {
   ASSERT_EQ(0, DroppedBytes());
 }
 
-TEST(LogTest, PartialLastIsIgnored) {
+TEST_F(LogTest, PartialLastIsIgnored) {
   Write(BigString("bar", kBlockSize));
   // Cause a bad record length in the LAST block.
   ShrinkSize(1);
@@ -475,7 +475,7 @@ TEST(LogTest, PartialLastIsIgnored) {
   ASSERT_EQ(0, DroppedBytes());
 }
 
-TEST(LogTest, SkipIntoMultiRecord) {
+TEST_F(LogTest, SkipIntoMultiRecord) {
   // Consider a fragmented record:
   //    first(R1), middle(R1), last(R1), first(R2)
   // If initial_offset points to a record after first(R1) but before first(R2)
@@ -491,7 +491,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
   ASSERT_EQ("EOF", Read());
 }
 
-TEST(LogTest, ErrorJoinsRecords) {
+TEST_F(LogTest, ErrorJoinsRecords) {
   // Consider two fragmented records:
   //    first(R1) last(R1) first(R2) last(R2)
   // where the middle two fragments disappear.  We do not want
@@ -514,47 +514,50 @@ TEST(LogTest, ErrorJoinsRecords) {
   ASSERT_GE(dropped, 2 * kBlockSize);
 }
 
-TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
+TEST_F(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
 
-TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
+TEST_F(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
 
-TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
+TEST_F(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
 
-TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
+TEST_F(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
 
-TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
+TEST_F(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
 
-TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
+TEST_F(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
 
-TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
+TEST_F(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
 
-TEST(LogTest, ReadFourthFirstBlockTrailer) {
+TEST_F(LogTest, ReadFourthFirstBlockTrailer) {
   CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
 }
 
-TEST(LogTest, ReadFourthMiddleBlock) {
+TEST_F(LogTest, ReadFourthMiddleBlock) {
   CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
 }
 
-TEST(LogTest, ReadFourthLastBlock) {
+TEST_F(LogTest, ReadFourthLastBlock) {
   CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
 }
 
-TEST(LogTest, ReadFourthStart) {
+TEST_F(LogTest, ReadFourthStart) {
   CheckInitialOffsetRecord(
       2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
       3);
 }
 
-TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
+TEST_F(LogTest, ReadInitialOffsetIntoBlockPadding) {
   CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
 }
 
-TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
+TEST_F(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
 
-TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
+TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
 
 }  // namespace log
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 547a959..0657743 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
@@ -10,15 +11,14 @@
 #include "leveldb/env.h"
 #include "leveldb/write_batch.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
 
-class RecoveryTest {
+class RecoveryTest : public testing::Test {
  public:
   RecoveryTest() : env_(Env::Default()), db_(nullptr) {
-    dbname_ = test::TmpDir() + "/recovery_test";
+    dbname_ = testing::TempDir() + "/recovery_test";
     DestroyDB(dbname_, Options());
     Open();
   }
@@ -63,7 +63,7 @@ class RecoveryTest {
   }
 
   void Open(Options* options = nullptr) {
-    ASSERT_OK(OpenWithStatus(options));
+    ASSERT_LEVELDB_OK(OpenWithStatus(options));
     ASSERT_EQ(1, NumLogs());
   }
 
@@ -84,7 +84,8 @@ class RecoveryTest {
 
   std::string ManifestFileName() {
     std::string current;
-    ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
+    EXPECT_LEVELDB_OK(
+        ReadFileToString(env_, CurrentFileName(dbname_), &current));
     size_t len = current.size();
     if (len > 0 && current[len - 1] == '\n') {
       current.resize(len - 1);
@@ -100,18 +101,20 @@ class RecoveryTest {
     Close();
     std::vector<uint64_t> logs = GetFiles(kLogFile);
     for (size_t i = 0; i < logs.size(); i++) {
-      ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
+      EXPECT_LEVELDB_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
     }
     return logs.size();
   }
 
-  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
+  void DeleteManifestFile() {
+    ASSERT_LEVELDB_OK(env_->DeleteFile(ManifestFileName()));
+  }
 
   uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
 
   std::vector<uint64_t> GetFiles(FileType t) {
     std::vector<std::string> filenames;
-    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
     std::vector<uint64_t> result;
     for (size_t i = 0; i < filenames.size(); i++) {
       uint64_t number;
@@ -129,7 +132,7 @@ class RecoveryTest {
 
   uint64_t FileSize(const std::string& fname) {
     uint64_t result;
-    ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
+    EXPECT_LEVELDB_OK(env_->GetFileSize(fname, &result)) << fname;
     return result;
   }
 
@@ -139,13 +142,13 @@ class RecoveryTest {
   void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
     std::string fname = LogFileName(dbname_, lognum);
     WritableFile* file;
-    ASSERT_OK(env_->NewWritableFile(fname, &file));
+    ASSERT_LEVELDB_OK(env_->NewWritableFile(fname, &file));
     log::Writer writer(file);
     WriteBatch batch;
     batch.Put(key, val);
     WriteBatchInternal::SetSequence(&batch, seq);
-    ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
-    ASSERT_OK(file->Flush());
+    ASSERT_LEVELDB_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
+    ASSERT_LEVELDB_OK(file->Flush());
     delete file;
   }
 
@@ -155,12 +158,12 @@ class RecoveryTest {
   DB* db_;
 };
 
-TEST(RecoveryTest, ManifestReused) {
+TEST_F(RecoveryTest, ManifestReused) {
   if (!CanAppend()) {
     fprintf(stderr, "skipping test because env does not support appending\n");
     return;
   }
-  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   Close();
   std::string old_manifest = ManifestFileName();
   Open();
@@ -171,12 +174,12 @@ TEST(RecoveryTest, ManifestReused) {
   ASSERT_EQ("bar", Get("foo"));
 }
 
-TEST(RecoveryTest, LargeManifestCompacted) {
+TEST_F(RecoveryTest, LargeManifestCompacted) {
   if (!CanAppend()) {
     fprintf(stderr, "skipping test because env does not support appending\n");
     return;
   }
-  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   Close();
   std::string old_manifest = ManifestFileName();
 
@@ -184,10 +187,10 @@ TEST(RecoveryTest, LargeManifestCompacted) {
   {
     uint64_t len = FileSize(old_manifest);
     WritableFile* file;
-    ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
+    ASSERT_LEVELDB_OK(env()->NewAppendableFile(old_manifest, &file));
     std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
-    ASSERT_OK(file->Append(zeroes));
-    ASSERT_OK(file->Flush());
+    ASSERT_LEVELDB_OK(file->Append(zeroes));
+    ASSERT_LEVELDB_OK(file->Flush());
     delete file;
   }
 
@@ -202,8 +205,8 @@ TEST(RecoveryTest, LargeManifestCompacted) {
   ASSERT_EQ("bar", Get("foo"));
 }
 
-TEST(RecoveryTest, NoLogFiles) {
-  ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, NoLogFiles) {
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   ASSERT_EQ(1, DeleteLogFiles());
   Open();
   ASSERT_EQ("NOT_FOUND", Get("foo"));
@@ -211,13 +214,13 @@ TEST(RecoveryTest, NoLogFiles) {
   ASSERT_EQ("NOT_FOUND", Get("foo"));
 }
 
-TEST(RecoveryTest, LogFileReuse) {
+TEST_F(RecoveryTest, LogFileReuse) {
   if (!CanAppend()) {
     fprintf(stderr, "skipping test because env does not support appending\n");
     return;
   }
   for (int i = 0; i < 2; i++) {
-    ASSERT_OK(Put("foo", "bar"));
+    ASSERT_LEVELDB_OK(Put("foo", "bar"));
     if (i == 0) {
       // Compact to ensure current log is empty
       CompactMemTable();
@@ -241,13 +244,13 @@ TEST(RecoveryTest, LogFileReuse) {
   }
 }
 
-TEST(RecoveryTest, MultipleMemTables) {
+TEST_F(RecoveryTest, MultipleMemTables) {
   // Make a large log.
   const int kNum = 1000;
   for (int i = 0; i < kNum; i++) {
     char buf[100];
     snprintf(buf, sizeof(buf), "%050d", i);
-    ASSERT_OK(Put(buf, buf));
+    ASSERT_LEVELDB_OK(Put(buf, buf));
   }
   ASSERT_EQ(0, NumTables());
   Close();
@@ -270,8 +273,8 @@ TEST(RecoveryTest, MultipleMemTables) {
   }
 }
 
-TEST(RecoveryTest, MultipleLogFiles) {
-  ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, MultipleLogFiles) {
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   Close();
   ASSERT_EQ(1, NumLogs());
 
@@ -316,8 +319,8 @@ TEST(RecoveryTest, MultipleLogFiles) {
   ASSERT_EQ("there", Get("hi"));
 }
 
-TEST(RecoveryTest, ManifestMissing) {
-  ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, ManifestMissing) {
+  ASSERT_LEVELDB_OK(Put("foo", "bar"));
   Close();
   DeleteManifestFile();
 
@@ -327,4 +330,7 @@ TEST(RecoveryTest, ManifestMissing) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 9fa2d96..04b9fa7 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -7,13 +7,14 @@
 #include <atomic>
 #include <set>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
 #include "util/arena.h"
 #include "util/hash.h"
 #include "util/random.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace leveldb {
 
@@ -31,8 +32,6 @@ struct Comparator {
   }
 };
 
-class SkipTest {};
-
 TEST(SkipTest, Empty) {
   Arena arena;
   Comparator cmp;
@@ -366,4 +365,7 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 0b7cda8..228fa3b 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -3,7 +3,8 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "db/version_edit.h"
-#include "util/testharness.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 
 namespace leveldb {
 
@@ -17,8 +18,6 @@ static void TestEncodeDecode(const VersionEdit& edit) {
   ASSERT_EQ(encoded, encoded2);
 }
 
-class VersionEditTest {};
-
 TEST(VersionEditTest, EncodeDecode) {
   static const uint64_t kBig = 1ull << 50;
 
@@ -41,4 +40,7 @@ TEST(VersionEditTest, EncodeDecode) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index c1056a1..71b19a7 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -3,13 +3,14 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "db/version_set.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
 
-class FindFileTest {
+class FindFileTest : public testing::Test {
  public:
   FindFileTest() : disjoint_sorted_files_(true) {}
 
@@ -50,7 +51,7 @@ class FindFileTest {
   std::vector<FileMetaData*> files_;
 };
 
-TEST(FindFileTest, Empty) {
+TEST_F(FindFileTest, Empty) {
   ASSERT_EQ(0, Find("foo"));
   ASSERT_TRUE(!Overlaps("a", "z"));
   ASSERT_TRUE(!Overlaps(nullptr, "z"));
@@ -58,7 +59,7 @@ TEST(FindFileTest, Empty) {
   ASSERT_TRUE(!Overlaps(nullptr, nullptr));
 }
 
-TEST(FindFileTest, Single) {
+TEST_F(FindFileTest, Single) {
   Add("p", "q");
   ASSERT_EQ(0, Find("a"));
   ASSERT_EQ(0, Find("p"));
@@ -88,7 +89,7 @@ TEST(FindFileTest, Single) {
   ASSERT_TRUE(Overlaps(nullptr, nullptr));
 }
 
-TEST(FindFileTest, Multiple) {
+TEST_F(FindFileTest, Multiple) {
   Add("150", "200");
   Add("200", "250");
   Add("300", "350");
@@ -126,7 +127,7 @@ TEST(FindFileTest, Multiple) {
   ASSERT_TRUE(Overlaps("450", "500"));
 }
 
-TEST(FindFileTest, MultipleNullBoundaries) {
+TEST_F(FindFileTest, MultipleNullBoundaries) {
   Add("150", "200");
   Add("200", "250");
   Add("300", "350");
@@ -146,7 +147,7 @@ TEST(FindFileTest, MultipleNullBoundaries) {
   ASSERT_TRUE(Overlaps("450", nullptr));
 }
 
-TEST(FindFileTest, OverlapSequenceChecks) {
+TEST_F(FindFileTest, OverlapSequenceChecks) {
   Add("200", "200", 5000, 3000);
   ASSERT_TRUE(!Overlaps("199", "199"));
   ASSERT_TRUE(!Overlaps("201", "300"));
@@ -155,7 +156,7 @@ TEST(FindFileTest, OverlapSequenceChecks) {
   ASSERT_TRUE(Overlaps("200", "210"));
 }
 
-TEST(FindFileTest, OverlappingFiles) {
+TEST_F(FindFileTest, OverlappingFiles) {
   Add("150", "600");
   Add("400", "500");
   disjoint_sorted_files_ = false;
@@ -177,7 +178,7 @@ void AddBoundaryInputs(const InternalKeyComparator& icmp,
                        const std::vector<FileMetaData*>& level_files,
                        std::vector<FileMetaData*>* compaction_files);
 
-class AddBoundaryInputsTest {
+class AddBoundaryInputsTest : public testing::Test {
  public:
   std::vector<FileMetaData*> level_files_;
   std::vector<FileMetaData*> compaction_files_;
@@ -204,13 +205,13 @@ class AddBoundaryInputsTest {
   }
 };
 
-TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+TEST_F(AddBoundaryInputsTest, TestEmptyFileSets) {
   AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
   ASSERT_TRUE(compaction_files_.empty());
   ASSERT_TRUE(level_files_.empty());
 }
 
-TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+TEST_F(AddBoundaryInputsTest, TestEmptyLevelFiles) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -222,7 +223,7 @@ TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
   ASSERT_TRUE(level_files_.empty());
 }
 
-TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+TEST_F(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -234,7 +235,7 @@ TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
   ASSERT_EQ(f1, level_files_[0]);
 }
 
-TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestNoBoundaryFiles) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -255,7 +256,7 @@ TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
   ASSERT_EQ(2, compaction_files_.size());
 }
 
-TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestOneBoundaryFiles) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
                          InternalKey(InternalKey("100", 2, kTypeValue)));
@@ -277,7 +278,7 @@ TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
   ASSERT_EQ(f2, compaction_files_[1]);
 }
 
-TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                          InternalKey(InternalKey("100", 5, kTypeValue)));
@@ -300,7 +301,7 @@ TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
   ASSERT_EQ(f2, compaction_files_[2]);
 }
 
-TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) {
   FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                          InternalKey(InternalKey("100", 5, kTypeValue)));
@@ -329,4 +330,7 @@ TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index c32317f..b33993a 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -2,13 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
-
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
@@ -22,7 +21,7 @@ static std::string PrintContents(WriteBatch* b) {
   Iterator* iter = mem->NewIterator();
   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
     ParsedInternalKey ikey;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
+    EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
     switch (ikey.type) {
       case kTypeValue:
         state.append("Put(");
@@ -52,8 +51,6 @@ static std::string PrintContents(WriteBatch* b) {
   return state;
 }
 
-class WriteBatchTest {};
-
 TEST(WriteBatchTest, Empty) {
   WriteBatch batch;
   ASSERT_EQ("", PrintContents(&batch));
@@ -134,4 +131,7 @@ TEST(WriteBatchTest, ApproximateSize) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 94ad06b..72e22da 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -7,14 +7,15 @@
 #include <string>
 #include <vector>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/db_impl.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace leveldb {
 
-class MemEnvTest {
+class MemEnvTest : public testing::Test {
  public:
   MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
   ~MemEnvTest() { delete env_; }
@@ -22,55 +23,55 @@ class MemEnvTest {
   Env* env_;
 };
 
-TEST(MemEnvTest, Basics) {
+TEST_F(MemEnvTest, Basics) {
   uint64_t file_size;
   WritableFile* writable_file;
   std::vector<std::string> children;
 
-  ASSERT_OK(env_->CreateDir("/dir"));
+  ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
 
   // Check that the directory is empty.
   ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
   ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren("/dir", &children));
+  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
   ASSERT_EQ(0, children.size());
 
   // Create a file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
+  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
   ASSERT_EQ(0, file_size);
   delete writable_file;
 
   // Check that the file exists.
   ASSERT_TRUE(env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
   ASSERT_EQ(0, file_size);
-  ASSERT_OK(env_->GetChildren("/dir", &children));
+  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
   ASSERT_EQ(1, children.size());
   ASSERT_EQ("f", children[0]);
 
   // Write to the file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
-  ASSERT_OK(writable_file->Append("abc"));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
+  ASSERT_LEVELDB_OK(writable_file->Append("abc"));
   delete writable_file;
 
   // Check that append works.
-  ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+  ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file));
+  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
   ASSERT_EQ(3, file_size);
-  ASSERT_OK(writable_file->Append("hello"));
+  ASSERT_LEVELDB_OK(writable_file->Append("hello"));
   delete writable_file;
 
   // Check for expected size.
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
   ASSERT_EQ(8, file_size);
 
   // Check that renaming works.
   ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
-  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
+  ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g"));
   ASSERT_TRUE(!env_->FileExists("/dir/f"));
   ASSERT_TRUE(env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
+  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size));
   ASSERT_EQ(8, file_size);
 
   // Check that opening non-existent file fails.
@@ -83,48 +84,49 @@ TEST(MemEnvTest, Basics) {
 
   // Check that deleting works.
   ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile("/dir/g"));
+  ASSERT_LEVELDB_OK(env_->DeleteFile("/dir/g"));
   ASSERT_TRUE(!env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetChildren("/dir", &children));
+  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
   ASSERT_EQ(0, children.size());
-  ASSERT_OK(env_->DeleteDir("/dir"));
+  ASSERT_LEVELDB_OK(env_->DeleteDir("/dir"));
 }
 
-TEST(MemEnvTest, ReadWrite) {
+TEST_F(MemEnvTest, ReadWrite) {
   WritableFile* writable_file;
   SequentialFile* seq_file;
   RandomAccessFile* rand_file;
   Slice result;
   char scratch[100];
 
-  ASSERT_OK(env_->CreateDir("/dir"));
+  ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
 
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
+  ASSERT_LEVELDB_OK(writable_file->Append("hello "));
+  ASSERT_LEVELDB_OK(writable_file->Append("world"));
   delete writable_file;
 
   // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
+  ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
+  ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
+  ASSERT_LEVELDB_OK(seq_file->Skip(1));
+  ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
+  ASSERT_LEVELDB_OK(
+      seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
   ASSERT_EQ(0, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
+  ASSERT_LEVELDB_OK(seq_file->Skip(100));  // Try to skip past end of file.
+  ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
   ASSERT_EQ(0, result.size());
   delete seq_file;
 
   // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
+  ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
+  ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
+  ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
+  ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
   ASSERT_EQ(0, result.compare("d"));
 
   // Too high offset.
@@ -132,30 +134,30 @@ TEST(MemEnvTest, ReadWrite) {
   delete rand_file;
 }
 
-TEST(MemEnvTest, Locks) {
+TEST_F(MemEnvTest, Locks) {
   FileLock* lock;
 
   // These are no-ops, but we test they return success.
-  ASSERT_OK(env_->LockFile("some file", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
+  ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock));
+  ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
 }
 
-TEST(MemEnvTest, Misc) {
+TEST_F(MemEnvTest, Misc) {
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   ASSERT_TRUE(!test_dir.empty());
 
   WritableFile* writable_file;
-  ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file));
 
   // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
+  ASSERT_LEVELDB_OK(writable_file->Sync());
+  ASSERT_LEVELDB_OK(writable_file->Flush());
+  ASSERT_LEVELDB_OK(writable_file->Close());
   delete writable_file;
 }
 
-TEST(MemEnvTest, LargeWrite) {
+TEST_F(MemEnvTest, LargeWrite) {
   const size_t kWriteSize = 300 * 1024;
   char* scratch = new char[kWriteSize * 2];
 
@@ -165,21 +167,21 @@ TEST(MemEnvTest, LargeWrite) {
   }
 
   WritableFile* writable_file;
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
+  ASSERT_LEVELDB_OK(writable_file->Append("foo"));
+  ASSERT_LEVELDB_OK(writable_file->Append(write_data));
   delete writable_file;
 
   SequentialFile* seq_file;
   Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
+  ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
+  ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
   ASSERT_EQ(0, result.compare("foo"));
 
   size_t read = 0;
   std::string read_data;
   while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
+    ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch));
     read_data.append(result.data(), result.size());
     read += result.size();
   }
@@ -188,30 +190,30 @@ TEST(MemEnvTest, LargeWrite) {
   delete[] scratch;
 }
 
-TEST(MemEnvTest, OverwriteOpenFile) {
+TEST_F(MemEnvTest, OverwriteOpenFile) {
   const char kWrite1Data[] = "Write #1 data";
   const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
-  const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
+  const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat";
 
-  ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
 
   RandomAccessFile* rand_file;
-  ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
+  ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
 
   const char kWrite2Data[] = "Write #2 data";
-  ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
 
   // Verify that overwriting an open file will result in the new file data
   // being read from files opened before the write.
   Slice result;
   char scratch[kFileDataLen];
-  ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
+  ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
   ASSERT_EQ(0, result.compare(kWrite2Data));
 
   delete rand_file;
 }
 
-TEST(MemEnvTest, DBTest) {
+TEST_F(MemEnvTest, DBTest) {
   Options options;
   options.create_if_missing = true;
   options.env = env_;
@@ -220,14 +222,14 @@ TEST(MemEnvTest, DBTest) {
   const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
   const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
 
-  ASSERT_OK(DB::Open(options, "/dir/db", &db));
+  ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db));
   for (size_t i = 0; i < 3; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
+    ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i]));
   }
 
   for (size_t i = 0; i < 3; ++i) {
     std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
+    ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
     ASSERT_TRUE(res == vals[i]);
   }
 
@@ -243,11 +245,11 @@ TEST(MemEnvTest, DBTest) {
   delete iterator;
 
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
-  ASSERT_OK(dbi->TEST_CompactMemTable());
+  ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
 
   for (size_t i = 0; i < 3; ++i) {
     std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
+    ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
     ASSERT_TRUE(res == vals[i]);
   }
 
@@ -256,4 +258,7 @@ TEST(MemEnvTest, DBTest) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index d50ffeb..4a52a1b 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -7,9 +7,10 @@
 #include <iostream>
 #include <sstream>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace {
 
@@ -23,11 +24,9 @@ std::string Key1(int i) {
 
 std::string Key2(int i) { return Key1(i) + "_xxx"; }
 
-class Issue178 {};
-
 TEST(Issue178, Test) {
   // Get rid of any state from an old run.
-  std::string dbpath = leveldb::test::TmpDir() + "/leveldb_cbug_test";
+  std::string dbpath = testing::TempDir() + "leveldb_cbug_test";
   DestroyDB(dbpath, leveldb::Options());
 
   // Open database.  Disable compression since it affects the creation
@@ -37,28 +36,28 @@ TEST(Issue178, Test) {
   leveldb::Options db_options;
   db_options.create_if_missing = true;
   db_options.compression = leveldb::kNoCompression;
-  ASSERT_OK(leveldb::DB::Open(db_options, dbpath, &db));
+  ASSERT_LEVELDB_OK(leveldb::DB::Open(db_options, dbpath, &db));
 
   // create first key range
   leveldb::WriteBatch batch;
   for (size_t i = 0; i < kNumKeys; i++) {
     batch.Put(Key1(i), "value for range 1 key");
   }
-  ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch));
+  ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
 
   // create second key range
   batch.Clear();
   for (size_t i = 0; i < kNumKeys; i++) {
     batch.Put(Key2(i), "value for range 2 key");
   }
-  ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch));
+  ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
 
   // delete second key range
   batch.Clear();
   for (size_t i = 0; i < kNumKeys; i++) {
     batch.Delete(Key2(i));
   }
-  ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch));
+  ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
 
   // compact database
   std::string start_key = Key1(0);
@@ -85,4 +84,7 @@ TEST(Issue178, Test) {
 
 }  // anonymous namespace
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc
index 877b2af..ee08bc6 100644
--- a/issues/issue200_test.cc
+++ b/issues/issue200_test.cc
@@ -6,35 +6,34 @@
 // to forward, the current key can be yielded unexpectedly if a new
 // mutation has been added just before the current key.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/db.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace leveldb {
 
-class Issue200 {};
-
 TEST(Issue200, Test) {
   // Get rid of any state from an old run.
-  std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
+  std::string dbpath = testing::TempDir() + "leveldb_issue200_test";
   DestroyDB(dbpath, Options());
 
   DB* db;
   Options options;
   options.create_if_missing = true;
-  ASSERT_OK(DB::Open(options, dbpath, &db));
+  ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db));
 
   WriteOptions write_options;
-  ASSERT_OK(db->Put(write_options, "1", "b"));
-  ASSERT_OK(db->Put(write_options, "2", "c"));
-  ASSERT_OK(db->Put(write_options, "3", "d"));
-  ASSERT_OK(db->Put(write_options, "4", "e"));
-  ASSERT_OK(db->Put(write_options, "5", "f"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "1", "b"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "2", "c"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "3", "d"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "4", "e"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "5", "f"));
 
   ReadOptions read_options;
   Iterator* iter = db->NewIterator(read_options);
 
   // Add an element that should not be reflected in the iterator.
-  ASSERT_OK(db->Put(write_options, "25", "cd"));
+  ASSERT_LEVELDB_OK(db->Put(write_options, "25", "cd"));
 
   iter->Seek("5");
   ASSERT_EQ(iter->key().ToString(), "5");
@@ -54,4 +53,7 @@ TEST(Issue200, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index c5fcbfc..c289ab4 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -9,9 +9,10 @@
 #include <string>
 #include <vector>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace leveldb {
 
@@ -37,8 +38,6 @@ std::string CreateRandomString(int32_t index) {
 
 }  // namespace
 
-class Issue320 {};
-
 TEST(Issue320, Test) {
   std::srand(0);
 
@@ -53,8 +52,8 @@ TEST(Issue320, Test) {
   Options options;
   options.create_if_missing = true;
 
-  std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
-  ASSERT_OK(DB::Open(options, dbpath, &db));
+  std::string dbpath = testing::TempDir() + "leveldb_issue320_test";
+  ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db));
 
   uint32_t target_size = 10000;
   uint32_t num_items = 0;
@@ -78,7 +77,8 @@ TEST(Issue320, Test) {
           CreateRandomString(index), CreateRandomString(index)));
       batch.Put(test_map[index]->first, test_map[index]->second);
     } else {
-      ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
+      ASSERT_LEVELDB_OK(
+          db->Get(readOptions, test_map[index]->first, &old_value));
       if (old_value != test_map[index]->second) {
         std::cout << "ERROR incorrect value returned by Get" << std::endl;
         std::cout << "  count=" << count << std::endl;
@@ -102,7 +102,7 @@ TEST(Issue320, Test) {
       }
     }
 
-    ASSERT_OK(db->Write(writeOptions, &batch));
+    ASSERT_LEVELDB_OK(db->Write(writeOptions, &batch));
 
     if (keep_snapshots && GenerateRandomNumber(10) == 0) {
       int i = GenerateRandomNumber(snapshots.size());
@@ -125,4 +125,7 @@ TEST(Issue320, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 8b33bbd..53be948 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -4,11 +4,11 @@
 
 #include "table/filter_block.h"
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/filter_policy.h"
 #include "util/coding.h"
 #include "util/hash.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
@@ -36,12 +36,12 @@ class TestHashFilter : public FilterPolicy {
   }
 };
 
-class FilterBlockTest {
+class FilterBlockTest : public testing::Test {
  public:
   TestHashFilter policy_;
 };
 
-TEST(FilterBlockTest, EmptyBuilder) {
+TEST_F(FilterBlockTest, EmptyBuilder) {
   FilterBlockBuilder builder(&policy_);
   Slice block = builder.Finish();
   ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
@@ -50,7 +50,7 @@ TEST(FilterBlockTest, EmptyBuilder) {
   ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
 }
 
-TEST(FilterBlockTest, SingleChunk) {
+TEST_F(FilterBlockTest, SingleChunk) {
   FilterBlockBuilder builder(&policy_);
   builder.StartBlock(100);
   builder.AddKey("foo");
@@ -71,7 +71,7 @@ TEST(FilterBlockTest, SingleChunk) {
   ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
 }
 
-TEST(FilterBlockTest, MultiChunk) {
+TEST_F(FilterBlockTest, MultiChunk) {
   FilterBlockBuilder builder(&policy_);
 
   // First filter
@@ -121,4 +121,7 @@ TEST(FilterBlockTest, MultiChunk) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/table/table_test.cc b/table/table_test.cc
index f689a27..09d1b5d 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -7,6 +7,7 @@
 #include <map>
 #include <string>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
@@ -18,7 +19,6 @@
 #include "table/block_builder.h"
 #include "table/format.h"
 #include "util/random.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
@@ -219,12 +219,12 @@ class TableConstructor : public Constructor {
 
     for (const auto& kvp : data) {
       builder.Add(kvp.first, kvp.second);
-      ASSERT_TRUE(builder.status().ok());
+      EXPECT_LEVELDB_OK(builder.status());
     }
     Status s = builder.Finish();
-    ASSERT_TRUE(s.ok()) << s.ToString();
+    EXPECT_LEVELDB_OK(s);
 
-    ASSERT_EQ(sink.contents().size(), builder.FileSize());
+    EXPECT_EQ(sink.contents().size(), builder.FileSize());
 
     // Open the table
     source_ = new StringSource(sink.contents());
@@ -340,7 +340,7 @@ class DBConstructor : public Constructor {
     for (const auto& kvp : data) {
       WriteBatch batch;
       batch.Put(kvp.first, kvp.second);
-      ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
+      EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
     }
     return Status::OK();
   }
@@ -352,7 +352,7 @@ class DBConstructor : public Constructor {
 
  private:
   void NewDB() {
-    std::string name = test::TmpDir() + "/table_testdb";
+    std::string name = testing::TempDir() + "table_testdb";
 
     Options options;
     options.comparator = comparator_;
@@ -403,7 +403,7 @@ static const TestArgs kTestArgList[] = {
 };
 static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
 
-class Harness {
+class Harness : public testing::Test {
  public:
   Harness() : constructor_(nullptr) {}
 
@@ -609,7 +609,7 @@ class Harness {
 };
 
 // Test empty table/block.
-TEST(Harness, Empty) {
+TEST_F(Harness, Empty) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 1);
@@ -620,7 +620,7 @@ TEST(Harness, Empty) {
 // Special test for a block with no restart entries.  The C++ leveldb
 // code never generates such blocks, but the Java version of leveldb
 // seems to.
-TEST(Harness, ZeroRestartPointsInBlock) {
+TEST_F(Harness, ZeroRestartPointsInBlock) {
   char data[sizeof(uint32_t)];
   memset(data, 0, sizeof(data));
   BlockContents contents;
@@ -639,7 +639,7 @@ TEST(Harness, ZeroRestartPointsInBlock) {
 }
 
 // Test the empty key
-TEST(Harness, SimpleEmptyKey) {
+TEST_F(Harness, SimpleEmptyKey) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 1);
@@ -648,7 +648,7 @@ TEST(Harness, SimpleEmptyKey) {
   }
 }
 
-TEST(Harness, SimpleSingle) {
+TEST_F(Harness, SimpleSingle) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 2);
@@ -657,7 +657,7 @@ TEST(Harness, SimpleSingle) {
   }
 }
 
-TEST(Harness, SimpleMulti) {
+TEST_F(Harness, SimpleMulti) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 3);
@@ -668,7 +668,7 @@ TEST(Harness, SimpleMulti) {
   }
 }
 
-TEST(Harness, SimpleSpecialKey) {
+TEST_F(Harness, SimpleSpecialKey) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 4);
@@ -677,7 +677,7 @@ TEST(Harness, SimpleSpecialKey) {
   }
 }
 
-TEST(Harness, Randomized) {
+TEST_F(Harness, Randomized) {
   for (int i = 0; i < kNumTestArgs; i++) {
     Init(kTestArgList[i]);
     Random rnd(test::RandomSeed() + 5);
@@ -697,7 +697,7 @@ TEST(Harness, Randomized) {
   }
 }
 
-TEST(Harness, RandomizedLongDB) {
+TEST_F(Harness, RandomizedLongDB) {
   Random rnd(test::RandomSeed());
   TestArgs args = {DB_TEST, false, 16};
   Init(args);
@@ -721,8 +721,6 @@ TEST(Harness, RandomizedLongDB) {
   ASSERT_GT(files, 0);
 }
 
-class MemTableTest {};
-
 TEST(MemTableTest, Simple) {
   InternalKeyComparator cmp(BytewiseComparator());
   MemTable* memtable = new MemTable(cmp);
@@ -757,8 +755,6 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   return result;
 }
 
-class TableTest {};
-
 TEST(TableTest, ApproximateOffsetOfPlain) {
   TableConstructor c(BytewiseComparator());
   c.Add("k01", "hello");
@@ -832,4 +828,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/arena_test.cc b/util/arena_test.cc
index e917228..3f8855b 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -4,13 +4,11 @@
 
 #include "util/arena.h"
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "util/random.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
-class ArenaTest {};
-
 TEST(ArenaTest, Empty) { Arena arena; }
 
 TEST(ArenaTest, Simple) {
@@ -62,4 +60,7 @@ TEST(ArenaTest, Simple) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 436daa9..bcbd7f6 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/filter_policy.h"
-
 #include "util/coding.h"
 #include "util/logging.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
@@ -18,7 +17,7 @@ static Slice Key(int i, char* buffer) {
   return Slice(buffer, sizeof(uint32_t));
 }
 
-class BloomTest {
+class BloomTest : public testing::Test {
  public:
   BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
 
@@ -80,12 +79,12 @@ class BloomTest {
   std::vector<std::string> keys_;
 };
 
-TEST(BloomTest, EmptyFilter) {
+TEST_F(BloomTest, EmptyFilter) {
   ASSERT_TRUE(!Matches("hello"));
   ASSERT_TRUE(!Matches("world"));
 }
 
-TEST(BloomTest, Small) {
+TEST_F(BloomTest, Small) {
   Add("hello");
   Add("world");
   ASSERT_TRUE(Matches("hello"));
@@ -107,7 +106,7 @@ static int NextLength(int length) {
   return length;
 }
 
-TEST(BloomTest, VaryingLengths) {
+TEST_F(BloomTest, VaryingLengths) {
   char buffer[sizeof(int)];
 
   // Count number of filters that significantly exceed the false positive rate
@@ -153,4 +152,7 @@ TEST(BloomTest, VaryingLengths) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 974334b..8ce9463 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -5,8 +5,9 @@
 #include "leveldb/cache.h"
 
 #include <vector>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "util/coding.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
@@ -23,7 +24,7 @@ static int DecodeKey(const Slice& k) {
 static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
 static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
 
-class CacheTest {
+class CacheTest : public testing::Test {
  public:
   static void Deleter(const Slice& key, void* v) {
     current_->deleted_keys_.push_back(DecodeKey(key));
@@ -59,12 +60,11 @@ class CacheTest {
   }
 
   void Erase(int key) { cache_->Erase(EncodeKey(key)); }
-
   static CacheTest* current_;
 };
 CacheTest* CacheTest::current_;
 
-TEST(CacheTest, HitAndMiss) {
+TEST_F(CacheTest, HitAndMiss) {
   ASSERT_EQ(-1, Lookup(100));
 
   Insert(100, 101);
@@ -87,7 +87,7 @@ TEST(CacheTest, HitAndMiss) {
   ASSERT_EQ(101, deleted_values_[0]);
 }
 
-TEST(CacheTest, Erase) {
+TEST_F(CacheTest, Erase) {
   Erase(200);
   ASSERT_EQ(0, deleted_keys_.size());
 
@@ -106,7 +106,7 @@ TEST(CacheTest, Erase) {
   ASSERT_EQ(1, deleted_keys_.size());
 }
 
-TEST(CacheTest, EntriesArePinned) {
+TEST_F(CacheTest, EntriesArePinned) {
   Insert(100, 101);
   Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
   ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
@@ -131,7 +131,7 @@ TEST(CacheTest, EntriesArePinned) {
   ASSERT_EQ(102, deleted_values_[1]);
 }
 
-TEST(CacheTest, EvictionPolicy) {
+TEST_F(CacheTest, EvictionPolicy) {
   Insert(100, 101);
   Insert(200, 201);
   Insert(300, 301);
@@ -150,7 +150,7 @@ TEST(CacheTest, EvictionPolicy) {
   cache_->Release(h);
 }
 
-TEST(CacheTest, UseExceedsCacheSize) {
+TEST_F(CacheTest, UseExceedsCacheSize) {
   // Overfill the cache, keeping handles on all inserted entries.
   std::vector<Cache::Handle*> h;
   for (int i = 0; i < kCacheSize + 100; i++) {
@@ -167,7 +167,7 @@ TEST(CacheTest, UseExceedsCacheSize) {
   }
 }
 
-TEST(CacheTest, HeavyEntries) {
+TEST_F(CacheTest, HeavyEntries) {
   // Add a bunch of light and heavy entries and then count the combined
   // size of items still in the cache, which must be approximately the
   // same as the total capacity.
@@ -194,13 +194,13 @@ TEST(CacheTest, HeavyEntries) {
   ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
 }
 
-TEST(CacheTest, NewId) {
+TEST_F(CacheTest, NewId) {
   uint64_t a = cache_->NewId();
   uint64_t b = cache_->NewId();
   ASSERT_NE(a, b);
 }
 
-TEST(CacheTest, Prune) {
+TEST_F(CacheTest, Prune) {
   Insert(1, 100);
   Insert(2, 200);
 
@@ -213,7 +213,7 @@ TEST(CacheTest, Prune) {
   ASSERT_EQ(-1, Lookup(2));
 }
 
-TEST(CacheTest, ZeroSizeCache) {
+TEST_F(CacheTest, ZeroSizeCache) {
   delete cache_;
   cache_ = NewLRUCache(0);
 
@@ -223,4 +223,7 @@ TEST(CacheTest, ZeroSizeCache) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/coding_test.cc b/util/coding_test.cc
index 0d2a0c5..db83367 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -2,15 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/coding.h"
+
 #include <vector>
 
-#include "util/coding.h"
-#include "util/testharness.h"
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 
 namespace leveldb {
 
-class Coding {};
-
 TEST(Coding, Fixed32) {
   std::string s;
   for (uint32_t v = 0; v < 100000; v++) {
@@ -193,4 +192,7 @@ TEST(Coding, Strings) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 18a8494..1e2aae7 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -3,13 +3,12 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "util/crc32c.h"
-#include "util/testharness.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 
 namespace leveldb {
 namespace crc32c {
 
-class CRC {};
-
 TEST(CRC, StandardResults) {
   // From rfc3720 section B.4.
   char buf[32];
@@ -56,4 +55,7 @@ TEST(CRC, Mask) {
 }  // namespace crc32c
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 9675d73..5ee2248 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -13,10 +13,11 @@
 #include <unordered_set>
 #include <vector>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "util/env_posix_test_helper.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 #if HAVE_O_CLOEXEC
 
@@ -168,7 +169,7 @@ namespace leveldb {
 static const int kReadOnlyFileLimit = 4;
 static const int kMMapLimit = 4;
 
-class EnvPosixTest {
+class EnvPosixTest : public testing::Test {
  public:
   static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
     EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
@@ -180,10 +181,10 @@ class EnvPosixTest {
   Env* env_;
 };
 
-TEST(EnvPosixTest, TestOpenOnRead) {
+TEST_F(EnvPosixTest, TestOpenOnRead) {
   // Write some test data to a single file that will be opened |n| times.
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file = test_dir + "/open_on_read.txt";
 
   FILE* f = fopen(test_file.c_str(), "we");
@@ -197,133 +198,133 @@ TEST(EnvPosixTest, TestOpenOnRead) {
   const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5;
   leveldb::RandomAccessFile* files[kNumFiles] = {0};
   for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
   }
   char scratch;
   Slice read_result;
   for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+    ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
     ASSERT_EQ(kFileData[i], read_result[0]);
   }
   for (int i = 0; i < kNumFiles; i++) {
     delete files[i];
   }
-  ASSERT_OK(env_->DeleteFile(test_file));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(test_file));
 }
 
 #if HAVE_O_CLOEXEC
 
-TEST(EnvPosixTest, TestCloseOnExecSequentialFile) {
+TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_sequential.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   leveldb::SequentialFile* file = nullptr;
-  ASSERT_OK(env_->NewSequentialFile(file_path, &file));
+  ASSERT_LEVELDB_OK(env_->NewSequentialFile(file_path, &file));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
-TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
+TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_random_access.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   // Exhaust the RandomAccessFile mmap limit. This way, the test
   // RandomAccessFile instance below is backed by a file descriptor, not by an
   // mmap region.
   leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr};
   for (int i = 0; i < kReadOnlyFileLimit; i++) {
-    ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
+    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
   }
 
   leveldb::RandomAccessFile* file = nullptr;
-  ASSERT_OK(env_->NewRandomAccessFile(file_path, &file));
+  ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &file));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
   for (int i = 0; i < kReadOnlyFileLimit; i++) {
     delete mmapped_files[i];
   }
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
-TEST(EnvPosixTest, TestCloseOnExecWritableFile) {
+TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_writable.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   leveldb::WritableFile* file = nullptr;
-  ASSERT_OK(env_->NewWritableFile(file_path, &file));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile(file_path, &file));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
-TEST(EnvPosixTest, TestCloseOnExecAppendableFile) {
+TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_appendable.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   leveldb::WritableFile* file = nullptr;
-  ASSERT_OK(env_->NewAppendableFile(file_path, &file));
+  ASSERT_LEVELDB_OK(env_->NewAppendableFile(file_path, &file));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
-TEST(EnvPosixTest, TestCloseOnExecLockFile) {
+TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_lock.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   leveldb::FileLock* lock = nullptr;
-  ASSERT_OK(env_->LockFile(file_path, &lock));
+  ASSERT_LEVELDB_OK(env_->LockFile(file_path, &lock));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
-  ASSERT_OK(env_->UnlockFile(lock));
+  ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
 
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
-TEST(EnvPosixTest, TestCloseOnExecLogger) {
+TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
   std::unordered_set<int> open_fds;
   GetOpenFileDescriptors(&open_fds);
 
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string file_path = test_dir + "/close_on_exec_logger.txt";
-  ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+  ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
 
   leveldb::Logger* file = nullptr;
-  ASSERT_OK(env_->NewLogger(file_path, &file));
+  ASSERT_LEVELDB_OK(env_->NewLogger(file_path, &file));
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
 }
 
 #endif  // HAVE_O_CLOEXEC
@@ -346,5 +347,7 @@ int main(int argc, char** argv) {
   // All tests currently run with the same read-only file limits.
   leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
                                        leveldb::kMMapLimit);
-  return leveldb::test::RunAllTests();
+
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
diff --git a/util/env_test.cc b/util/env_test.cc
index 7db03fc..2a1f73b 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -6,32 +6,32 @@
 
 #include <algorithm>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
 #include "util/mutexlock.h"
-#include "util/testharness.h"
 #include "util/testutil.h"
 
 namespace leveldb {
 
 static const int kDelayMicros = 100000;
 
-class EnvTest {
+class EnvTest : public testing::Test {
  public:
   EnvTest() : env_(Env::Default()) {}
 
   Env* env_;
 };
 
-TEST(EnvTest, ReadWrite) {
+TEST_F(EnvTest, ReadWrite) {
   Random rnd(test::RandomSeed());
 
   // Get file to use for testing.
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file_name = test_dir + "/open_on_read.txt";
   WritableFile* writable_file;
-  ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
 
   // Fill a file with data generated via a sequence of randomly sized writes.
   static const size_t kDataSize = 10 * 1048576;
@@ -40,26 +40,26 @@ TEST(EnvTest, ReadWrite) {
     int len = rnd.Skewed(18);  // Up to 2^18 - 1, but typically much smaller
     std::string r;
     test::RandomString(&rnd, len, &r);
-    ASSERT_OK(writable_file->Append(r));
+    ASSERT_LEVELDB_OK(writable_file->Append(r));
     data += r;
     if (rnd.OneIn(10)) {
-      ASSERT_OK(writable_file->Flush());
+      ASSERT_LEVELDB_OK(writable_file->Flush());
     }
   }
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Close());
+  ASSERT_LEVELDB_OK(writable_file->Sync());
+  ASSERT_LEVELDB_OK(writable_file->Close());
   delete writable_file;
 
   // Read all data using a sequence of randomly sized reads.
   SequentialFile* sequential_file;
-  ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
+  ASSERT_LEVELDB_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
   std::string read_result;
   std::string scratch;
   while (read_result.size() < data.size()) {
     int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
     scratch.resize(std::max(len, 1));  // at least 1 so &scratch[0] is legal
     Slice read;
-    ASSERT_OK(sequential_file->Read(len, &read, &scratch[0]));
+    ASSERT_LEVELDB_OK(sequential_file->Read(len, &read, &scratch[0]));
     if (len > 0) {
       ASSERT_GT(read.size(), 0);
     }
@@ -70,7 +70,7 @@ TEST(EnvTest, ReadWrite) {
   delete sequential_file;
 }
 
-TEST(EnvTest, RunImmediately) {
+TEST_F(EnvTest, RunImmediately) {
   struct RunState {
     port::Mutex mu;
     port::CondVar cvar{&mu};
@@ -94,7 +94,7 @@ TEST(EnvTest, RunImmediately) {
   }
 }
 
-TEST(EnvTest, RunMany) {
+TEST_F(EnvTest, RunMany) {
   struct RunState {
     port::Mutex mu;
     port::CondVar cvar{&mu};
@@ -153,7 +153,7 @@ static void ThreadBody(void* arg) {
   s->mu.Unlock();
 }
 
-TEST(EnvTest, StartThread) {
+TEST_F(EnvTest, StartThread) {
   State state(0, 3);
   for (int i = 0; i < 3; i++) {
     env_->StartThread(&ThreadBody, &state);
@@ -166,10 +166,10 @@ TEST(EnvTest, StartThread) {
   ASSERT_EQ(state.val, 3);
 }
 
-TEST(EnvTest, TestOpenNonExistentFile) {
+TEST_F(EnvTest, TestOpenNonExistentFile) {
   // Write some test data to a single file that will be opened |n| times.
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
 
   std::string non_existent_file = test_dir + "/non_existent_file";
   ASSERT_TRUE(!env_->FileExists(non_existent_file));
@@ -184,54 +184,57 @@ TEST(EnvTest, TestOpenNonExistentFile) {
   ASSERT_TRUE(status.IsNotFound());
 }
 
-TEST(EnvTest, ReopenWritableFile) {
+TEST_F(EnvTest, ReopenWritableFile) {
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file_name = test_dir + "/reopen_writable_file.txt";
   env_->DeleteFile(test_file_name);
 
   WritableFile* writable_file;
-  ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
   std::string data("hello world!");
-  ASSERT_OK(writable_file->Append(data));
-  ASSERT_OK(writable_file->Close());
+  ASSERT_LEVELDB_OK(writable_file->Append(data));
+  ASSERT_LEVELDB_OK(writable_file->Close());
   delete writable_file;
 
-  ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+  ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
   data = "42";
-  ASSERT_OK(writable_file->Append(data));
-  ASSERT_OK(writable_file->Close());
+  ASSERT_LEVELDB_OK(writable_file->Append(data));
+  ASSERT_LEVELDB_OK(writable_file->Close());
   delete writable_file;
 
-  ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+  ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
   ASSERT_EQ(std::string("42"), data);
   env_->DeleteFile(test_file_name);
 }
 
-TEST(EnvTest, ReopenAppendableFile) {
+TEST_F(EnvTest, ReopenAppendableFile) {
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
   env_->DeleteFile(test_file_name);
 
   WritableFile* appendable_file;
-  ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+  ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
   std::string data("hello world!");
-  ASSERT_OK(appendable_file->Append(data));
-  ASSERT_OK(appendable_file->Close());
+  ASSERT_LEVELDB_OK(appendable_file->Append(data));
+  ASSERT_LEVELDB_OK(appendable_file->Close());
   delete appendable_file;
 
-  ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+  ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
   data = "42";
-  ASSERT_OK(appendable_file->Append(data));
-  ASSERT_OK(appendable_file->Close());
+  ASSERT_LEVELDB_OK(appendable_file->Append(data));
+  ASSERT_LEVELDB_OK(appendable_file->Close());
   delete appendable_file;
 
-  ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+  ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
   ASSERT_EQ(std::string("hello world!42"), data);
   env_->DeleteFile(test_file_name);
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
index 3c22133..b926107 100644
--- a/util/env_windows_test.cc
+++ b/util/env_windows_test.cc
@@ -2,17 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/env.h"
-
 #include "port/port.h"
 #include "util/env_windows_test_helper.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
 
 namespace leveldb {
 
 static const int kMMapLimit = 4;
 
-class EnvWindowsTest {
+class EnvWindowsTest : public testing::Test {
  public:
   static void SetFileLimits(int mmap_limit) {
     EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
@@ -23,10 +23,10 @@ class EnvWindowsTest {
   Env* env_;
 };
 
-TEST(EnvWindowsTest, TestOpenOnRead) {
+TEST_F(EnvWindowsTest, TestOpenOnRead) {
   // Write some test data to a single file that will be opened |n| times.
   std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file = test_dir + "/open_on_read.txt";
 
   FILE* f = fopen(test_file.c_str(), "w");
@@ -41,18 +41,18 @@ TEST(EnvWindowsTest, TestOpenOnRead) {
   const int kNumFiles = kMMapLimit + 5;
   leveldb::RandomAccessFile* files[kNumFiles] = {0};
   for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
   }
   char scratch;
   Slice read_result;
   for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+    ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
     ASSERT_EQ(kFileData[i], read_result[0]);
   }
   for (int i = 0; i < kNumFiles; i++) {
     delete files[i];
   }
-  ASSERT_OK(env_->DeleteFile(test_file));
+  ASSERT_LEVELDB_OK(env_->DeleteFile(test_file));
 }
 
 }  // namespace leveldb
@@ -60,5 +60,6 @@ TEST(EnvWindowsTest, TestOpenOnRead) {
 int main(int argc, char** argv) {
   // All tests currently run with the same read-only file limits.
   leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
-  return leveldb::test::RunAllTests();
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
diff --git a/util/hash_test.cc b/util/hash_test.cc
index 21f8171..e970c1e 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -3,12 +3,11 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "util/hash.h"
-#include "util/testharness.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 
 namespace leveldb {
 
-class HASH {};
-
 TEST(HASH, SignedUnsignedIssue) {
   const uint8_t data1[1] = {0x62};
   const uint8_t data2[2] = {0xc3, 0x97};
@@ -41,4 +40,7 @@ TEST(HASH, SignedUnsignedIssue) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/logging_test.cc b/util/logging_test.cc
index 389cbeb..92417aa 100644
--- a/util/logging_test.cc
+++ b/util/logging_test.cc
@@ -2,17 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/logging.h"
+
 #include <limits>
 #include <string>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/slice.h"
-#include "util/logging.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
-class Logging {};
-
 TEST(Logging, NumberToString) {
   ASSERT_EQ("0", NumberToString(0));
   ASSERT_EQ("1", NumberToString(1));
@@ -140,4 +139,7 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
index b41caca..edafb08 100644
--- a/util/no_destructor_test.cc
+++ b/util/no_destructor_test.cc
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/no_destructor.h"
+
 #include <cstdint>
 #include <cstdlib>
 #include <utility>
 
-#include "util/no_destructor.h"
-#include "util/testharness.h"
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 
 namespace leveldb {
 
@@ -28,8 +29,6 @@ constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
 
 }  // namespace
 
-class NoDestructorTest {};
-
 TEST(NoDestructorTest, StackInstance) {
   NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
   ASSERT_EQ(kGoldenA, instance.get()->a);
@@ -44,4 +43,7 @@ TEST(NoDestructorTest, StaticInstance) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/status_test.cc b/util/status_test.cc
index 2842319..b7e2444 100644
--- a/util/status_test.cc
+++ b/util/status_test.cc
@@ -2,11 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "leveldb/status.h"
+
 #include <utility>
 
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "leveldb/slice.h"
-#include "leveldb/status.h"
-#include "util/testharness.h"
 
 namespace leveldb {
 
@@ -37,4 +38,7 @@ TEST(Status, MoveConstructor) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/util/testharness.cc b/util/testharness.cc
deleted file mode 100644
index 318ecfa..0000000
--- a/util/testharness.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/testharness.h"
-
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <string>
-#include <vector>
-
-#include "leveldb/env.h"
-
-namespace leveldb {
-namespace test {
-
-namespace {
-struct Test {
-  const char* base;
-  const char* name;
-  void (*func)();
-};
-std::vector<Test>* tests;
-}  // namespace
-
-bool RegisterTest(const char* base, const char* name, void (*func)()) {
-  if (tests == nullptr) {
-    tests = new std::vector<Test>;
-  }
-  Test t;
-  t.base = base;
-  t.name = name;
-  t.func = func;
-  tests->push_back(t);
-  return true;
-}
-
-int RunAllTests() {
-  const char* matcher = getenv("LEVELDB_TESTS");
-
-  int num = 0;
-  if (tests != nullptr) {
-    for (size_t i = 0; i < tests->size(); i++) {
-      const Test& t = (*tests)[i];
-      if (matcher != nullptr) {
-        std::string name = t.base;
-        name.push_back('.');
-        name.append(t.name);
-        if (strstr(name.c_str(), matcher) == nullptr) {
-          continue;
-        }
-      }
-      fprintf(stderr, "==== Test %s.%s\n", t.base, t.name);
-      (*t.func)();
-      ++num;
-    }
-  }
-  fprintf(stderr, "==== PASSED %d tests\n", num);
-  return 0;
-}
-
-std::string TmpDir() {
-  std::string dir;
-  Status s = Env::Default()->GetTestDirectory(&dir);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  return dir;
-}
-
-int RandomSeed() {
-  const char* env = getenv("TEST_RANDOM_SEED");
-  int result = (env != nullptr ? atoi(env) : 301);
-  if (result <= 0) {
-    result = 301;
-  }
-  return result;
-}
-
-}  // namespace test
-}  // namespace leveldb
diff --git a/util/testharness.h b/util/testharness.h
deleted file mode 100644
index 72cd162..0000000
--- a/util/testharness.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
-#define STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <sstream>
-
-#include "leveldb/status.h"
-
-namespace leveldb {
-namespace test {
-
-// Run some of the tests registered by the TEST() macro.  If the
-// environment variable "LEVELDB_TESTS" is not set, runs all tests.
-// Otherwise, runs only the tests whose name contains the value of
-// "LEVELDB_TESTS" as a substring.  E.g., suppose the tests are:
-//    TEST(Foo, Hello) { ... }
-//    TEST(Foo, World) { ... }
-// LEVELDB_TESTS=Hello will run the first test
-// LEVELDB_TESTS=o     will run both tests
-// LEVELDB_TESTS=Junk  will run no tests
-//
-// Returns 0 if all tests pass.
-// Dies or returns a non-zero value if some test fails.
-int RunAllTests();
-
-// Return the directory to use for temporary storage.
-std::string TmpDir();
-
-// Return a randomization seed for this run.  Typically returns the
-// same number on repeated invocations of this binary, but automated
-// runs may be able to vary the seed.
-int RandomSeed();
-
-// An instance of Tester is allocated to hold temporary state during
-// the execution of an assertion.
-class Tester {
- private:
-  bool ok_;
-  const char* fname_;
-  int line_;
-  std::stringstream ss_;
-
- public:
-  Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
-
-  ~Tester() {
-    if (!ok_) {
-      fprintf(stderr, "%s:%d:%s\n", fname_, line_, ss_.str().c_str());
-      exit(1);
-    }
-  }
-
-  Tester& Is(bool b, const char* msg) {
-    if (!b) {
-      ss_ << " Assertion failure " << msg;
-      ok_ = false;
-    }
-    return *this;
-  }
-
-  Tester& IsOk(const Status& s) {
-    if (!s.ok()) {
-      ss_ << " " << s.ToString();
-      ok_ = false;
-    }
-    return *this;
-  }
-
-#define BINARY_OP(name, op)                          \
-  template <class X, class Y>                        \
-  Tester& name(const X& x, const Y& y) {             \
-    if (!(x op y)) {                                 \
-      ss_ << " failed: " << x << (" " #op " ") << y; \
-      ok_ = false;                                   \
-    }                                                \
-    return *this;                                    \
-  }
-
-  BINARY_OP(IsEq, ==)
-  BINARY_OP(IsNe, !=)
-  BINARY_OP(IsGe, >=)
-  BINARY_OP(IsGt, >)
-  BINARY_OP(IsLe, <=)
-  BINARY_OP(IsLt, <)
-#undef BINARY_OP
-
-  // Attach the specified value to the error message if an error has occurred
-  template <class V>
-  Tester& operator<<(const V& value) {
-    if (!ok_) {
-      ss_ << " " << value;
-    }
-    return *this;
-  }
-};
-
-#define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
-#define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
-#define ASSERT_NE(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
-#define ASSERT_GE(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
-#define ASSERT_GT(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
-#define ASSERT_LE(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
-#define ASSERT_LT(a, b) \
-  ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
-
-#define TCONCAT(a, b) TCONCAT1(a, b)
-#define TCONCAT1(a, b) a##b
-
-#define TEST(base, name)                                              \
-  class TCONCAT(_Test_, name) : public base {                         \
-   public:                                                            \
-    void _Run();                                                      \
-    static void _RunIt() {                                            \
-      TCONCAT(_Test_, name) t;                                        \
-      t._Run();                                                       \
-    }                                                                 \
-  };                                                                  \
-  bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
-      #base, #name, &TCONCAT(_Test_, name)::_RunIt);                  \
-  void TCONCAT(_Test_, name)::_Run()
-
-// Register the specified test.  Typically not used directly, but
-// invoked via the macro expansion of TEST.
-bool RegisterTest(const char* base, const char* name, void (*func)());
-
-}  // namespace test
-}  // namespace leveldb
-
-#endif  // STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
diff --git a/util/testutil.cc b/util/testutil.cc
index 6b151b9..5f77b08 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -4,6 +4,8 @@
 
 #include "util/testutil.h"
 
+#include <string>
+
 #include "util/random.h"
 
 namespace leveldb {
diff --git a/util/testutil.h b/util/testutil.h
index bb4051b..5765afb 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -5,6 +5,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 
+#include "third_party/googletest/googlemock/include/gmock/gmock.h"
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
 #include "helpers/memenv/memenv.h"
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
@@ -13,6 +15,20 @@
 namespace leveldb {
 namespace test {
 
+MATCHER(IsOK, "") { return arg.ok(); }
+
+// Macros for testing the results of functions that return leveldb::Status or
+// util::StatusOr<T> (for any type T).
+#define EXPECT_LEVELDB_OK(expression) \
+  EXPECT_THAT(expression, leveldb::test::IsOK())
+#define ASSERT_LEVELDB_OK(expression) \
+  ASSERT_THAT(expression, leveldb::test::IsOK())
+
+// Returns the random seed used at the start of the current test run.
+inline int RandomSeed() {
+  return testing::UnitTest::GetInstance()->random_seed();
+}
+
 // Store in *dst a random string of length "len" and return a Slice that
 // references the generated data.
 Slice RandomString(Random* rnd, int len, std::string* dst);

From db8352187b2c2d037b6fe215b7f82415789ec71f Mon Sep 17 00:00:00 2001
From: Victor Costan <pwnall@chromium.org>
Date: Mon, 25 Nov 2019 07:22:35 -0800
Subject: [PATCH 121/181] Fixup for adding the third_party/googletest
 submodule. (#754)

---
 third_party/googletest | 1 +
 1 file changed, 1 insertion(+)
 create mode 160000 third_party/googletest

diff --git a/third_party/googletest b/third_party/googletest
new file mode 160000
index 0000000..c27aceb
--- /dev/null
+++ b/third_party/googletest
@@ -0,0 +1 @@
+Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed

From 583a42b5961dfd1804b5588a0ad2723becc8cc6a Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Mon, 25 Nov 2019 09:29:06 -0800
Subject: [PATCH 122/181] Internal change.

PiperOrigin-RevId: 282373286
---
 db/autocompact_test.cc        | 2 +-
 db/corruption_test.cc         | 2 +-
 db/db_test.cc                 | 2 +-
 db/dbformat_test.cc           | 2 +-
 db/fault_injection_test.cc    | 2 +-
 db/filename_test.cc           | 2 +-
 db/log_test.cc                | 2 +-
 db/recovery_test.cc           | 2 +-
 db/skiplist_test.cc           | 2 +-
 db/version_edit_test.cc       | 2 +-
 db/version_set_test.cc        | 2 +-
 db/write_batch_test.cc        | 2 +-
 helpers/memenv/memenv_test.cc | 2 +-
 issues/issue178_test.cc       | 2 +-
 issues/issue200_test.cc       | 2 +-
 issues/issue320_test.cc       | 2 +-
 table/filter_block_test.cc    | 2 +-
 table/table_test.cc           | 2 +-
 third_party/googletest        | 1 -
 util/arena_test.cc            | 2 +-
 util/bloom_test.cc            | 2 +-
 util/cache_test.cc            | 2 +-
 util/coding_test.cc           | 2 +-
 util/crc32c_test.cc           | 2 +-
 util/env_posix_test.cc        | 2 +-
 util/env_test.cc              | 2 +-
 util/env_windows_test.cc      | 2 +-
 util/hash_test.cc             | 2 +-
 util/logging_test.cc          | 2 +-
 util/no_destructor_test.cc    | 2 +-
 util/status_test.cc           | 2 +-
 util/testutil.h               | 4 ++--
 32 files changed, 32 insertions(+), 33 deletions(-)
 delete mode 160000 third_party/googletest

diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index d4caf71..9779c95 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 4d20946..b22f9e7 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -4,7 +4,7 @@
 
 #include <sys/types.h>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
diff --git a/db/db_test.cc b/db/db_test.cc
index e8e3495..1bd5afc 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -7,7 +7,7 @@
 #include <atomic>
 #include <string>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index ca49e0a..4a11c4a 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -4,7 +4,7 @@
 
 #include "db/dbformat.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "util/logging.h"
 
 namespace leveldb {
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 80b8f12..db8580c 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -9,7 +9,7 @@
 #include <map>
 #include <set>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
diff --git a/db/filename_test.cc b/db/filename_test.cc
index ad0bc73..f291d72 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -4,7 +4,7 @@
 
 #include "db/filename.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/dbformat.h"
 #include "port/port.h"
 #include "util/logging.h"
diff --git a/db/log_test.cc b/db/log_test.cc
index 680f267..c765e93 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
 #include "leveldb/env.h"
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 0657743..cf6574e 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 04b9fa7..7c5d09b 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -7,7 +7,7 @@
 #include <atomic>
 #include <set>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 228fa3b..39ea8b7 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -4,7 +4,7 @@
 
 #include "db/version_edit.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 
 namespace leveldb {
 
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index 71b19a7..dee6b4c 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -4,7 +4,7 @@
 
 #include "db/version_set.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "util/logging.h"
 #include "util/testutil.h"
 
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index b33993a..64df9b8 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
 #include "leveldb/db.h"
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 72e22da..2001101 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -7,7 +7,7 @@
 #include <string>
 #include <vector>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index 4a52a1b..7fc43ea 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -7,7 +7,7 @@
 #include <iostream>
 #include <sstream>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
 #include "util/testutil.h"
diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc
index ee08bc6..4eba23a 100644
--- a/issues/issue200_test.cc
+++ b/issues/issue200_test.cc
@@ -6,7 +6,7 @@
 // to forward, the current key can be yielded unexpectedly if a new
 // mutation has been added just before the current key.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/db.h"
 #include "util/testutil.h"
 
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index c289ab4..c08296a 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -9,7 +9,7 @@
 #include <string>
 #include <vector>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
 #include "util/testutil.h"
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 53be948..91a6be2 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -4,7 +4,7 @@
 
 #include "table/filter_block.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/filter_policy.h"
 #include "util/coding.h"
 #include "util/hash.h"
diff --git a/table/table_test.cc b/table/table_test.cc
index 09d1b5d..713b63e 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -7,7 +7,7 @@
 #include <map>
 #include <string>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
diff --git a/third_party/googletest b/third_party/googletest
deleted file mode 160000
index c27aceb..0000000
--- a/third_party/googletest
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 3f8855b..90226fe 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -4,7 +4,7 @@
 
 #include "util/arena.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "util/random.h"
 
 namespace leveldb {
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index bcbd7f6..bcf14dc 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/filter_policy.h"
 #include "util/coding.h"
 #include "util/logging.h"
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 8ce9463..b5d9873 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -6,7 +6,7 @@
 
 #include <vector>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "util/coding.h"
 
 namespace leveldb {
diff --git a/util/coding_test.cc b/util/coding_test.cc
index db83367..aa6c748 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -6,7 +6,7 @@
 
 #include <vector>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 
 namespace leveldb {
 
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 1e2aae7..647e561 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -4,7 +4,7 @@
 
 #include "util/crc32c.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 
 namespace leveldb {
 namespace crc32c {
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 5ee2248..ed4ac96 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -13,7 +13,7 @@
 #include <unordered_set>
 #include <vector>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "util/env_posix_test_helper.h"
diff --git a/util/env_test.cc b/util/env_test.cc
index 2a1f73b..b35ba05 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -6,7 +6,7 @@
 
 #include <algorithm>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
 #include "util/mutexlock.h"
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
index b926107..c75ca7b 100644
--- a/util/env_windows_test.cc
+++ b/util/env_windows_test.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/env.h"
 #include "port/port.h"
 #include "util/env_windows_test_helper.h"
diff --git a/util/hash_test.cc b/util/hash_test.cc
index e970c1e..6d6771f 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -4,7 +4,7 @@
 
 #include "util/hash.h"
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 
 namespace leveldb {
 
diff --git a/util/logging_test.cc b/util/logging_test.cc
index 92417aa..24e1fe9 100644
--- a/util/logging_test.cc
+++ b/util/logging_test.cc
@@ -7,7 +7,7 @@
 #include <limits>
 #include <string>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/slice.h"
 
 namespace leveldb {
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
index edafb08..68fdfee 100644
--- a/util/no_destructor_test.cc
+++ b/util/no_destructor_test.cc
@@ -8,7 +8,7 @@
 #include <cstdlib>
 #include <utility>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 
 namespace leveldb {
 
diff --git a/util/status_test.cc b/util/status_test.cc
index b7e2444..914b386 100644
--- a/util/status_test.cc
+++ b/util/status_test.cc
@@ -6,7 +6,7 @@
 
 #include <utility>
 
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gtest/gtest.h"
 #include "leveldb/slice.h"
 
 namespace leveldb {
diff --git a/util/testutil.h b/util/testutil.h
index 5765afb..cc67d96 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -5,8 +5,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
 
-#include "third_party/googletest/googlemock/include/gmock/gmock.h"
-#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
 #include "helpers/memenv/memenv.h"
 #include "leveldb/env.h"
 #include "leveldb/slice.h"

From e36b831851fd2ff33ca0d9bee65cde7f395da10b Mon Sep 17 00:00:00 2001
From: Victor Costan <pwnall@chromium.org>
Date: Mon, 2 Dec 2019 12:18:34 -0800
Subject: [PATCH 123/181] Fixup for adding the third_party/googletest
 submodule.

---
 third_party/googletest | 1 +
 1 file changed, 1 insertion(+)
 create mode 160000 third_party/googletest

diff --git a/third_party/googletest b/third_party/googletest
new file mode 160000
index 0000000..c27aceb
--- /dev/null
+++ b/third_party/googletest
@@ -0,0 +1 @@
+Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed

From 58a89bbcb28d02d5704c5fff7aeb6e72f7ca2431 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 2 Dec 2019 13:37:34 -0800
Subject: [PATCH 124/181] Add WITHOUT ROWID to SQLite benchmark.

The SQLite-specific schema feature is documented at
https://www.sqlite.org/withoutrowid.html and
https://www.sqlite.org/rowidtable.html.

By default, SQLite stores each table in a B-tree keyed by an integer,
called the ROWID. Any index, including the PRIMARY KEY index, is a
separate B-tree mapping index keys to ROWIDs. Tables without ROWIDs are
stored in a B-tree keyed by the primary key. Additional indexes (the
PRIMARY KEY index is implicitly built into the table) are stored as
B-trees mapping index keys to row primary keys.

This CL introduces a boolean --use_rowids flag to db_bench_sqlite3. When
the flag is false (default), the schema of the test table includes
WITHOUT ROWID. The test table uses a primary key, so adding WITHOUT
ROWID to the schema reduces the number of B-trees used by the benchmark
from 2 to 1. This brings SQLite's disk usage closer to LevelDB's.

When WITHOUT ROWID is used, SQLite fares better (than today) on
benchmarks with small (16-byte) keys, and worse on benchmarks with large
(100 KB) values.
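
For concreteness, a sketch of the schema selection (illustration only,
not part of the patch; the helper name is mine, mirroring the
create_stmt logic added to db_bench_sqlite3.cc below):

  #include <string>

  static bool FLAGS_use_rowids = false;

  // Returns the schema used for the benchmark's test table.
  std::string TestTableSchema() {
    std::string create_stmt =
        "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
    // Default (--use_rowids=0): WITHOUT ROWID, so the table is a single
    // B-tree keyed by the primary key.
    // With --use_rowids=1: a ROWID-keyed table B-tree plus a separate
    // PRIMARY KEY index B-tree.
    if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
    return create_stmt;
  }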

Baseline results:
fillseq      :      21.310 micros/op;    5.2 MB/s
fillseqsync  :     146.377 micros/op;    0.8 MB/s (10000 ops)
fillseqbatch :       2.065 micros/op;   53.6 MB/s
fillrandom   :      34.767 micros/op;    3.2 MB/s
fillrandsync :     159.943 micros/op;    0.7 MB/s (10000 ops)
fillrandbatch :      15.055 micros/op;    7.3 MB/s
overwrite    :      43.660 micros/op;    2.5 MB/s
overwritebatch :      27.691 micros/op;    4.0 MB/s
readrandom   :      12.725 micros/op;
readseq      :       2.602 micros/op;   36.7 MB/s
fillrand100K :     606.333 micros/op;  157.3 MB/s (1000 ops)
fillseq100K  :     657.457 micros/op;  145.1 MB/s (1000 ops)
readseq      :      46.523 micros/op; 2049.9 MB/s
readrand100K :      54.943 micros/op;

Results after this CL:
fillseq      :      16.231 micros/op;    6.8 MB/s
fillseqsync  :     147.460 micros/op;    0.8 MB/s (10000 ops)
fillseqbatch :       2.294 micros/op;   48.2 MB/s
fillrandom   :      27.871 micros/op;    4.0 MB/s
fillrandsync :     141.979 micros/op;    0.8 MB/s (10000 ops)
fillrandbatch :      16.087 micros/op;    6.9 MB/s
overwrite    :      26.829 micros/op;    4.1 MB/s
overwritebatch :      19.014 micros/op;    5.8 MB/s
readrandom   :      11.657 micros/op;
readseq      :       0.155 micros/op;  615.0 MB/s
fillrand100K :     816.812 micros/op;  116.8 MB/s (1000 ops)
fillseq100K  :     754.689 micros/op;  126.4 MB/s (1000 ops)
readseq      :      47.112 micros/op; 2024.3 MB/s
readrand100K :     287.679 micros/op;

Results after this CL, with --use_rowids=1:
fillseq      :      20.655 micros/op;    5.4 MB/s
fillseqsync  :     146.408 micros/op;    0.8 MB/s (10000 ops)
fillseqbatch :       2.045 micros/op;   54.1 MB/s
fillrandom   :      34.080 micros/op;    3.2 MB/s
fillrandsync :     154.582 micros/op;    0.7 MB/s (10000 ops)
fillrandbatch :      14.404 micros/op;    7.7 MB/s
overwrite    :      42.928 micros/op;    2.6 MB/s
overwritebatch :      27.829 micros/op;    4.0 MB/s
readrandom   :      12.835 micros/op;
readseq      :       2.483 micros/op;   38.4 MB/s
fillrand100K :     603.265 micros/op;  158.1 MB/s (1000 ops)
fillseq100K  :     662.473 micros/op;  144.0 MB/s (1000 ops)
readseq      :      45.478 micros/op; 2097.0 MB/s
readrand100K :      54.439 micros/op;
PiperOrigin-RevId: 283407101
---
 benchmarks/db_bench_sqlite3.cc | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc
index f183f4f..d3fe339 100644
--- a/benchmarks/db_bench_sqlite3.cc
+++ b/benchmarks/db_bench_sqlite3.cc
@@ -69,6 +69,9 @@ static int FLAGS_num_pages = 4096;
 // benchmark will fail.
 static bool FLAGS_use_existing_db = false;
 
+// If true, the SQLite table has ROWIDs.
+static bool FLAGS_use_rowids = false;
+
 // If true, we allow batch writes to occur
 static bool FLAGS_transaction = true;
 
@@ -462,6 +465,7 @@ class Benchmark {
     std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
     std::string create_stmt =
         "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+    if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
     std::string stmt_array[] = {locking_stmt, create_stmt};
     int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
     for (int i = 0; i < stmt_array_length; i++) {
@@ -678,6 +682,9 @@ int main(int argc, char** argv) {
     } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
                (n == 0 || n == 1)) {
       FLAGS_use_existing_db = n;
+    } else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 &&
+               (n == 0 || n == 1)) {
+      FLAGS_use_rowids = n;
     } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
       FLAGS_num = n;
     } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {

From d152b23f3b787f67a0ac3a40498e13831f3778d7 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 3 Dec 2019 13:15:21 -0800
Subject: [PATCH 125/181] Defend against inclusion of windows.h in tests that
 invoke Env::DeleteFile.

PiperOrigin-RevId: 283607548
---
 benchmarks/db_bench.cc        | 5 +++++
 db/db_test.cc                 | 5 +++++
 db/fault_injection_test.cc    | 5 +++++
 db/recovery_test.cc           | 5 +++++
 helpers/memenv/memenv_test.cc | 5 +++++
 util/env_test.cc              | 5 +++++
 6 files changed, 30 insertions(+)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 3696023..397e23f 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -18,6 +18,11 @@
 #include "util/random.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 // Comma-separated list of operations to run in the specified order
 //   Actual benchmarks:
 //      fillseq       -- write N values in sequential key order in async mode
diff --git a/db/db_test.cc b/db/db_test.cc
index 1bd5afc..3f41c36 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -23,6 +23,11 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
 static std::string RandomString(Random* rnd, int len) {
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index db8580c..b2d2adb 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -25,6 +25,11 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
 static const int kValueSize = 1000;
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index cf6574e..ea137e6 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -13,6 +13,11 @@
 #include "util/logging.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
 class RecoveryTest : public testing::Test {
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 2001101..619fe51 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -13,6 +13,11 @@
 #include "leveldb/env.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
 class MemEnvTest : public testing::Test {
diff --git a/util/env_test.cc b/util/env_test.cc
index b35ba05..09e9d39 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -12,6 +12,11 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
+#if defined(_WIN32) && defined(DeleteFile)
+// See rationale in env.h
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
 static const int kDelayMicros = 100000;

From a0191e5563b7a6c24b39edcbdbff29e602e0acfc Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 8 Jan 2020 09:14:53 -0800
Subject: [PATCH 126/181] Add Env::Remove{File,Dir} which obsolete
 Env::Delete{File,Dir}.

The "DeleteFile" method name causes pain for Windows developers, because
<windows.h> #defines a DeleteFile macro to DeleteFileW or DeleteFileA.
Current code uses workarounds, like #undefining DeleteFile everywhere an
Env is declared, implemented, or used.

This CL removes the need for workarounds by renaming Env::DeleteFile to
Env::RemoveFile. For consistency, Env::DeleteDir is also renamed to
Env::RemoveDir. A few internal methods are also renamed for consistency.
Software that supports Windows is expected to migrate any Env
implementations and usage to Remove{File,Dir}, and never use the name
Env::Delete{File,Dir} in its code.

The renaming is done in a backwards-compatible way, at the risk of
making it slightly more difficult to build a new correct Env
implementation. The backwards compatibility is achieved using the
following hacks:

1) Env::Remove{File,Dir} methods are added, with a default
    implementation that calls into Env::Delete{File,Dir}. This makes old
    Env implementations compatible with code that calls into the updated
    API.
2) The Env::Delete{File,Dir} methods are no longer pure virtuals.
    Instead, they gain a default implementation that calls into
    Env::Remove{File,Dir}. This makes updated Env implementations
    compatible with code that calls into the old API.

The cost of this approach is that it's possible to write an Env that
overrides neither Remove{File,Dir} nor Delete{File,Dir} without getting
a compiler warning. However, attempting to run the test suite will
immediately fail with an infinite call stack ending in
{Remove,Delete}{File,Dir}, making developers aware of the problem.
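
A minimal sketch of the forwarding defaults described above (my
illustration, not the exact code in include/leveldb/env.h, which also
has to cope with the <windows.h> DeleteFile macro mentioned earlier):

  #include <string>

  #include "leveldb/status.h"

  // Simplified stand-in for leveldb::Env, showing only the two methods.
  class Env {
   public:
    virtual ~Env() = default;

    // New name. The default forwards to the legacy name, so old Env
    // subclasses that only override DeleteFile keep working.
    virtual leveldb::Status RemoveFile(const std::string& fname) {
      return DeleteFile(fname);
    }

    // Legacy name. The default forwards to the new name, so updated Env
    // subclasses that only override RemoveFile keep working.
    virtual leveldb::Status DeleteFile(const std::string& fname) {
      return RemoveFile(fname);
    }
  };

  // A subclass that overrides neither method still compiles, but calling
  // either one recurses forever, producing the infinite call stack noted
  // above.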

PiperOrigin-RevId: 288710907
---
 benchmarks/db_bench.cc         |  4 +-
 benchmarks/db_bench_sqlite3.cc |  2 +-
 benchmarks/db_bench_tree_db.cc |  2 +-
 db/builder.cc                  |  2 +-
 db/db_impl.cc                  | 20 +++++-----
 db/db_impl.h                   |  2 +-
 db/db_test.cc                  |  8 ++--
 db/fault_injection_test.cc     | 22 +++++-----
 db/filename.cc                 |  2 +-
 db/recovery_test.cc            | 12 +++---
 db/repair.cc                   |  6 +--
 db/version_edit.cc             |  2 +-
 db/version_edit.h              |  2 +-
 db/version_edit_test.cc        |  2 +-
 db/version_set.cc              |  4 +-
 doc/impl.md                    |  2 +-
 helpers/memenv/memenv.cc       | 10 ++---
 helpers/memenv/memenv_test.cc  |  6 +--
 include/leveldb/env.h          | 73 ++++++++++++++++++++++++----------
 util/env.cc                    | 16 +++++++-
 util/env_posix.cc              |  4 +-
 util/env_posix_test.cc         | 14 +++----
 util/env_test.cc               |  8 ++--
 util/env_windows.cc            |  8 +---
 util/env_windows_test.cc       |  2 +-
 25 files changed, 138 insertions(+), 97 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 397e23f..03da9d8 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -414,7 +414,7 @@ class Benchmark {
     g_env->GetChildren(FLAGS_db, &files);
     for (size_t i = 0; i < files.size(); i++) {
       if (Slice(files[i]).starts_with("heap-")) {
-        g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
+        g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]);
       }
     }
     if (!FLAGS_use_existing_db) {
@@ -912,7 +912,7 @@ class Benchmark {
     delete file;
     if (!ok) {
       fprintf(stderr, "heap profiling not supported\n");
-      g_env->DeleteFile(fname);
+      g_env->RemoveFile(fname);
     }
   }
 };
diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc
index d3fe339..9c32a2d 100644
--- a/benchmarks/db_bench_sqlite3.cc
+++ b/benchmarks/db_bench_sqlite3.cc
@@ -328,7 +328,7 @@ class Benchmark {
           std::string file_name(test_dir);
           file_name += "/";
           file_name += files[i];
-          Env::Default()->DeleteFile(file_name.c_str());
+          Env::Default()->RemoveFile(file_name.c_str());
         }
       }
     }
diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc
index b2f6646..43f0f65 100644
--- a/benchmarks/db_bench_tree_db.cc
+++ b/benchmarks/db_bench_tree_db.cc
@@ -301,7 +301,7 @@ class Benchmark {
           std::string file_name(test_dir);
           file_name += "/";
           file_name += files[i];
-          Env::Default()->DeleteFile(file_name.c_str());
+          Env::Default()->RemoveFile(file_name.c_str());
         }
       }
     }
diff --git a/db/builder.cc b/db/builder.cc
index 9520ee4..943e857 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -71,7 +71,7 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
   if (s.ok() && meta->file_size > 0) {
     // Keep it
   } else {
-    env->DeleteFile(fname);
+    env->RemoveFile(fname);
   }
   return s;
 }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 95e2bb4..ba0a46d 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -206,7 +206,7 @@ Status DBImpl::NewDB() {
     // Make "CURRENT" file that points to the new manifest file.
     s = SetCurrentFile(env_, dbname_, 1);
   } else {
-    env_->DeleteFile(manifest);
+    env_->RemoveFile(manifest);
   }
   return s;
 }
@@ -220,7 +220,7 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
   }
 }
 
-void DBImpl::DeleteObsoleteFiles() {
+void DBImpl::RemoveObsoleteFiles() {
   mutex_.AssertHeld();
 
   if (!bg_error_.ok()) {
@@ -282,7 +282,7 @@ void DBImpl::DeleteObsoleteFiles() {
   // are therefore safe to delete while allowing other threads to proceed.
   mutex_.Unlock();
   for (const std::string& filename : files_to_delete) {
-    env_->DeleteFile(dbname_ + "/" + filename);
+    env_->RemoveFile(dbname_ + "/" + filename);
   }
   mutex_.Lock();
 }
@@ -569,7 +569,7 @@ void DBImpl::CompactMemTable() {
     imm_->Unref();
     imm_ = nullptr;
     has_imm_.store(false, std::memory_order_release);
-    DeleteObsoleteFiles();
+    RemoveObsoleteFiles();
   } else {
     RecordBackgroundError(s);
   }
@@ -729,7 +729,7 @@ void DBImpl::BackgroundCompaction() {
     // Move file to next level
     assert(c->num_input_files(0) == 1);
     FileMetaData* f = c->input(0, 0);
-    c->edit()->DeleteFile(c->level(), f->number);
+    c->edit()->RemoveFile(c->level(), f->number);
     c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
                        f->largest);
     status = versions_->LogAndApply(c->edit(), &mutex_);
@@ -749,7 +749,7 @@ void DBImpl::BackgroundCompaction() {
     }
     CleanupCompaction(compact);
     c->ReleaseInputs();
-    DeleteObsoleteFiles();
+    RemoveObsoleteFiles();
   }
   delete c;
 
@@ -1506,7 +1506,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
     s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
   }
   if (s.ok()) {
-    impl->DeleteObsoleteFiles();
+    impl->RemoveObsoleteFiles();
     impl->MaybeScheduleCompaction();
   }
   impl->mutex_.Unlock();
@@ -1539,15 +1539,15 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
     for (size_t i = 0; i < filenames.size(); i++) {
       if (ParseFileName(filenames[i], &number, &type) &&
           type != kDBLockFile) {  // Lock file will be deleted at end
-        Status del = env->DeleteFile(dbname + "/" + filenames[i]);
+        Status del = env->RemoveFile(dbname + "/" + filenames[i]);
         if (result.ok() && !del.ok()) {
           result = del;
         }
       }
     }
     env->UnlockFile(lock);  // Ignore error since state is already gone
-    env->DeleteFile(lockname);
-    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
+    env->RemoveFile(lockname);
+    env->RemoveDir(dbname);  // Ignore error in case dir contains other files
   }
   return result;
 }
diff --git a/db/db_impl.h b/db/db_impl.h
index 685735c..c7b0172 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -116,7 +116,7 @@ class DBImpl : public DB {
   void MaybeIgnoreError(Status* s) const;
 
   // Delete any unneeded files and stale in-memory entries.
-  void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // Compact the in-memory write buffer to disk.  Switches to a new
   // log-file/memtable and writes a new descriptor iff successful.
diff --git a/db/db_test.cc b/db/db_test.cc
index 3f41c36..2ee6761 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -509,7 +509,7 @@ class DBTest : public testing::Test {
     FileType type;
     for (size_t i = 0; i < filenames.size(); i++) {
       if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
-        EXPECT_LEVELDB_OK(env_->DeleteFile(TableFileName(dbname_, number)));
+        EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number)));
         return true;
       }
     }
@@ -1666,7 +1666,7 @@ TEST_F(DBTest, DBOpen_Options) {
 TEST_F(DBTest, DestroyEmptyDir) {
   std::string dbname = testing::TempDir() + "db_empty_dir";
   TestEnv env(Env::Default());
-  env.DeleteDir(dbname);
+  env.RemoveDir(dbname);
   ASSERT_TRUE(!env.FileExists(dbname));
 
   Options opts;
@@ -1693,7 +1693,7 @@ TEST_F(DBTest, DestroyEmptyDir) {
 
 TEST_F(DBTest, DestroyOpenDB) {
   std::string dbname = testing::TempDir() + "open_db_dir";
-  env_->DeleteDir(dbname);
+  env_->RemoveDir(dbname);
   ASSERT_TRUE(!env_->FileExists(dbname));
 
   Options opts;
@@ -2279,7 +2279,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
 
   for (int i = 0; i < iters; i++) {
     VersionEdit vedit;
-    vedit.DeleteFile(2, fnum);
+    vedit.RemoveFile(2, fnum);
     InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
     InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index b2d2adb..60e4631 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -77,7 +77,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
       if (s.ok()) {
         s = env->RenameFile(tmp_name, filename);
       } else {
-        env->DeleteFile(tmp_name);
+        env->RemoveFile(tmp_name);
       }
     }
   }
@@ -138,12 +138,12 @@ class FaultInjectionTestEnv : public EnvWrapper {
                          WritableFile** result) override;
   Status NewAppendableFile(const std::string& fname,
                            WritableFile** result) override;
-  Status DeleteFile(const std::string& f) override;
+  Status RemoveFile(const std::string& f) override;
   Status RenameFile(const std::string& s, const std::string& t) override;
 
   void WritableFileClosed(const FileState& state);
   Status DropUnsyncedFileData();
-  Status DeleteFilesCreatedAfterLastDirSync();
+  Status RemoveFilesCreatedAfterLastDirSync();
   void DirWasSynced();
   bool IsFileCreatedSinceLastDirSync(const std::string& filename);
   void ResetState();
@@ -303,8 +303,8 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
   new_files_since_last_dir_sync_.erase(f);
 }
 
-Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
-  Status s = EnvWrapper::DeleteFile(f);
+Status FaultInjectionTestEnv::RemoveFile(const std::string& f) {
+  Status s = EnvWrapper::RemoveFile(f);
   EXPECT_LEVELDB_OK(s);
   if (s.ok()) {
     UntrackFile(f);
@@ -340,17 +340,17 @@ void FaultInjectionTestEnv::ResetState() {
   SetFilesystemActive(true);
 }
 
-Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
-  // Because DeleteFile access this container make a copy to avoid deadlock
+Status FaultInjectionTestEnv::RemoveFilesCreatedAfterLastDirSync() {
+  // Because RemoveFile accesses this container, make a copy to avoid deadlock
   mutex_.Lock();
   std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
                                   new_files_since_last_dir_sync_.end());
   mutex_.Unlock();
   Status status;
   for (const auto& new_file : new_files) {
-    Status delete_status = DeleteFile(new_file);
-    if (!delete_status.ok() && status.ok()) {
-      status = std::move(delete_status);
+    Status remove_status = RemoveFile(new_file);
+    if (!remove_status.ok() && status.ok()) {
+      status = std::move(remove_status);
     }
   }
   return status;
@@ -482,7 +482,7 @@ class FaultInjectionTest : public testing::Test {
         ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData());
         break;
       case RESET_DELETE_UNSYNCED_FILES:
-        ASSERT_LEVELDB_OK(env_->DeleteFilesCreatedAfterLastDirSync());
+        ASSERT_LEVELDB_OK(env_->RemoveFilesCreatedAfterLastDirSync());
         break;
       default:
         assert(false);
diff --git a/db/filename.cc b/db/filename.cc
index 85de45c..9b451fc 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -133,7 +133,7 @@ Status SetCurrentFile(Env* env, const std::string& dbname,
     s = env->RenameFile(tmp, CurrentFileName(dbname));
   }
   if (!s.ok()) {
-    env->DeleteFile(tmp);
+    env->RemoveFile(tmp);
   }
   return s;
 }
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index ea137e6..04b39ae 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -100,19 +100,19 @@ class RecoveryTest : public testing::Test {
 
   std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
 
-  size_t DeleteLogFiles() {
+  size_t RemoveLogFiles() {
     // Linux allows unlinking open files, but Windows does not.
     // Closing the db allows for file deletion.
     Close();
     std::vector<uint64_t> logs = GetFiles(kLogFile);
     for (size_t i = 0; i < logs.size(); i++) {
-      EXPECT_LEVELDB_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
+      EXPECT_LEVELDB_OK(env_->RemoveFile(LogName(logs[i]))) << LogName(logs[i]);
     }
     return logs.size();
   }
 
-  void DeleteManifestFile() {
-    ASSERT_LEVELDB_OK(env_->DeleteFile(ManifestFileName()));
+  void RemoveManifestFile() {
+    ASSERT_LEVELDB_OK(env_->RemoveFile(ManifestFileName()));
   }
 
   uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
@@ -212,7 +212,7 @@ TEST_F(RecoveryTest, LargeManifestCompacted) {
 
 TEST_F(RecoveryTest, NoLogFiles) {
   ASSERT_LEVELDB_OK(Put("foo", "bar"));
-  ASSERT_EQ(1, DeleteLogFiles());
+  ASSERT_EQ(1, RemoveLogFiles());
   Open();
   ASSERT_EQ("NOT_FOUND", Get("foo"));
   Open();
@@ -327,7 +327,7 @@ TEST_F(RecoveryTest, MultipleLogFiles) {
 TEST_F(RecoveryTest, ManifestMissing) {
   ASSERT_LEVELDB_OK(Put("foo", "bar"));
   Close();
-  DeleteManifestFile();
+  RemoveManifestFile();
 
   Status status = OpenWithStatus();
   ASSERT_TRUE(status.IsCorruption());
diff --git a/db/repair.cc b/db/repair.cc
index d9d12ba..d2a495e 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -341,7 +341,7 @@ class Repairer {
       }
     }
     if (!s.ok()) {
-      env_->DeleteFile(copy);
+      env_->RemoveFile(copy);
     }
   }
 
@@ -386,7 +386,7 @@ class Repairer {
     file = nullptr;
 
     if (!status.ok()) {
-      env_->DeleteFile(tmp);
+      env_->RemoveFile(tmp);
     } else {
       // Discard older manifests
       for (size_t i = 0; i < manifests_.size(); i++) {
@@ -398,7 +398,7 @@ class Repairer {
       if (status.ok()) {
         status = SetCurrentFile(env_, dbname_, 1);
       } else {
-        env_->DeleteFile(tmp);
+        env_->RemoveFile(tmp);
       }
     }
     return status;
diff --git a/db/version_edit.cc b/db/version_edit.cc
index cd770ef..3e9012f 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -232,7 +232,7 @@ std::string VersionEdit::DebugString() const {
     r.append(compact_pointers_[i].second.DebugString());
   }
   for (const auto& deleted_files_kvp : deleted_files_) {
-    r.append("\n  DeleteFile: ");
+    r.append("\n  RemoveFile: ");
     AppendNumberTo(&r, deleted_files_kvp.first);
     r.append(" ");
     AppendNumberTo(&r, deleted_files_kvp.second);
diff --git a/db/version_edit.h b/db/version_edit.h
index 0de4531..137b4b1 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -71,7 +71,7 @@ class VersionEdit {
   }
 
   // Delete the specified "file" from the specified "level".
-  void DeleteFile(int level, uint64_t file) {
+  void RemoveFile(int level, uint64_t file) {
     deleted_files_.insert(std::make_pair(level, file));
   }
 
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 39ea8b7..acafab0 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -27,7 +27,7 @@ TEST(VersionEditTest, EncodeDecode) {
     edit.AddFile(3, kBig + 300 + i, kBig + 400 + i,
                  InternalKey("foo", kBig + 500 + i, kTypeValue),
                  InternalKey("zoo", kBig + 600 + i, kTypeDeletion));
-    edit.DeleteFile(4, kBig + 700 + i);
+    edit.RemoveFile(4, kBig + 700 + i);
     edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue));
   }
 
diff --git a/db/version_set.cc b/db/version_set.cc
index cd07346..2d5e51a 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -853,7 +853,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
       delete descriptor_file_;
       descriptor_log_ = nullptr;
       descriptor_file_ = nullptr;
-      env_->DeleteFile(new_manifest_file);
+      env_->RemoveFile(new_manifest_file);
     }
   }
 
@@ -1502,7 +1502,7 @@ bool Compaction::IsTrivialMove() const {
 void Compaction::AddInputDeletions(VersionEdit* edit) {
   for (int which = 0; which < 2; which++) {
     for (size_t i = 0; i < inputs_[which].size(); i++) {
-      edit->DeleteFile(level_ + which, inputs_[which][i]->number);
+      edit->RemoveFile(level_ + which, inputs_[which][i]->number);
     }
   }
 }
diff --git a/doc/impl.md b/doc/impl.md
index cacabb9..45187a2 100644
--- a/doc/impl.md
+++ b/doc/impl.md
@@ -166,7 +166,7 @@ So maybe even the sharding is not necessary on modern filesystems?
 
 ## Garbage collection of files
 
-`DeleteObsoleteFiles()` is called at the end of every compaction and at the end
+`RemoveObsoleteFiles()` is called at the end of every compaction and at the end
 of recovery. It finds the names of all files in the database. It deletes all log
 files that are not the current log file. It deletes all table files that are not
 referenced from some level and are not the output of an active compaction.
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index 31d2bc0..383c78b 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -309,7 +309,7 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  void DeleteFileInternal(const std::string& fname)
+  void RemoveFileInternal(const std::string& fname)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
     if (file_map_.find(fname) == file_map_.end()) {
       return;
@@ -319,19 +319,19 @@ class InMemoryEnv : public EnvWrapper {
     file_map_.erase(fname);
   }
 
-  Status DeleteFile(const std::string& fname) override {
+  Status RemoveFile(const std::string& fname) override {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       return Status::IOError(fname, "File not found");
     }
 
-    DeleteFileInternal(fname);
+    RemoveFileInternal(fname);
     return Status::OK();
   }
 
   Status CreateDir(const std::string& dirname) override { return Status::OK(); }
 
-  Status DeleteDir(const std::string& dirname) override { return Status::OK(); }
+  Status RemoveDir(const std::string& dirname) override { return Status::OK(); }
 
   Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
     MutexLock lock(&mutex_);
@@ -350,7 +350,7 @@ class InMemoryEnv : public EnvWrapper {
       return Status::IOError(src, "File not found");
     }
 
-    DeleteFileInternal(target);
+    RemoveFileInternal(target);
     file_map_[target] = file_map_[src];
     file_map_.erase(src);
     return Status::OK();
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 619fe51..93186ab 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -88,12 +88,12 @@ TEST_F(MemEnvTest, Basics) {
   ASSERT_TRUE(!rand_file);
 
   // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_LEVELDB_OK(env_->DeleteFile("/dir/g"));
+  ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok());
+  ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g"));
   ASSERT_TRUE(!env_->FileExists("/dir/g"));
   ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
   ASSERT_EQ(0, children.size());
-  ASSERT_LEVELDB_OK(env_->DeleteDir("/dir"));
+  ASSERT_LEVELDB_OK(env_->RemoveDir("/dir"));
 }
 
 TEST_F(MemEnvTest, ReadWrite) {
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 112fe96..6501fa4 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -22,21 +22,18 @@
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
+// This workaround can be removed when leveldb::Env::DeleteFile is removed.
 #if defined(_WIN32)
-// The leveldb::Env class below contains a DeleteFile method.
-// At the same time, <windows.h>, a fairly popular header
-// file for Windows applications, defines a DeleteFile macro.
+// On Windows, the method name DeleteFile (below) introduces the risk of
+// triggering undefined behavior by exposing the compiler to different
+// declarations of the Env class in different translation units.
 //
-// Without any intervention on our part, the result of this
-// unfortunate coincidence is that the name of the
-// leveldb::Env::DeleteFile method seen by the compiler depends on
-// whether <windows.h> was included before or after the LevelDB
-// headers.
+// This is because <windows.h>, a fairly popular header file for Windows
+// applications, defines a DeleteFile macro. So, files that include the Windows
+// header before this header will contain an altered Env declaration.
 //
-// To avoid headaches, we undefined DeleteFile (if defined) and
-// redefine it at the bottom of this file. This way <windows.h>
-// can be included before this file (or not at all) and the
-// exported method will always be leveldb::Env::DeleteFile.
+// This workaround ensures that the compiler sees the same Env declaration,
+// independently of whether <windows.h> was included.
 #if defined(DeleteFile)
 #undef DeleteFile
 #define LEVELDB_DELETEFILE_UNDEFINED
@@ -54,7 +51,7 @@ class WritableFile;
 
 class LEVELDB_EXPORT Env {
  public:
-  Env() = default;
+  Env();
 
   Env(const Env&) = delete;
   Env& operator=(const Env&) = delete;
@@ -122,15 +119,48 @@ class LEVELDB_EXPORT Env {
   // Original contents of *results are dropped.
   virtual Status GetChildren(const std::string& dir,
                              std::vector<std::string>* result) = 0;
-
   // Delete the named file.
-  virtual Status DeleteFile(const std::string& fname) = 0;
+  //
+  // The default implementation calls DeleteFile, to support legacy Env
+  // implementations. Updated Env implementations must override RemoveFile and
+  // ignore the existence of DeleteFile. Updated code calling into the Env API
+  // must call RemoveFile instead of DeleteFile.
+  //
+  // A future release will remove DeleteFile and the default implementation of
+  // RemoveFile.
+  virtual Status RemoveFile(const std::string& fname);
+
+  // DEPRECATED: Modern Env implementations should override RemoveFile instead.
+  //
+  // The default implementation calls RemoveFile, to support legacy Env user
+  // code that calls this method on modern Env implementations. Modern Env user
+  // code should call RemoveFile.
+  //
+  // A future release will remove this method.
+  virtual Status DeleteFile(const std::string& fname);
 
   // Create the specified directory.
   virtual Status CreateDir(const std::string& dirname) = 0;
 
   // Delete the specified directory.
-  virtual Status DeleteDir(const std::string& dirname) = 0;
+  //
+  // The default implementation calls DeleteDir, to support legacy Env
+  // implementations. Updated Env implementations must override RemoveDir and
+  // ignore the existence of DeleteDir. Modern code calling into the Env API
+  // must call RemoveDir instead of DeleteDir.
+  //
+  // A future release will remove DeleteDir and the default implementation of
+  // RemoveDir.
+  virtual Status RemoveDir(const std::string& dirname);
+
+  // DEPRECATED: Modern Env implementations should override RemoveDir instead.
+  //
+  // The default implementation calls RemoveDir, to support legacy Env user
+  // code that calls this method on modern Env implementations. Modern Env user
+  // code should call RemoveDir.
+  //
+  // A future release will remove this method.
+  virtual Status DeleteDir(const std::string& dirname);
 
   // Store the size of fname in *file_size.
   virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0;
@@ -333,14 +363,14 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
                      std::vector<std::string>* r) override {
     return target_->GetChildren(dir, r);
   }
-  Status DeleteFile(const std::string& f) override {
-    return target_->DeleteFile(f);
+  Status RemoveFile(const std::string& f) override {
+    return target_->RemoveFile(f);
   }
   Status CreateDir(const std::string& d) override {
     return target_->CreateDir(d);
   }
-  Status DeleteDir(const std::string& d) override {
-    return target_->DeleteDir(d);
+  Status RemoveDir(const std::string& d) override {
+    return target_->RemoveDir(d);
   }
   Status GetFileSize(const std::string& f, uint64_t* s) override {
     return target_->GetFileSize(f, s);
@@ -375,7 +405,8 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
 
 }  // namespace leveldb
 
-// Redefine DeleteFile if necessary.
+// This workaround can be removed when leveldb::Env::DeleteFile is removed.
+// Redefine DeleteFile if it was undefined earlier.
 #if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
 #if defined(UNICODE)
 #define DeleteFile DeleteFileW
diff --git a/util/env.cc b/util/env.cc
index d2f0aef..40e6071 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -4,14 +4,28 @@
 
 #include "leveldb/env.h"
 
+// This workaround can be removed when leveldb::Env::DeleteFile is removed.
+// See env.h for justification.
+#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+#undef DeleteFile
+#endif
+
 namespace leveldb {
 
+Env::Env() = default;
+
 Env::~Env() = default;
 
 Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
   return Status::NotSupported("NewAppendableFile", fname);
 }
 
+Status Env::RemoveDir(const std::string& dirname) { return DeleteDir(dirname); }
+Status Env::DeleteDir(const std::string& dirname) { return RemoveDir(dirname); }
+
+Status Env::RemoveFile(const std::string& fname) { return DeleteFile(fname); }
+Status Env::DeleteFile(const std::string& fname) { return RemoveFile(fname); }
+
 SequentialFile::~SequentialFile() = default;
 
 RandomAccessFile::~RandomAccessFile() = default;
@@ -47,7 +61,7 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
   }
   delete file;  // Will auto-close if we did not close above
   if (!s.ok()) {
-    env->DeleteFile(fname);
+    env->RemoveFile(fname);
   }
   return s;
 }
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 00ca9ae..d84cd1e 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -587,7 +587,7 @@ class PosixEnv : public Env {
     return Status::OK();
   }
 
-  Status DeleteFile(const std::string& filename) override {
+  Status RemoveFile(const std::string& filename) override {
     if (::unlink(filename.c_str()) != 0) {
       return PosixError(filename, errno);
     }
@@ -601,7 +601,7 @@ class PosixEnv : public Env {
     return Status::OK();
   }
 
-  Status DeleteDir(const std::string& dirname) override {
+  Status RemoveDir(const std::string& dirname) override {
     if (::rmdir(dirname.c_str()) != 0) {
       return PosixError(dirname, errno);
     }
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index ed4ac96..36f226f 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -209,7 +209,7 @@ TEST_F(EnvPosixTest, TestOpenOnRead) {
   for (int i = 0; i < kNumFiles; i++) {
     delete files[i];
   }
-  ASSERT_LEVELDB_OK(env_->DeleteFile(test_file));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
 }
 
 #if HAVE_O_CLOEXEC
@@ -228,7 +228,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
@@ -256,7 +256,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
   for (int i = 0; i < kReadOnlyFileLimit; i++) {
     delete mmapped_files[i];
   }
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
@@ -273,7 +273,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
@@ -290,7 +290,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
@@ -307,7 +307,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
 
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
@@ -324,7 +324,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  ASSERT_LEVELDB_OK(env_->DeleteFile(file_path));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
 }
 
 #endif  // HAVE_O_CLOEXEC
diff --git a/util/env_test.cc b/util/env_test.cc
index 09e9d39..223090e 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -193,7 +193,7 @@ TEST_F(EnvTest, ReopenWritableFile) {
   std::string test_dir;
   ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file_name = test_dir + "/reopen_writable_file.txt";
-  env_->DeleteFile(test_file_name);
+  env_->RemoveFile(test_file_name);
 
   WritableFile* writable_file;
   ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
@@ -210,14 +210,14 @@ TEST_F(EnvTest, ReopenWritableFile) {
 
   ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
   ASSERT_EQ(std::string("42"), data);
-  env_->DeleteFile(test_file_name);
+  env_->RemoveFile(test_file_name);
 }
 
 TEST_F(EnvTest, ReopenAppendableFile) {
   std::string test_dir;
   ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
-  env_->DeleteFile(test_file_name);
+  env_->RemoveFile(test_file_name);
 
   WritableFile* appendable_file;
   ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
@@ -234,7 +234,7 @@ TEST_F(EnvTest, ReopenAppendableFile) {
 
   ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
   ASSERT_EQ(std::string("hello world!42"), data);
-  env_->DeleteFile(test_file_name);
+  env_->RemoveFile(test_file_name);
 }
 
 }  // namespace leveldb
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 2dd7794..449f564 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -33,10 +33,6 @@
 #include "util/mutexlock.h"
 #include "util/windows_logger.h"
 
-#if defined(DeleteFile)
-#undef DeleteFile
-#endif  // defined(DeleteFile)
-
 namespace leveldb {
 
 namespace {
@@ -505,7 +501,7 @@ class WindowsEnv : public Env {
     return Status::OK();
   }
 
-  Status DeleteFile(const std::string& filename) override {
+  Status RemoveFile(const std::string& filename) override {
     if (!::DeleteFileA(filename.c_str())) {
       return WindowsError(filename, ::GetLastError());
     }
@@ -519,7 +515,7 @@ class WindowsEnv : public Env {
     return Status::OK();
   }
 
-  Status DeleteDir(const std::string& dirname) override {
+  Status RemoveDir(const std::string& dirname) override {
     if (!::RemoveDirectoryA(dirname.c_str())) {
       return WindowsError(dirname, ::GetLastError());
     }
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
index c75ca7b..15c0274 100644
--- a/util/env_windows_test.cc
+++ b/util/env_windows_test.cc
@@ -52,7 +52,7 @@ TEST_F(EnvWindowsTest, TestOpenOnRead) {
   for (int i = 0; i < kNumFiles; i++) {
     delete files[i];
   }
-  ASSERT_LEVELDB_OK(env_->DeleteFile(test_file));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
 }
 
 }  // namespace leveldb

From 5903e7a1125cacaa1d44367b5b84fe9208e42884 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Fri, 10 Jan 2020 10:45:16 -0800
Subject: [PATCH 127/181] Remove Windows workarounds in some tests.

leveldb::Env::DeleteFile was replaced with leveldb::Env::RemoveFile in
all tests. This allows us to remove workarounds for windows.h #defining
DeleteFile.
PiperOrigin-RevId: 289121105
---
 benchmarks/db_bench.cc        | 5 -----
 db/db_test.cc                 | 5 -----
 db/fault_injection_test.cc    | 5 -----
 db/recovery_test.cc           | 5 -----
 helpers/memenv/memenv_test.cc | 5 -----
 util/env_test.cc              | 5 -----
 6 files changed, 30 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 03da9d8..82ed892 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -18,11 +18,6 @@
 #include "util/random.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 // Comma-separated list of operations to run in the specified order
 //   Actual benchmarks:
 //      fillseq       -- write N values in sequential key order in async mode
diff --git a/db/db_test.cc b/db/db_test.cc
index 2ee6761..8cd90f3 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -23,11 +23,6 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 namespace leveldb {
 
 static std::string RandomString(Random* rnd, int len) {
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 60e4631..8f2b647 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -25,11 +25,6 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 namespace leveldb {
 
 static const int kValueSize = 1000;
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 04b39ae..e5cc916 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -13,11 +13,6 @@
 #include "util/logging.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 namespace leveldb {
 
 class RecoveryTest : public testing::Test {
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 93186ab..3f03cb6 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -13,11 +13,6 @@
 #include "leveldb/env.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 namespace leveldb {
 
 class MemEnvTest : public testing::Test {
diff --git a/util/env_test.cc b/util/env_test.cc
index 223090e..491ef43 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -12,11 +12,6 @@
 #include "util/mutexlock.h"
 #include "util/testutil.h"
 
-#if defined(_WIN32) && defined(DeleteFile)
-// See rationale in env.h
-#undef DeleteFile
-#endif
-
 namespace leveldb {
 
 static const int kDelayMicros = 100000;

From ba369ddbaffcfe635dd620d1aa68473b56267065 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 13 Apr 2020 15:21:41 +0000
Subject: [PATCH 128/181] Use LLVM 10 on Travis CI.

PiperOrigin-RevId: 306236199
---
 .travis.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 42cbe64..766fdc9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,11 +20,11 @@ env:
 addons:
   apt:
     sources:
-    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main'
       key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
     - sourceline: 'ppa:ubuntu-toolchain-r/test'
     packages:
-    - clang-9
+    - clang-10
     - cmake
     - gcc-9
     - g++-9
@@ -40,7 +40,7 @@ addons:
     - gcc@9
     - gperftools
     - kyoto-cabinet
-    - llvm@9
+    - llvm@10
     - ninja
     - snappy
     - sqlite3
@@ -60,7 +60,7 @@ install:
 # below don't work on macOS. Fortunately, the path change above makes the
 # default values (clang and clang++) resolve to the correct compiler on macOS.
 - if [ "$TRAVIS_OS_NAME" = "linux" ]; then
-    if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
+    if [ "$CXX" = "clang++" ]; then export CXX="clang++-10" CC="clang-10"; fi;
   fi
 - echo ${CC}
 - echo ${CXX}

From 201f52201f5dd9701e7a8ceaa0ec4d344e69e022 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 13 Apr 2020 23:18:12 +0000
Subject: [PATCH 129/181] Remove leveldb::port::kLittleEndian.

Clang 10 includes the optimizations described in
https://bugs.llvm.org/show_bug.cgi?id=41761. This means that the
platform-independent implementations of {Decode,Encode}Fixed{32,64}()
compile to one instruction on the most recent Clang and GCC.
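
As a minimal sketch of the pattern this relies on (mirroring the DecodeFixed32
logic in util/coding.h; the standalone function name here is hypothetical), the
byte-by-byte, endianness-independent load below is recognized by recent clang
and gcc and lowered to a single mov (x86-64) / ldr (ARM64):

    #include <cstdint>

    // Hypothetical free function mirroring util/coding.h's DecodeFixed32.
    inline uint32_t LoadLittleEndian32(const char* ptr) {
      const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
      // No port::kLittleEndian fast path needed: modern compilers fold
      // this into a single 32-bit load.
      return (static_cast<uint32_t>(buffer[0])) |
             (static_cast<uint32_t>(buffer[1]) << 8) |
             (static_cast<uint32_t>(buffer[2]) << 16) |
             (static_cast<uint32_t>(buffer[3]) << 24);
    }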

PiperOrigin-RevId: 306330166
---
 CMakeLists.txt        |  3 ---
 port/port_config.h.in |  6 ------
 port/port_example.h   |  4 ----
 port/port_stdcxx.h    |  2 --
 util/coding.h         | 50 ++++---------------------------------------
 5 files changed, 4 insertions(+), 61 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index be41ba4..ae9b0f7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -34,9 +34,6 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
 option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
 option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
 
-include(TestBigEndian)
-test_big_endian(LEVELDB_IS_BIG_ENDIAN)
-
 include(CheckIncludeFile)
 check_include_file("unistd.h" HAVE_UNISTD_H)
 
diff --git a/port/port_config.h.in b/port/port_config.h.in
index 2127315..272671d 100644
--- a/port/port_config.h.in
+++ b/port/port_config.h.in
@@ -30,10 +30,4 @@
 #cmakedefine01 HAVE_SNAPPY
 #endif  // !defined(HAVE_SNAPPY)
 
-// Define to 1 if your processor stores words with the most significant byte
-// first (like Motorola and SPARC, unlike Intel and VAX).
-#if !defined(LEVELDB_IS_BIG_ENDIAN)
-#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
-#endif  // !defined(LEVELDB_IS_BIG_ENDIAN)
-
 #endif  // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
\ No newline at end of file
diff --git a/port/port_example.h b/port/port_example.h
index 1a8fca2..a665910 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -18,10 +18,6 @@ namespace port {
 // TODO(jorlow): Many of these belong more in the environment class rather than
 //               here. We should try moving them and see if it affects perf.
 
-// The following boolean constant must be true on a little-endian machine
-// and false otherwise.
-static const bool kLittleEndian = true /* or some other expression */;
-
 // ------------------ Threading -------------------
 
 // A Mutex represents an exclusive lock.
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index e9cb0e5..2bda48d 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -41,8 +41,6 @@
 namespace leveldb {
 namespace port {
 
-static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
-
 class CondVar;
 
 // Thinly wraps std::mutex.
diff --git a/util/coding.h b/util/coding.h
index 1983ae7..f0bb57b 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -48,29 +48,13 @@ int VarintLength(uint64_t v);
 char* EncodeVarint32(char* dst, uint32_t value);
 char* EncodeVarint64(char* dst, uint64_t value);
 
-// TODO(costan): Remove port::kLittleEndian and the fast paths based on
-//               std::memcpy when clang learns to optimize the generic code, as
-//               described in https://bugs.llvm.org/show_bug.cgi?id=41761
-//
-// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
-// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
-// the platform-independent code in EncodeFixed{32,64}() to mov / str.
-
 // Lower-level versions of Put... that write directly into a character buffer
 // REQUIRES: dst has enough space for the value being written
 
 inline void EncodeFixed32(char* dst, uint32_t value) {
   uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
 
-  if (port::kLittleEndian) {
-    // Fast path for little-endian CPUs. All major compilers optimize this to a
-    // single mov (x86_64) / str (ARM) instruction.
-    std::memcpy(buffer, &value, sizeof(uint32_t));
-    return;
-  }
-
-  // Platform-independent code.
-  // Currently, only gcc optimizes this to a single mov / str instruction.
+  // Recent clang and gcc optimize this to a single mov / str instruction.
   buffer[0] = static_cast<uint8_t>(value);
   buffer[1] = static_cast<uint8_t>(value >> 8);
   buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) {
 inline void EncodeFixed64(char* dst, uint64_t value) {
   uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
 
-  if (port::kLittleEndian) {
-    // Fast path for little-endian CPUs. All major compilers optimize this to a
-    // single mov (x86_64) / str (ARM) instruction.
-    std::memcpy(buffer, &value, sizeof(uint64_t));
-    return;
-  }
-
-  // Platform-independent code.
-  // Currently, only gcc optimizes this to a single mov / str instruction.
+  // Recent clang and gcc optimize this to a single mov / str instruction.
   buffer[0] = static_cast<uint8_t>(value);
   buffer[1] = static_cast<uint8_t>(value >> 8);
   buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) {
 inline uint32_t DecodeFixed32(const char* ptr) {
   const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
 
-  if (port::kLittleEndian) {
-    // Fast path for little-endian CPUs. All major compilers optimize this to a
-    // single mov (x86_64) / ldr (ARM) instruction.
-    uint32_t result;
-    std::memcpy(&result, buffer, sizeof(uint32_t));
-    return result;
-  }
-
-  // Platform-independent code.
-  // Clang and gcc optimize this to a single mov / ldr instruction.
+  // Recent clang and gcc optimize this to a single mov / ldr instruction.
   return (static_cast<uint32_t>(buffer[0])) |
          (static_cast<uint32_t>(buffer[1]) << 8) |
          (static_cast<uint32_t>(buffer[2]) << 16) |
@@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) {
 inline uint64_t DecodeFixed64(const char* ptr) {
   const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
 
-  if (port::kLittleEndian) {
-    // Fast path for little-endian CPUs. All major compilers optimize this to a
-    // single mov (x86_64) / ldr (ARM) instruction.
-    uint64_t result;
-    std::memcpy(&result, buffer, sizeof(uint64_t));
-    return result;
-  }
-
-  // Platform-independent code.
-  // Clang and gcc optimize this to a single mov / ldr instruction.
+  // Recent clang and gcc optimize this to a single mov / ldr instruction.
   return (static_cast<uint64_t>(buffer[0])) |
          (static_cast<uint64_t>(buffer[1]) << 8) |
          (static_cast<uint64_t>(buffer[2]) << 16) |

From 10bc0f2595b8672c0c1756f22051ec420036fdf2 Mon Sep 17 00:00:00 2001
From: lntotk <imjunwang@163.com>
Date: Fri, 24 Apr 2020 02:00:12 +0000
Subject: [PATCH 130/181] remove unnecessary status check

---
 table/table.cc | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/table/table.cc b/table/table.cc
index b07bc88..29e835f 100644
--- a/table/table.cc
+++ b/table/table.cc
@@ -54,13 +54,11 @@ Status Table::Open(const Options& options, RandomAccessFile* file,
 
   // Read the index block
   BlockContents index_block_contents;
-  if (s.ok()) {
-    ReadOptions opt;
-    if (options.paranoid_checks) {
-      opt.verify_checksums = true;
-    }
-    s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
+  ReadOptions opt;
+  if (options.paranoid_checks) {
+    opt.verify_checksums = true;
   }
+  s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
 
   if (s.ok()) {
     // We've successfully read the footer and the index block: we're

From 98a3b8cf6531220c5ecfe124ebfe7d29deb1251b Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Fri, 17 Apr 2020 21:19:50 +0000
Subject: [PATCH 131/181] change const to constexpr

PiperOrigin-RevId: 307113877
---
 db/skiplist_test.cc | 3 +--
 util/cache_test.cc  | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 7c5d09b..9c70c5b 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -151,7 +151,7 @@ TEST(SkipTest, InsertAndLookup) {
 // been concurrently added since the iterator started.
 class ConcurrentTest {
  private:
-  static const uint32_t K = 4;
+  static constexpr uint32_t K = 4;
 
   static uint64_t key(Key key) { return (key >> 40); }
   static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
@@ -280,7 +280,6 @@ class ConcurrentTest {
     }
   }
 };
-const uint32_t ConcurrentTest::K;
 
 // Simple test that does single-threaded testing of the ConcurrentTest
 // scaffolding.
diff --git a/util/cache_test.cc b/util/cache_test.cc
index b5d9873..79cfc27 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -31,7 +31,7 @@ class CacheTest : public testing::Test {
     current_->deleted_values_.push_back(DecodeValue(v));
   }
 
-  static const int kCacheSize = 1000;
+  static constexpr int kCacheSize = 1000;
   std::vector<int> deleted_keys_;
   std::vector<int> deleted_values_;
   Cache* cache_;

From 23d67e7c1f4396919bd0c73c0eced13a0dac37f3 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 28 Apr 2020 16:41:33 +0000
Subject: [PATCH 132/181] Fix C++11 build.

PiperOrigin-RevId: 308839805
---
 db/skiplist_test.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 9c70c5b..b548017 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -281,6 +281,9 @@ class ConcurrentTest {
   }
 };
 
+// Needed when building in C++11 mode.
+constexpr uint32_t ConcurrentTest::K;
+
 // Simple test that does single-threaded testing of the ConcurrentTest
 // scaffolding.
 TEST(SkipTest, ConcurrentWithoutThreads) {

From 3f934e3705444a3df80b128ddefc4cf440441ffe Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 29 Apr 2020 19:59:39 +0000
Subject: [PATCH 133/181] Switch from C headers to C++ headers.

This CL makes the following substitutions; a short illustrative sketch follows the list.

* assert.h -> cassert
* math.h -> cmath
* stdarg.h -> cstdarg
* stddef.h -> cstddef
* stdint.h -> cstdint
* stdio.h -> cstdio
* stdlib.h -> cstdlib
* string.h -> cstring
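
A brief illustrative sketch (not taken from this CL) of what the substitution
looks like in practice; note that the C++ headers declare these names in
namespace std, which is why later cleanups add std:: qualifiers:

    #include <cstdio>   // was: #include <stdio.h>
    #include <cstdint>  // was: #include <stdint.h>

    // Hypothetical example of the resulting usage.
    void ReportSize(std::uint64_t file_size) {
      std::fprintf(stderr, "file size: %llu\n",
                   static_cast<unsigned long long>(file_size));
    }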

PiperOrigin-RevId: 309080151
---
 benchmarks/db_bench.cc          | 5 +++--
 benchmarks/db_bench_sqlite3.cc  | 5 +++--
 benchmarks/db_bench_tree_db.cc  | 5 +++--
 db/db_impl.cc                   | 5 ++---
 db/db_iter.h                    | 2 +-
 db/dbformat.cc                  | 3 +--
 db/dumpfile.cc                  | 2 +-
 db/filename.cc                  | 4 ++--
 db/filename.h                   | 3 +--
 db/leveldbutil.cc               | 2 +-
 db/log_reader.cc                | 2 +-
 db/log_reader.h                 | 2 +-
 db/log_writer.cc                | 2 +-
 db/log_writer.h                 | 2 +-
 db/table_cache.h                | 3 +--
 db/version_set.cc               | 3 +--
 helpers/memenv/memenv.cc        | 3 +--
 include/leveldb/cache.h         | 2 +-
 include/leveldb/db.h            | 4 ++--
 include/leveldb/env.h           | 5 ++---
 include/leveldb/options.h       | 2 +-
 include/leveldb/slice.h         | 7 +++----
 include/leveldb/table.h         | 2 +-
 include/leveldb/table_builder.h | 2 +-
 table/block.h                   | 4 ++--
 table/block_builder.cc          | 3 +--
 table/block_builder.h           | 3 +--
 table/filter_block.h            | 5 ++---
 table/format.h                  | 3 +--
 table/table_builder.cc          | 2 +-
 util/cache.cc                   | 9 +++++----
 util/crc32c.cc                  | 4 ++--
 util/crc32c.h                   | 4 ++--
 util/hash.cc                    | 2 +-
 util/hash.h                     | 4 ++--
 util/histogram.cc               | 4 ++--
 util/logging.cc                 | 8 +++-----
 util/logging.h                  | 5 ++---
 util/random.h                   | 2 +-
 util/status.cc                  | 2 +-
 40 files changed, 65 insertions(+), 76 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 82ed892..3dcd751 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -2,10 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
-#include <stdlib.h>
 #include <sys/types.h>
 
+#include <cstdio>
+#include <cstdlib>
+
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc
index 9c32a2d..2563481 100644
--- a/benchmarks/db_bench_sqlite3.cc
+++ b/benchmarks/db_bench_sqlite3.cc
@@ -3,8 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <sqlite3.h>
-#include <stdio.h>
-#include <stdlib.h>
+
+#include <cstdio>
+#include <cstdlib>
 
 #include "util/histogram.h"
 #include "util/random.h"
diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc
index 43f0f65..60ab3b0 100644
--- a/benchmarks/db_bench_tree_db.cc
+++ b/benchmarks/db_bench_tree_db.cc
@@ -3,8 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <kcpolydb.h>
-#include <stdio.h>
-#include <stdlib.h>
+
+#include <cstdio>
+#include <cstdlib>
 
 #include "util/histogram.h"
 #include "util/random.h"
diff --git a/db/db_impl.cc b/db/db_impl.cc
index ba0a46d..ca53485 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -4,11 +4,10 @@
 
 #include "db/db_impl.h"
 
-#include <stdint.h>
-#include <stdio.h>
-
 #include <algorithm>
 #include <atomic>
+#include <cstdint>
+#include <cstdio>
 #include <set>
 #include <string>
 #include <vector>
diff --git a/db/db_iter.h b/db/db_iter.h
index fd93e91..5977fc8 100644
--- a/db/db_iter.h
+++ b/db/db_iter.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_DB_DB_ITER_H_
 #define STORAGE_LEVELDB_DB_DB_ITER_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "db/dbformat.h"
 #include "leveldb/db.h"
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 459eddf..019aa92 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -4,8 +4,7 @@
 
 #include "db/dbformat.h"
 
-#include <stdio.h>
-
+#include <cstdio>
 #include <sstream>
 
 #include "port/port.h"
diff --git a/db/dumpfile.cc b/db/dumpfile.cc
index 77d5900..6085475 100644
--- a/db/dumpfile.cc
+++ b/db/dumpfile.cc
@@ -4,7 +4,7 @@
 
 #include "leveldb/dumpfile.h"
 
-#include <stdio.h>
+#include <cstdio>
 
 #include "db/dbformat.h"
 #include "db/filename.h"
diff --git a/db/filename.cc b/db/filename.cc
index 9b451fc..f6bec00 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -4,8 +4,8 @@
 
 #include "db/filename.h"
 
-#include <ctype.h>
-#include <stdio.h>
+#include <cassert>
+#include <cstdio>
 
 #include "db/dbformat.h"
 #include "leveldb/env.h"
diff --git a/db/filename.h b/db/filename.h
index 524e813..563c6d8 100644
--- a/db/filename.h
+++ b/db/filename.h
@@ -7,8 +7,7 @@
 #ifndef STORAGE_LEVELDB_DB_FILENAME_H_
 #define STORAGE_LEVELDB_DB_FILENAME_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <string>
 
 #include "leveldb/slice.h"
diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc
index 55cdcc5..8e94abd 100644
--- a/db/leveldbutil.cc
+++ b/db/leveldbutil.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
+#include <cstdio>
 
 #include "leveldb/dumpfile.h"
 #include "leveldb/env.h"
diff --git a/db/log_reader.cc b/db/log_reader.cc
index b770fee..dcd4b75 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -4,7 +4,7 @@
 
 #include "db/log_reader.h"
 
-#include <stdio.h>
+#include <cstdio>
 
 #include "leveldb/env.h"
 #include "util/coding.h"
diff --git a/db/log_reader.h b/db/log_reader.h
index 001da89..75d53f7 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_DB_LOG_READER_H_
 #define STORAGE_LEVELDB_DB_LOG_READER_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "db/log_format.h"
 #include "leveldb/slice.h"
diff --git a/db/log_writer.cc b/db/log_writer.cc
index bfb16fb..ad66bfb 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -4,7 +4,7 @@
 
 #include "db/log_writer.h"
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "leveldb/env.h"
 #include "util/coding.h"
diff --git a/db/log_writer.h b/db/log_writer.h
index c0a2114..ad36794 100644
--- a/db/log_writer.h
+++ b/db/log_writer.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_
 #define STORAGE_LEVELDB_DB_LOG_WRITER_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "db/log_format.h"
 #include "leveldb/slice.h"
diff --git a/db/table_cache.h b/db/table_cache.h
index 93069c8..aac9bfc 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -7,8 +7,7 @@
 #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <string>
 
 #include "db/dbformat.h"
diff --git a/db/version_set.cc b/db/version_set.cc
index 2d5e51a..f23ae14 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -4,9 +4,8 @@
 
 #include "db/version_set.h"
 
-#include <stdio.h>
-
 #include <algorithm>
+#include <cstdio>
 
 #include "db/filename.h"
 #include "db/log_reader.h"
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index 383c78b..0da4e76 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -4,8 +4,7 @@
 
 #include "helpers/memenv/memenv.h"
 
-#include <string.h>
-
+#include <cstring>
 #include <limits>
 #include <map>
 #include <string>
diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h
index 7d1a221..98c95ac 100644
--- a/include/leveldb/cache.h
+++ b/include/leveldb/cache.h
@@ -18,7 +18,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index b73014a..2a995ec 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -5,8 +5,8 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_
 #define STORAGE_LEVELDB_INCLUDE_DB_H_
 
-#include <stdint.h>
-#include <stdio.h>
+#include <cstdint>
+#include <cstdio>
 
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 6501fa4..3ef0393 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -13,9 +13,8 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
 #define STORAGE_LEVELDB_INCLUDE_ENV_H_
 
-#include <stdarg.h>
-#include <stdint.h>
-
+#include <cstdarg>
+#include <cstdint>
 #include <string>
 #include <vector>
 
diff --git a/include/leveldb/options.h b/include/leveldb/options.h
index b748772..0f285bc 100644
--- a/include/leveldb/options.h
+++ b/include/leveldb/options.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
 #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
 
-#include <stddef.h>
+#include <cstddef>
 
 #include "leveldb/export.h"
 
diff --git a/include/leveldb/slice.h b/include/leveldb/slice.h
index 2df417d..37cb821 100644
--- a/include/leveldb/slice.h
+++ b/include/leveldb/slice.h
@@ -15,10 +15,9 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_
 #define STORAGE_LEVELDB_INCLUDE_SLICE_H_
 
-#include <assert.h>
-#include <stddef.h>
-#include <string.h>
-
+#include <cassert>
+#include <cstddef>
+#include <cstring>
 #include <string>
 
 #include "leveldb/export.h"
diff --git a/include/leveldb/table.h b/include/leveldb/table.h
index 25c6013..a30e903 100644
--- a/include/leveldb/table.h
+++ b/include/leveldb/table.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_
 #define STORAGE_LEVELDB_INCLUDE_TABLE_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
diff --git a/include/leveldb/table_builder.h b/include/leveldb/table_builder.h
index 7d8896b..85710c3 100644
--- a/include/leveldb/table_builder.h
+++ b/include/leveldb/table_builder.h
@@ -13,7 +13,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
 #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 #include "leveldb/export.h"
 #include "leveldb/options.h"
diff --git a/table/block.h b/table/block.h
index c8f1f7b..5224108 100644
--- a/table/block.h
+++ b/table/block.h
@@ -5,8 +5,8 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_H_
 
-#include <stddef.h>
-#include <stdint.h>
+#include <cstddef>
+#include <cstdint>
 
 #include "leveldb/iterator.h"
 
diff --git a/table/block_builder.cc b/table/block_builder.cc
index 919cff5..37d4008 100644
--- a/table/block_builder.cc
+++ b/table/block_builder.cc
@@ -28,9 +28,8 @@
 
 #include "table/block_builder.h"
 
-#include <assert.h>
-
 #include <algorithm>
+#include <cassert>
 
 #include "leveldb/comparator.h"
 #include "leveldb/options.h"
diff --git a/table/block_builder.h b/table/block_builder.h
index f91f5e6..7a481cd 100644
--- a/table/block_builder.h
+++ b/table/block_builder.h
@@ -5,8 +5,7 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <vector>
 
 #include "leveldb/slice.h"
diff --git a/table/filter_block.h b/table/filter_block.h
index 73b5399..25ab75b 100644
--- a/table/filter_block.h
+++ b/table/filter_block.h
@@ -9,9 +9,8 @@
 #ifndef STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
 #define STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
 
-#include <stddef.h>
-#include <stdint.h>
-
+#include <cstddef>
+#include <cstdint>
 #include <string>
 #include <vector>
 
diff --git a/table/format.h b/table/format.h
index e49dfdc..f6ea304 100644
--- a/table/format.h
+++ b/table/format.h
@@ -5,8 +5,7 @@
 #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
 #define STORAGE_LEVELDB_TABLE_FORMAT_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <string>
 
 #include "leveldb/slice.h"
diff --git a/table/table_builder.cc b/table/table_builder.cc
index 278febf..29a619d 100644
--- a/table/table_builder.cc
+++ b/table/table_builder.cc
@@ -4,7 +4,7 @@
 
 #include "leveldb/table_builder.h"
 
-#include <assert.h>
+#include <cassert>
 
 #include "leveldb/comparator.h"
 #include "leveldb/env.h"
diff --git a/util/cache.cc b/util/cache.cc
index 12de306..509e5eb 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -2,11 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-
 #include "leveldb/cache.h"
+
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+
 #include "port/port.h"
 #include "port/thread_annotations.h"
 #include "util/hash.h"
diff --git a/util/crc32c.cc b/util/crc32c.cc
index c2e61f7..3f18908 100644
--- a/util/crc32c.cc
+++ b/util/crc32c.cc
@@ -6,8 +6,8 @@
 
 #include "util/crc32c.h"
 
-#include <stddef.h>
-#include <stdint.h>
+#include <cstddef>
+#include <cstdint>
 
 #include "port/port.h"
 #include "util/coding.h"
diff --git a/util/crc32c.h b/util/crc32c.h
index 98fabb0..b420b5f 100644
--- a/util/crc32c.h
+++ b/util/crc32c.h
@@ -5,8 +5,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_CRC32C_H_
 #define STORAGE_LEVELDB_UTIL_CRC32C_H_
 
-#include <stddef.h>
-#include <stdint.h>
+#include <cstddef>
+#include <cstdint>
 
 namespace leveldb {
 namespace crc32c {
diff --git a/util/hash.cc b/util/hash.cc
index dd47c11..8122fa8 100644
--- a/util/hash.cc
+++ b/util/hash.cc
@@ -4,7 +4,7 @@
 
 #include "util/hash.h"
 
-#include <string.h>
+#include <cstring>
 
 #include "util/coding.h"
 
diff --git a/util/hash.h b/util/hash.h
index 74bdb6e..87ab279 100644
--- a/util/hash.h
+++ b/util/hash.h
@@ -7,8 +7,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_HASH_H_
 #define STORAGE_LEVELDB_UTIL_HASH_H_
 
-#include <stddef.h>
-#include <stdint.h>
+#include <cstddef>
+#include <cstdint>
 
 namespace leveldb {
 
diff --git a/util/histogram.cc b/util/histogram.cc
index 65092c8..d110d28 100644
--- a/util/histogram.cc
+++ b/util/histogram.cc
@@ -4,8 +4,8 @@
 
 #include "util/histogram.h"
 
-#include <math.h>
-#include <stdio.h>
+#include <cmath>
+#include <cstdio>
 
 #include "port/port.h"
 
diff --git a/util/logging.cc b/util/logging.cc
index 75e9d03..39d8551 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -4,11 +4,9 @@
 
 #include "util/logging.h"
 
-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
 #include <limits>
 
 #include "leveldb/env.h"
diff --git a/util/logging.h b/util/logging.h
index 8ff2da8..a0394b2 100644
--- a/util/logging.h
+++ b/util/logging.h
@@ -8,9 +8,8 @@
 #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
 #define STORAGE_LEVELDB_UTIL_LOGGING_H_
 
-#include <stdint.h>
-#include <stdio.h>
-
+#include <cstdint>
+#include <cstdio>
 #include <string>
 
 #include "port/port.h"
diff --git a/util/random.h b/util/random.h
index 76f7daf..fe76ab4 100644
--- a/util/random.h
+++ b/util/random.h
@@ -5,7 +5,7 @@
 #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_
 #define STORAGE_LEVELDB_UTIL_RANDOM_H_
 
-#include <stdint.h>
+#include <cstdint>
 
 namespace leveldb {
 
diff --git a/util/status.cc b/util/status.cc
index 15ce747..6b6528b 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -4,7 +4,7 @@
 
 #include "leveldb/status.h"
 
-#include <stdio.h>
+#include <cstdio>
 
 #include "port/port.h"
 

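The hunks above swap the C-compatibility headers (<stdint.h>, <stddef.h>, <assert.h>, <stdio.h>, and friends) for their C++ counterparts (<cstdint>, <cstddef>, <cassert>, <cstdio>, ...). A minimal standalone sketch of that convention, assuming nothing beyond the standard library (the file below is illustrative and not a LevelDB source file):

    // header_style_sketch.cc -- illustrative example only, not part of the tree.
    // The <cXXX> headers declare the C library names in namespace std; only the
    // std-qualified spellings are guaranteed by the C++ standard.
    #include <cassert>  // instead of <assert.h>
    #include <cstdint>  // instead of <stdint.h>
    #include <cstdio>   // instead of <stdio.h>

    int main() {
      std::uint64_t file_number = 42;  // fixed-width type from <cstdint>
      assert(file_number > 0);         // assert is a macro, so it keeps its name
      std::printf("file number: %llu\n",
                  static_cast<unsigned long long>(file_number));
      return 0;
    }

As a side effect of the same cleanup, util/cache.cc also moves its own interface header, "leveldb/cache.h", above the standard includes.
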
From a6b3a2012e9c598258a295aef74d88b796c47a2b Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 29 Apr 2020 22:31:41 +0000
Subject: [PATCH 134/181] Add some std:: qualifiers to types and functions.

PiperOrigin-RevId: 309110431
---
 benchmarks/db_bench.cc         | 136 +++++++++++++++++----------------
 benchmarks/db_bench_sqlite3.cc |  98 ++++++++++++------------
 benchmarks/db_bench_tree_db.cc |  98 +++++++++++++-----------
 db/autocompact_test.cc         |   6 +-
 db/c.cc                        |   4 +-
 db/corruption_test.cc          |  11 +--
 db/db_impl.cc                  |  30 ++++----
 db/db_iter.cc                  |   4 +-
 db/db_test.cc                  |  73 +++++++++---------
 db/dbformat.cc                 |   2 +-
 db/fault_injection_test.cc     |   4 +-
 db/filename.cc                 |   8 +-
 db/leveldbutil.cc              |   9 ++-
 db/log_reader.cc               |   2 +-
 db/log_test.cc                 |   2 +-
 db/memtable.cc                 |   4 +-
 db/recovery_test.cc            |  13 ++--
 db/repair.cc                   |   3 +-
 db/skiplist_test.cc            |   2 +-
 db/version_set.cc              |  19 ++---
 helpers/memenv/memenv.cc       |   6 +-
 include/leveldb/env.h          |   2 +-
 issues/issue178_test.cc        |   2 +-
 table/table_test.cc            |  32 ++++----
 util/bloom_test.cc             |  15 ++--
 util/cache.cc                  |   2 +-
 util/env.cc                    |   4 +-
 util/env_posix_test.cc         |   6 +-
 util/env_windows_test.cc       |   4 +-
 util/histogram.cc              |  20 ++---
 util/logging.cc                |   6 +-
 util/posix_logger.h            |   8 +-
 util/status.cc                 |  16 ++--
 util/windows_logger.h          |   8 +-
 34 files changed, 345 insertions(+), 314 deletions(-)
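
The same convention motivates the std:: qualifiers below: once a translation unit includes <cstdio>, <cstdlib>, or <cstring>, the portable spellings of the C library calls are std::fprintf, std::exit, std::memcpy, and so on. A minimal sketch of the before/after pattern, assuming only the standard library (the snippet is illustrative and not taken from this patch):

    // qualifier_sketch.cc -- illustrative example only, not part of this patch.
    #include <cstdio>
    #include <cstdlib>

    int main() {
      // Before: snprintf(...); fprintf(...); fflush(stderr); exit(1);
      char rate[32];
      std::snprintf(rate, sizeof(rate), "%6.1f MB/s", 12.5);
      std::fprintf(stderr, "rate: %s\n", rate);
      std::fflush(stderr);
      if (rate[0] == '\0') {
        std::exit(1);  // never taken here; shows the qualified spelling
      }
      return 0;
    }

The hunks that follow apply this renaming, plus the line re-wrapping the longer names require, without changing any format strings or control flow.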

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 3dcd751..288b119 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -221,8 +221,8 @@ class Stats {
       double micros = now - last_op_finish_;
       hist_.Add(micros);
       if (micros > 20000) {
-        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
-        fflush(stderr);
+        std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+        std::fflush(stderr);
       }
       last_op_finish_ = now;
     }
@@ -243,8 +243,8 @@ class Stats {
         next_report_ += 50000;
       else
         next_report_ += 100000;
-      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
-      fflush(stderr);
+      std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+      std::fflush(stderr);
     }
   }
 
@@ -261,18 +261,20 @@ class Stats {
       // elapsed times.
       double elapsed = (finish_ - start_) * 1e-6;
       char rate[100];
-      snprintf(rate, sizeof(rate), "%6.1f MB/s",
-               (bytes_ / 1048576.0) / elapsed);
+      std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+                    (bytes_ / 1048576.0) / elapsed);
       extra = rate;
     }
     AppendWithSpace(&extra, message_);
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-            seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
+    std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+                 name.ToString().c_str(), seconds_ * 1e6 / done_,
+                 (extra.empty() ? "" : " "), extra.c_str());
     if (FLAGS_histogram) {
-      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+      std::fprintf(stdout, "Microseconds per op:\n%s\n",
+                   hist_.ToString().c_str());
     }
-    fflush(stdout);
+    std::fflush(stdout);
   }
 };
 
@@ -323,51 +325,55 @@ class Benchmark {
   void PrintHeader() {
     const int kKeySize = 16;
     PrintEnvironment();
-    fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
-    fprintf(stdout, "Values:     %d bytes each (%d bytes after compression)\n",
-            FLAGS_value_size,
-            static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
-    fprintf(stdout, "Entries:    %d\n", num_);
-    fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-             1048576.0));
-    fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
-             1048576.0));
+    std::fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
+    std::fprintf(
+        stdout, "Values:     %d bytes each (%d bytes after compression)\n",
+        FLAGS_value_size,
+        static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+    std::fprintf(stdout, "Entries:    %d\n", num_);
+    std::fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
+                 ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+                  1048576.0));
+    std::fprintf(
+        stdout, "FileSize:   %.1f MB (estimated)\n",
+        (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+         1048576.0));
     PrintWarnings();
-    fprintf(stdout, "------------------------------------------------\n");
+    std::fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(
+    std::fprintf(
         stdout,
         "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
-    fprintf(stdout,
-            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+    std::fprintf(
+        stdout,
+        "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
 #endif
 
     // See if snappy is working by attempting to compress a compressible string
     const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
     std::string compressed;
     if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
-      fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
+      std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
     } else if (compressed.size() >= sizeof(text)) {
-      fprintf(stdout, "WARNING: Snappy compression is not effective\n");
+      std::fprintf(stdout, "WARNING: Snappy compression is not effective\n");
     }
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
-            kMinorVersion);
+    std::fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
+                 kMinorVersion);
 
 #if defined(__linux)
     time_t now = time(nullptr);
-    fprintf(stderr, "Date:       %s", ctime(&now));  // ctime() adds newline
+    std::fprintf(stderr, "Date:       %s",
+                 ctime(&now));  // ctime() adds newline
 
-    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+    FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
     if (cpuinfo != nullptr) {
       char line[1000];
       int num_cpus = 0;
@@ -387,9 +393,9 @@ class Benchmark {
           cache_size = val.ToString();
         }
       }
-      fclose(cpuinfo);
-      fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
-      fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
+      std::fclose(cpuinfo);
+      std::fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
+      std::fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
     }
 #endif
   }
@@ -516,14 +522,15 @@ class Benchmark {
         PrintStats("leveldb.sstables");
       } else {
         if (!name.empty()) {  // No error message for empty name
-          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+          std::fprintf(stderr, "unknown benchmark '%s'\n",
+                       name.ToString().c_str());
         }
       }
 
       if (fresh_db) {
         if (FLAGS_use_existing_db) {
-          fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
-                  name.ToString().c_str());
+          std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
+                       name.ToString().c_str());
           method = nullptr;
         } else {
           delete db_;
@@ -625,7 +632,7 @@ class Benchmark {
       bytes += size;
     }
     // Print so result is not dead
-    fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
+    std::fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
 
     thread->stats.AddBytes(bytes);
     thread->stats.AddMessage(label);
@@ -649,8 +656,8 @@ class Benchmark {
       thread->stats.AddMessage("(snappy failure)");
     } else {
       char buf[100];
-      snprintf(buf, sizeof(buf), "(output: %.1f%%)",
-               (produced * 100.0) / bytes);
+      std::snprintf(buf, sizeof(buf), "(output: %.1f%%)",
+                    (produced * 100.0) / bytes);
       thread->stats.AddMessage(buf);
       thread->stats.AddBytes(bytes);
     }
@@ -692,8 +699,8 @@ class Benchmark {
     options.reuse_logs = FLAGS_reuse_logs;
     Status s = DB::Open(options, FLAGS_db, &db_);
     if (!s.ok()) {
-      fprintf(stderr, "open error: %s\n", s.ToString().c_str());
-      exit(1);
+      std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
+      std::exit(1);
     }
   }
 
@@ -712,7 +719,7 @@ class Benchmark {
   void DoWrite(ThreadState* thread, bool seq) {
     if (num_ != FLAGS_num) {
       char msg[100];
-      snprintf(msg, sizeof(msg), "(%d ops)", num_);
+      std::snprintf(msg, sizeof(msg), "(%d ops)", num_);
       thread->stats.AddMessage(msg);
     }
 
@@ -725,15 +732,15 @@ class Benchmark {
       for (int j = 0; j < entries_per_batch_; j++) {
         const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
-        snprintf(key, sizeof(key), "%016d", k);
+        std::snprintf(key, sizeof(key), "%016d", k);
         batch.Put(key, gen.Generate(value_size_));
         bytes += value_size_ + strlen(key);
         thread->stats.FinishedSingleOp();
       }
       s = db_->Write(write_options_, &batch);
       if (!s.ok()) {
-        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-        exit(1);
+        std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+        std::exit(1);
       }
     }
     thread->stats.AddBytes(bytes);
@@ -772,14 +779,14 @@ class Benchmark {
     for (int i = 0; i < reads_; i++) {
       char key[100];
       const int k = thread->rand.Next() % FLAGS_num;
-      snprintf(key, sizeof(key), "%016d", k);
+      std::snprintf(key, sizeof(key), "%016d", k);
       if (db_->Get(options, key, &value).ok()) {
         found++;
       }
       thread->stats.FinishedSingleOp();
     }
     char msg[100];
-    snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+    std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
     thread->stats.AddMessage(msg);
   }
 
@@ -789,7 +796,7 @@ class Benchmark {
     for (int i = 0; i < reads_; i++) {
       char key[100];
       const int k = thread->rand.Next() % FLAGS_num;
-      snprintf(key, sizeof(key), "%016d.", k);
+      std::snprintf(key, sizeof(key), "%016d.", k);
       db_->Get(options, key, &value);
       thread->stats.FinishedSingleOp();
     }
@@ -802,7 +809,7 @@ class Benchmark {
     for (int i = 0; i < reads_; i++) {
       char key[100];
       const int k = thread->rand.Next() % range;
-      snprintf(key, sizeof(key), "%016d", k);
+      std::snprintf(key, sizeof(key), "%016d", k);
       db_->Get(options, key, &value);
       thread->stats.FinishedSingleOp();
     }
@@ -815,14 +822,14 @@ class Benchmark {
       Iterator* iter = db_->NewIterator(options);
       char key[100];
       const int k = thread->rand.Next() % FLAGS_num;
-      snprintf(key, sizeof(key), "%016d", k);
+      std::snprintf(key, sizeof(key), "%016d", k);
       iter->Seek(key);
       if (iter->Valid() && iter->key() == key) found++;
       delete iter;
       thread->stats.FinishedSingleOp();
     }
     char msg[100];
-    snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+    std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
     thread->stats.AddMessage(msg);
   }
 
@@ -835,14 +842,14 @@ class Benchmark {
       for (int j = 0; j < entries_per_batch_; j++) {
         const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
-        snprintf(key, sizeof(key), "%016d", k);
+        std::snprintf(key, sizeof(key), "%016d", k);
         batch.Delete(key);
         thread->stats.FinishedSingleOp();
       }
       s = db_->Write(write_options_, &batch);
       if (!s.ok()) {
-        fprintf(stderr, "del error: %s\n", s.ToString().c_str());
-        exit(1);
+        std::fprintf(stderr, "del error: %s\n", s.ToString().c_str());
+        std::exit(1);
       }
     }
   }
@@ -868,11 +875,11 @@ class Benchmark {
 
         const int k = thread->rand.Next() % FLAGS_num;
         char key[100];
-        snprintf(key, sizeof(key), "%016d", k);
+        std::snprintf(key, sizeof(key), "%016d", k);
         Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
         if (!s.ok()) {
-          fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-          exit(1);
+          std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+          std::exit(1);
         }
       }
 
@@ -888,7 +895,7 @@ class Benchmark {
     if (!db_->GetProperty(key, &stats)) {
       stats = "(failed)";
     }
-    fprintf(stdout, "\n%s\n", stats.c_str());
+    std::fprintf(stdout, "\n%s\n", stats.c_str());
   }
 
   static void WriteToFile(void* arg, const char* buf, int n) {
@@ -897,17 +904,18 @@ class Benchmark {
 
   void HeapProfile() {
     char fname[100];
-    snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
+    std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db,
+                  ++heap_counter_);
     WritableFile* file;
     Status s = g_env->NewWritableFile(fname, &file);
     if (!s.ok()) {
-      fprintf(stderr, "%s\n", s.ToString().c_str());
+      std::fprintf(stderr, "%s\n", s.ToString().c_str());
       return;
     }
     bool ok = port::GetHeapProfile(WriteToFile, file);
     delete file;
     if (!ok) {
-      fprintf(stderr, "heap profiling not supported\n");
+      std::fprintf(stderr, "heap profiling not supported\n");
       g_env->RemoveFile(fname);
     }
   }
@@ -962,8 +970,8 @@ int main(int argc, char** argv) {
     } else if (strncmp(argv[i], "--db=", 5) == 0) {
       FLAGS_db = argv[i] + 5;
     } else {
-      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
-      exit(1);
+      std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+      std::exit(1);
     }
   }
 
diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc
index 2563481..c9be652 100644
--- a/benchmarks/db_bench_sqlite3.cc
+++ b/benchmarks/db_bench_sqlite3.cc
@@ -84,23 +84,23 @@ static const char* FLAGS_db = nullptr;
 
 inline static void ExecErrorCheck(int status, char* err_msg) {
   if (status != SQLITE_OK) {
-    fprintf(stderr, "SQL error: %s\n", err_msg);
+    std::fprintf(stderr, "SQL error: %s\n", err_msg);
     sqlite3_free(err_msg);
-    exit(1);
+    std::exit(1);
   }
 }
 
 inline static void StepErrorCheck(int status) {
   if (status != SQLITE_DONE) {
-    fprintf(stderr, "SQL step error: status = %d\n", status);
-    exit(1);
+    std::fprintf(stderr, "SQL step error: status = %d\n", status);
+    std::exit(1);
   }
 }
 
 inline static void ErrorCheck(int status) {
   if (status != SQLITE_OK) {
-    fprintf(stderr, "sqlite3 error: status = %d\n", status);
-    exit(1);
+    std::fprintf(stderr, "sqlite3 error: status = %d\n", status);
+    std::exit(1);
   }
 }
 
@@ -182,36 +182,38 @@ class Benchmark {
   void PrintHeader() {
     const int kKeySize = 16;
     PrintEnvironment();
-    fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
-    fprintf(stdout, "Values:     %d bytes each\n", FLAGS_value_size);
-    fprintf(stdout, "Entries:    %d\n", num_);
-    fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-             1048576.0));
+    std::fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
+    std::fprintf(stdout, "Values:     %d bytes each\n", FLAGS_value_size);
+    std::fprintf(stdout, "Entries:    %d\n", num_);
+    std::fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
+                 ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+                  1048576.0));
     PrintWarnings();
-    fprintf(stdout, "------------------------------------------------\n");
+    std::fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(
+    std::fprintf(
         stdout,
         "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
-    fprintf(stdout,
-            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+    std::fprintf(
+        stdout,
+        "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
 #endif
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "SQLite:     version %s\n", SQLITE_VERSION);
+    std::fprintf(stderr, "SQLite:     version %s\n", SQLITE_VERSION);
 
 #if defined(__linux)
     time_t now = time(nullptr);
-    fprintf(stderr, "Date:       %s", ctime(&now));  // ctime() adds newline
+    std::fprintf(stderr, "Date:       %s",
+                 ctime(&now));  // ctime() adds newline
 
-    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+    FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
     if (cpuinfo != nullptr) {
       char line[1000];
       int num_cpus = 0;
@@ -231,9 +233,9 @@ class Benchmark {
           cache_size = val.ToString();
         }
       }
-      fclose(cpuinfo);
-      fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
-      fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
+      std::fclose(cpuinfo);
+      std::fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
+      std::fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
     }
 #endif
   }
@@ -254,8 +256,8 @@ class Benchmark {
       double micros = (now - last_op_finish_) * 1e6;
       hist_.Add(micros);
       if (micros > 20000) {
-        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
-        fflush(stderr);
+        std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+        std::fflush(stderr);
       }
       last_op_finish_ = now;
     }
@@ -276,8 +278,8 @@ class Benchmark {
         next_report_ += 50000;
       else
         next_report_ += 100000;
-      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
-      fflush(stderr);
+      std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+      std::fflush(stderr);
     }
   }
 
@@ -290,8 +292,8 @@ class Benchmark {
 
     if (bytes_ > 0) {
       char rate[100];
-      snprintf(rate, sizeof(rate), "%6.1f MB/s",
-               (bytes_ / 1048576.0) / (finish - start_));
+      std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+                    (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
         message_ = std::string(rate) + " " + message_;
       } else {
@@ -299,13 +301,14 @@ class Benchmark {
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
-            message_.c_str());
+    std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+                 name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+                 (message_.empty() ? "" : " "), message_.c_str());
     if (FLAGS_histogram) {
-      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+      std::fprintf(stdout, "Microseconds per op:\n%s\n",
+                   hist_.ToString().c_str());
     }
-    fflush(stdout);
+    std::fflush(stdout);
   }
 
  public:
@@ -405,7 +408,8 @@ class Benchmark {
       } else {
         known = false;
         if (name != Slice()) {  // No error message for empty name
-          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+          std::fprintf(stderr, "unknown benchmark '%s'\n",
+                       name.ToString().c_str());
         }
       }
       if (known) {
@@ -425,26 +429,26 @@ class Benchmark {
     // Open database
     std::string tmp_dir;
     Env::Default()->GetTestDirectory(&tmp_dir);
-    snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
-             tmp_dir.c_str(), db_num_);
+    std::snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+                  tmp_dir.c_str(), db_num_);
     status = sqlite3_open(file_name, &db_);
     if (status) {
-      fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
-      exit(1);
+      std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
+      std::exit(1);
     }
 
     // Change SQLite cache size
     char cache_size[100];
-    snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
-             FLAGS_num_pages);
+    std::snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
+                  FLAGS_num_pages);
     status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
     ExecErrorCheck(status, err_msg);
 
     // FLAGS_page_size is defaulted to 1024
     if (FLAGS_page_size != 1024) {
       char page_size[100];
-      snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
-               FLAGS_page_size);
+      std::snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
+                    FLAGS_page_size);
       status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
     }
@@ -492,7 +496,7 @@ class Benchmark {
 
     if (num_entries != num_) {
       char msg[100];
-      snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+      std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
       message_ = msg;
     }
 
@@ -539,7 +543,7 @@ class Benchmark {
         const int k =
             (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
         char key[100];
-        snprintf(key, sizeof(key), "%016d", k);
+        std::snprintf(key, sizeof(key), "%016d", k);
 
         // Bind KV values into replace_stmt
         status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
@@ -612,7 +616,7 @@ class Benchmark {
         // Create key value
         char key[100];
         int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
-        snprintf(key, sizeof(key), "%016d", k);
+        std::snprintf(key, sizeof(key), "%016d", k);
 
         // Bind key value into read_stmt
         status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
@@ -704,8 +708,8 @@ int main(int argc, char** argv) {
     } else if (strncmp(argv[i], "--db=", 5) == 0) {
       FLAGS_db = argv[i] + 5;
     } else {
-      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
-      exit(1);
+      std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+      std::exit(1);
     }
   }
 
diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc
index 60ab3b0..533600b 100644
--- a/benchmarks/db_bench_tree_db.cc
+++ b/benchmarks/db_bench_tree_db.cc
@@ -75,7 +75,7 @@ static const char* FLAGS_db = nullptr;
 inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
   // Synchronize will flush writes to disk
   if (!db_->synchronize()) {
-    fprintf(stderr, "synchronize error: %s\n", db_->error().name());
+    std::fprintf(stderr, "synchronize error: %s\n", db_->error().name());
   }
 }
 
@@ -150,42 +150,47 @@ class Benchmark {
   void PrintHeader() {
     const int kKeySize = 16;
     PrintEnvironment();
-    fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
-    fprintf(stdout, "Values:     %d bytes each (%d bytes after compression)\n",
-            FLAGS_value_size,
-            static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
-    fprintf(stdout, "Entries:    %d\n", num_);
-    fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
-             1048576.0));
-    fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
-             1048576.0));
+    std::fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
+    std::fprintf(
+        stdout, "Values:     %d bytes each (%d bytes after compression)\n",
+        FLAGS_value_size,
+        static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+    std::fprintf(stdout, "Entries:    %d\n", num_);
+    std::fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
+                 ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+                  1048576.0));
+    std::fprintf(
+        stdout, "FileSize:   %.1f MB (estimated)\n",
+        (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+         1048576.0));
     PrintWarnings();
-    fprintf(stdout, "------------------------------------------------\n");
+    std::fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(
+    std::fprintf(
         stdout,
         "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
-    fprintf(stdout,
-            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+    std::fprintf(
+        stdout,
+        "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
 #endif
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "Kyoto Cabinet:    version %s, lib ver %d, lib rev %d\n",
-            kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
+    std::fprintf(
+        stderr, "Kyoto Cabinet:    version %s, lib ver %d, lib rev %d\n",
+        kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
 
 #if defined(__linux)
     time_t now = time(nullptr);
-    fprintf(stderr, "Date:           %s", ctime(&now));  // ctime() adds newline
+    std::fprintf(stderr, "Date:           %s",
+                 ctime(&now));  // ctime() adds newline
 
-    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+    FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
     if (cpuinfo != nullptr) {
       char line[1000];
       int num_cpus = 0;
@@ -205,9 +210,10 @@ class Benchmark {
           cache_size = val.ToString();
         }
       }
-      fclose(cpuinfo);
-      fprintf(stderr, "CPU:            %d * %s\n", num_cpus, cpu_type.c_str());
-      fprintf(stderr, "CPUCache:       %s\n", cache_size.c_str());
+      std::fclose(cpuinfo);
+      std::fprintf(stderr, "CPU:            %d * %s\n", num_cpus,
+                   cpu_type.c_str());
+      std::fprintf(stderr, "CPUCache:       %s\n", cache_size.c_str());
     }
 #endif
   }
@@ -228,8 +234,8 @@ class Benchmark {
       double micros = (now - last_op_finish_) * 1e6;
       hist_.Add(micros);
       if (micros > 20000) {
-        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
-        fflush(stderr);
+        std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+        std::fflush(stderr);
       }
       last_op_finish_ = now;
     }
@@ -250,8 +256,8 @@ class Benchmark {
         next_report_ += 50000;
       else
         next_report_ += 100000;
-      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
-      fflush(stderr);
+      std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+      std::fflush(stderr);
     }
   }
 
@@ -264,8 +270,8 @@ class Benchmark {
 
     if (bytes_ > 0) {
       char rate[100];
-      snprintf(rate, sizeof(rate), "%6.1f MB/s",
-               (bytes_ / 1048576.0) / (finish - start_));
+      std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+                    (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
         message_ = std::string(rate) + " " + message_;
       } else {
@@ -273,13 +279,14 @@ class Benchmark {
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
-            message_.c_str());
+    std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+                 name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+                 (message_.empty() ? "" : " "), message_.c_str());
     if (FLAGS_histogram) {
-      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+      std::fprintf(stdout, "Microseconds per op:\n%s\n",
+                   hist_.ToString().c_str());
     }
-    fflush(stdout);
+    std::fflush(stdout);
   }
 
  public:
@@ -310,7 +317,7 @@ class Benchmark {
 
   ~Benchmark() {
     if (!db_->close()) {
-      fprintf(stderr, "close error: %s\n", db_->error().name());
+      std::fprintf(stderr, "close error: %s\n", db_->error().name());
     }
   }
 
@@ -374,7 +381,8 @@ class Benchmark {
       } else {
         known = false;
         if (name != Slice()) {  // No error message for empty name
-          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+          std::fprintf(stderr, "unknown benchmark '%s'\n",
+                       name.ToString().c_str());
         }
       }
       if (known) {
@@ -393,8 +401,8 @@ class Benchmark {
     db_num_++;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
-    snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
-             test_dir.c_str(), db_num_);
+    std::snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+                  test_dir.c_str(), db_num_);
 
     // Create tuning options and open the database
     int open_options =
@@ -413,7 +421,7 @@ class Benchmark {
       open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
     }
     if (!db_->open(file_name, open_options)) {
-      fprintf(stderr, "open error: %s\n", db_->error().name());
+      std::fprintf(stderr, "open error: %s\n", db_->error().name());
     }
   }
 
@@ -433,7 +441,7 @@ class Benchmark {
 
     if (num_entries != num_) {
       char msg[100];
-      snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+      std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
       message_ = msg;
     }
 
@@ -441,11 +449,11 @@ class Benchmark {
     for (int i = 0; i < num_entries; i++) {
       const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
       char key[100];
-      snprintf(key, sizeof(key), "%016d", k);
+      std::snprintf(key, sizeof(key), "%016d", k);
       bytes_ += value_size + strlen(key);
       std::string cpp_key = key;
       if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) {
-        fprintf(stderr, "set error: %s\n", db_->error().name());
+        std::fprintf(stderr, "set error: %s\n", db_->error().name());
       }
       FinishedSingleOp();
     }
@@ -467,7 +475,7 @@ class Benchmark {
     for (int i = 0; i < reads_; i++) {
       char key[100];
       const int k = rand_.Next() % reads_;
-      snprintf(key, sizeof(key), "%016d", k);
+      std::snprintf(key, sizeof(key), "%016d", k);
       db_->get(key, &value);
       FinishedSingleOp();
     }
@@ -505,8 +513,8 @@ int main(int argc, char** argv) {
     } else if (strncmp(argv[i], "--db=", 5) == 0) {
       FLAGS_db = argv[i] + 5;
     } else {
-      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
-      exit(1);
+      std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+      std::exit(1);
     }
   }
 
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index 9779c95..3b7241b 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -30,7 +30,7 @@ class AutoCompactTest : public testing::Test {
 
   std::string Key(int i) {
     char buf[100];
-    snprintf(buf, sizeof(buf), "key%06d", i);
+    std::snprintf(buf, sizeof(buf), "key%06d", i);
     return std::string(buf);
   }
 
@@ -89,8 +89,8 @@ void AutoCompactTest::DoReads(int n) {
     // Wait a little bit to allow any triggered compactions to complete.
     Env::Default()->SleepForMicroseconds(1000000);
     uint64_t size = Size(Key(0), Key(n));
-    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
-            size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+    std::fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+                 size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
     if (size <= initial_size / 10) {
       break;
     }
diff --git a/db/c.cc b/db/c.cc
index 3a492f9..b5c9251 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -158,7 +158,7 @@ static bool SaveError(char** errptr, const Status& s) {
 
 static char* CopyString(const std::string& str) {
   char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
-  memcpy(result, str.data(), sizeof(char) * str.size());
+  std::memcpy(result, str.data(), sizeof(char) * str.size());
   return result;
 }
 
@@ -548,7 +548,7 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
   }
 
   char* buffer = static_cast<char*>(malloc(result.size() + 1));
-  memcpy(buffer, result.data(), result.size());
+  std::memcpy(buffer, result.data(), result.size());
   buffer[result.size()] = '\0';
   return buffer;
 }
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index b22f9e7..a31f448 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -58,7 +58,7 @@ class CorruptionTest : public testing::Test {
     std::string key_space, value_space;
     WriteBatch batch;
     for (int i = 0; i < n; i++) {
-      // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+      // if ((i % 100) == 0) std::fprintf(stderr, "@ %d of %d\n", i, n);
       Slice key = Key(i, &key_space);
       batch.Clear();
       batch.Put(key, Value(i, &value_space));
@@ -102,9 +102,10 @@ class CorruptionTest : public testing::Test {
     }
     delete iter;
 
-    fprintf(stderr,
-            "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
-            min_expected, max_expected, correct, bad_keys, bad_values, missed);
+    std::fprintf(
+        stderr,
+        "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
+        min_expected, max_expected, correct, bad_keys, bad_values, missed);
     ASSERT_LE(min_expected, correct);
     ASSERT_GE(max_expected, correct);
   }
@@ -169,7 +170,7 @@ class CorruptionTest : public testing::Test {
   // Return the ith key
   Slice Key(int i, std::string* storage) {
     char buf[100];
-    snprintf(buf, sizeof(buf), "%016d", i);
+    std::snprintf(buf, sizeof(buf), "%016d", i);
     storage->assign(buf, strlen(buf));
     return Slice(*storage);
   }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index ca53485..59b834f 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -350,8 +350,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
   }
   if (!expected.empty()) {
     char buf[50];
-    snprintf(buf, sizeof(buf), "%d missing files; e.g.",
-             static_cast<int>(expected.size()));
+    std::snprintf(buf, sizeof(buf), "%d missing files; e.g.",
+                  static_cast<int>(expected.size()));
     return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
   }
 
@@ -1396,26 +1396,26 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
       return false;
     } else {
       char buf[100];
-      snprintf(buf, sizeof(buf), "%d",
-               versions_->NumLevelFiles(static_cast<int>(level)));
+      std::snprintf(buf, sizeof(buf), "%d",
+                    versions_->NumLevelFiles(static_cast<int>(level)));
       *value = buf;
       return true;
     }
   } else if (in == "stats") {
     char buf[200];
-    snprintf(buf, sizeof(buf),
-             "                               Compactions\n"
-             "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
-             "--------------------------------------------------\n");
+    std::snprintf(buf, sizeof(buf),
+                  "                               Compactions\n"
+                  "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
+                  "--------------------------------------------------\n");
     value->append(buf);
     for (int level = 0; level < config::kNumLevels; level++) {
       int files = versions_->NumLevelFiles(level);
       if (stats_[level].micros > 0 || files > 0) {
-        snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
-                 files, versions_->NumLevelBytes(level) / 1048576.0,
-                 stats_[level].micros / 1e6,
-                 stats_[level].bytes_read / 1048576.0,
-                 stats_[level].bytes_written / 1048576.0);
+        std::snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
+                      level, files, versions_->NumLevelBytes(level) / 1048576.0,
+                      stats_[level].micros / 1e6,
+                      stats_[level].bytes_read / 1048576.0,
+                      stats_[level].bytes_written / 1048576.0);
         value->append(buf);
       }
     }
@@ -1432,8 +1432,8 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
       total_usage += imm_->ApproximateMemoryUsage();
     }
     char buf[50];
-    snprintf(buf, sizeof(buf), "%llu",
-             static_cast<unsigned long long>(total_usage));
+    std::snprintf(buf, sizeof(buf), "%llu",
+                  static_cast<unsigned long long>(total_usage));
     value->append(buf);
     return true;
   }
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 98715a9..532c2db 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -21,9 +21,9 @@ static void DumpInternalIter(Iterator* iter) {
   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
     ParsedInternalKey k;
     if (!ParseInternalKey(iter->key(), &k)) {
-      fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
+      std::fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
     } else {
-      fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
+      std::fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
     }
   }
 }
diff --git a/db/db_test.cc b/db/db_test.cc
index 8cd90f3..3a45731 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -424,7 +424,7 @@ class DBTest : public testing::Test {
     for (int level = 0; level < config::kNumLevels; level++) {
       int f = NumTableFilesAtLevel(level);
       char buf[100];
-      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
+      std::snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
       result += buf;
       if (f > 0) {
         last_non_zero_offset = result.size();
@@ -469,14 +469,14 @@ class DBTest : public testing::Test {
   }
 
   void DumpFileCounts(const char* label) {
-    fprintf(stderr, "---\n%s:\n", label);
-    fprintf(
+    std::fprintf(stderr, "---\n%s:\n", label);
+    std::fprintf(
         stderr, "maxoverlap: %lld\n",
         static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
     for (int level = 0; level < config::kNumLevels; level++) {
       int num = NumTableFilesAtLevel(level);
       if (num > 0) {
-        fprintf(stderr, "  level %3d : %d files\n", level, num);
+        std::fprintf(stderr, "  level %3d : %d files\n", level, num);
       }
     }
   }
@@ -1024,7 +1024,7 @@ TEST_F(DBTest, RecoverDuringMemtableCompaction) {
 
 static std::string Key(int i) {
   char buf[100];
-  snprintf(buf, sizeof(buf), "key%06d", i);
+  std::snprintf(buf, sizeof(buf), "key%06d", i);
   return std::string(buf);
 }
 
@@ -1118,7 +1118,7 @@ TEST_F(DBTest, RepeatedWritesToSameKey) {
   for (int i = 0; i < 5 * kMaxFiles; i++) {
     Put("key", value);
     ASSERT_LE(TotalTableFiles(), kMaxFiles);
-    fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
+    std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
   }
 }
 
@@ -1140,7 +1140,7 @@ TEST_F(DBTest, SparseMerge) {
   // Write approximately 100MB of "B" values
   for (int i = 0; i < 100000; i++) {
     char key[100];
-    snprintf(key, sizeof(key), "B%010d", i);
+    std::snprintf(key, sizeof(key), "B%010d", i);
     Put(key, value);
   }
   Put("C", "vc");
@@ -1165,9 +1165,9 @@ TEST_F(DBTest, SparseMerge) {
 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
-    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val), (unsigned long long)(low),
-            (unsigned long long)(high));
+    std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
+                 (unsigned long long)(val), (unsigned long long)(low),
+                 (unsigned long long)(high));
   }
   return result;
 }
@@ -1501,7 +1501,7 @@ TEST_F(DBTest, Fflush_Issue474) {
   static const int kNum = 100000;
   Random rnd(test::RandomSeed());
   for (int i = 0; i < kNum; i++) {
-    fflush(nullptr);
+    std::fflush(nullptr);
     ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
   }
 }
@@ -1578,7 +1578,7 @@ TEST_F(DBTest, CustomComparator) {
   for (int run = 0; run < 2; run++) {
     for (int i = 0; i < 1000; i++) {
       char buf[100];
-      snprintf(buf, sizeof(buf), "[%d]", i * 10);
+      std::snprintf(buf, sizeof(buf), "[%d]", i * 10);
       ASSERT_LEVELDB_OK(Put(buf, buf));
     }
     Compact("[0]", "[1000000]");
@@ -1748,7 +1748,7 @@ TEST_F(DBTest, NonWritableFileSystem) {
   std::string big(100000, 'x');
   int errors = 0;
   for (int i = 0; i < 20; i++) {
-    fprintf(stderr, "iter %d; errors %d\n", i, errors);
+    std::fprintf(stderr, "iter %d; errors %d\n", i, errors);
     if (!Put("foo", big).ok()) {
       errors++;
       DelayMilliseconds(100);
@@ -1901,7 +1901,7 @@ TEST_F(DBTest, BloomFilter) {
     ASSERT_EQ(Key(i), Get(Key(i)));
   }
   int reads = env_->random_read_counter_.Read();
-  fprintf(stderr, "%d present => %d reads\n", N, reads);
+  std::fprintf(stderr, "%d present => %d reads\n", N, reads);
   ASSERT_GE(reads, N);
   ASSERT_LE(reads, N + 2 * N / 100);
 
@@ -1911,7 +1911,7 @@ TEST_F(DBTest, BloomFilter) {
     ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
   }
   reads = env_->random_read_counter_.Read();
-  fprintf(stderr, "%d missing => %d reads\n", N, reads);
+  std::fprintf(stderr, "%d missing => %d reads\n", N, reads);
   ASSERT_LE(reads, 3 * N / 100);
 
   env_->delay_data_sync_.store(false, std::memory_order_release);
@@ -1944,7 +1944,7 @@ static void MTThreadBody(void* arg) {
   int id = t->id;
   DB* db = t->state->test->db_;
   int counter = 0;
-  fprintf(stderr, "... starting thread %d\n", id);
+  std::fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   std::string value;
   char valbuf[1500];
@@ -1953,13 +1953,13 @@ static void MTThreadBody(void* arg) {
 
     int key = rnd.Uniform(kNumKeys);
     char keybuf[20];
-    snprintf(keybuf, sizeof(keybuf), "%016d", key);
+    std::snprintf(keybuf, sizeof(keybuf), "%016d", key);
 
     if (rnd.OneIn(2)) {
       // Write values of the form <key, my id, counter>.
       // We add some padding for force compactions.
-      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
-               static_cast<int>(counter));
+      std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+                    static_cast<int>(counter));
       ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
     } else {
       // Read a value and verify that it matches the pattern written above.
@@ -1980,7 +1980,7 @@ static void MTThreadBody(void* arg) {
     counter++;
   }
   t->state->thread_done[id].store(true, std::memory_order_release);
-  fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
+  std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
 }
 
 }  // namespace
@@ -2134,30 +2134,31 @@ static bool CompareIterators(int step, DB* model, DB* db,
        ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
     count++;
     if (miter->key().compare(dbiter->key()) != 0) {
-      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
-              EscapeString(miter->key()).c_str(),
-              EscapeString(dbiter->key()).c_str());
+      std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
+                   EscapeString(miter->key()).c_str(),
+                   EscapeString(dbiter->key()).c_str());
       ok = false;
       break;
     }
 
     if (miter->value().compare(dbiter->value()) != 0) {
-      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
-              step, EscapeString(miter->key()).c_str(),
-              EscapeString(miter->value()).c_str(),
-              EscapeString(miter->value()).c_str());
+      std::fprintf(stderr,
+                   "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
+                   step, EscapeString(miter->key()).c_str(),
+                   EscapeString(miter->value()).c_str(),
+                   EscapeString(miter->value()).c_str());
       ok = false;
     }
   }
 
   if (ok) {
     if (miter->Valid() != dbiter->Valid()) {
-      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
-              step, miter->Valid(), dbiter->Valid());
+      std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
+                   step, miter->Valid(), dbiter->Valid());
       ok = false;
     }
   }
-  fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
+  std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
   delete miter;
   delete dbiter;
   return ok;
@@ -2173,7 +2174,7 @@ TEST_F(DBTest, Randomized) {
     std::string k, v;
     for (int step = 0; step < N; step++) {
       if (step % 100 == 0) {
-        fprintf(stderr, "Step %d of %d\n", step, N);
+        std::fprintf(stderr, "Step %d of %d\n", step, N);
       }
       // TODO(sanjay): Test Get() works
       int p = rnd.Uniform(100);
@@ -2233,7 +2234,7 @@ TEST_F(DBTest, Randomized) {
 
 std::string MakeKey(unsigned int num) {
   char buf[30];
-  snprintf(buf, sizeof(buf), "%016u", num);
+  std::snprintf(buf, sizeof(buf), "%016u", num);
   return std::string(buf);
 }
 
@@ -2283,10 +2284,10 @@ void BM_LogAndApply(int iters, int num_base_files) {
   uint64_t stop_micros = env->NowMicros();
   unsigned int us = stop_micros - start_micros;
   char buf[16];
-  snprintf(buf, sizeof(buf), "%d", num_base_files);
-  fprintf(stderr,
-          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n", buf,
-          iters, us, ((float)us) / iters);
+  std::snprintf(buf, sizeof(buf), "%d", num_base_files);
+  std::fprintf(stderr,
+               "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
+               buf, iters, us, ((float)us) / iters);
 }
 
 }  // namespace leveldb
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 019aa92..2a5749f 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -126,7 +126,7 @@ LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
   start_ = dst;
   dst = EncodeVarint32(dst, usize + 8);
   kstart_ = dst;
-  memcpy(dst, user_key.data(), usize);
+  std::memcpy(dst, user_key.data(), usize);
   dst += usize;
   EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
   dst += 8;
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 8f2b647..6eebafa 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -427,7 +427,7 @@ class FaultInjectionTest : public testing::Test {
           EXPECT_EQ(value_space, val);
         }
       } else if (s.ok()) {
-        fprintf(stderr, "Expected an error at %d, but was OK\n", i);
+        std::fprintf(stderr, "Expected an error at %d, but was OK\n", i);
         s = Status::IOError(dbname_, "Expected value error:");
       } else {
         s = Status::OK();  // An expected error
@@ -439,7 +439,7 @@ class FaultInjectionTest : public testing::Test {
   // Return the ith key
   Slice Key(int i, std::string* storage) const {
     char buf[100];
-    snprintf(buf, sizeof(buf), "%016d", i);
+    std::snprintf(buf, sizeof(buf), "%016d", i);
     storage->assign(buf, strlen(buf));
     return Slice(*storage);
   }
diff --git a/db/filename.cc b/db/filename.cc
index f6bec00..e526249 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -20,8 +20,8 @@ Status WriteStringToFileSync(Env* env, const Slice& data,
 static std::string MakeFileName(const std::string& dbname, uint64_t number,
                                 const char* suffix) {
   char buf[100];
-  snprintf(buf, sizeof(buf), "/%06llu.%s",
-           static_cast<unsigned long long>(number), suffix);
+  std::snprintf(buf, sizeof(buf), "/%06llu.%s",
+                static_cast<unsigned long long>(number), suffix);
   return dbname + buf;
 }
 
@@ -43,8 +43,8 @@ std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
 std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
   char buf[100];
-  snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
-           static_cast<unsigned long long>(number));
+  std::snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
+                static_cast<unsigned long long>(number));
   return dbname + buf;
 }
 
diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc
index 8e94abd..95ee897 100644
--- a/db/leveldbutil.cc
+++ b/db/leveldbutil.cc
@@ -28,7 +28,7 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
   for (int i = 0; i < num; i++) {
     Status s = DumpFile(env, files[i], &printer);
     if (!s.ok()) {
-      fprintf(stderr, "%s\n", s.ToString().c_str());
+      std::fprintf(stderr, "%s\n", s.ToString().c_str());
       ok = false;
     }
   }
@@ -39,9 +39,10 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
 }  // namespace leveldb
 
 static void Usage() {
-  fprintf(stderr,
-          "Usage: leveldbutil command...\n"
-          "   dump files...         -- dump contents of specified files\n");
+  std::fprintf(
+      stderr,
+      "Usage: leveldbutil command...\n"
+      "   dump files...         -- dump contents of specified files\n");
 }
 
 int main(int argc, char** argv) {
diff --git a/db/log_reader.cc b/db/log_reader.cc
index dcd4b75..9880279 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -160,7 +160,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
 
       default: {
         char buf[40];
-        snprintf(buf, sizeof(buf), "unknown record type %u", record_type);
+        std::snprintf(buf, sizeof(buf), "unknown record type %u", record_type);
         ReportCorruption(
             (fragment.size() + (in_fragmented_record ? scratch->size() : 0)),
             buf);
diff --git a/db/log_test.cc b/db/log_test.cc
index c765e93..346b19c 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -27,7 +27,7 @@ static std::string BigString(const std::string& partial_string, size_t n) {
 // Construct a string from a number
 static std::string NumberString(int n) {
   char buf[50];
-  snprintf(buf, sizeof(buf), "%d.", n);
+  std::snprintf(buf, sizeof(buf), "%d.", n);
   return std::string(buf);
 }
 
diff --git a/db/memtable.cc b/db/memtable.cc
index 00931d4..f42774d 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -88,12 +88,12 @@ void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
                              val_size;
   char* buf = arena_.Allocate(encoded_len);
   char* p = EncodeVarint32(buf, internal_key_size);
-  memcpy(p, key.data(), key_size);
+  std::memcpy(p, key.data(), key_size);
   p += key_size;
   EncodeFixed64(p, (s << 8) | type);
   p += 8;
   p = EncodeVarint32(p, val_size);
-  memcpy(p, value.data(), val_size);
+  std::memcpy(p, value.data(), val_size);
   assert(p + val_size == buf + encoded_len);
   table_.Insert(buf);
 }
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index e5cc916..3db817e 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -160,7 +160,8 @@ class RecoveryTest : public testing::Test {
 
 TEST_F(RecoveryTest, ManifestReused) {
   if (!CanAppend()) {
-    fprintf(stderr, "skipping test because env does not support appending\n");
+    std::fprintf(stderr,
+                 "skipping test because env does not support appending\n");
     return;
   }
   ASSERT_LEVELDB_OK(Put("foo", "bar"));
@@ -176,7 +177,8 @@ TEST_F(RecoveryTest, ManifestReused) {
 
 TEST_F(RecoveryTest, LargeManifestCompacted) {
   if (!CanAppend()) {
-    fprintf(stderr, "skipping test because env does not support appending\n");
+    std::fprintf(stderr,
+                 "skipping test because env does not support appending\n");
     return;
   }
   ASSERT_LEVELDB_OK(Put("foo", "bar"));
@@ -216,7 +218,8 @@ TEST_F(RecoveryTest, NoLogFiles) {
 
 TEST_F(RecoveryTest, LogFileReuse) {
   if (!CanAppend()) {
-    fprintf(stderr, "skipping test because env does not support appending\n");
+    std::fprintf(stderr,
+                 "skipping test because env does not support appending\n");
     return;
   }
   for (int i = 0; i < 2; i++) {
@@ -249,7 +252,7 @@ TEST_F(RecoveryTest, MultipleMemTables) {
   const int kNum = 1000;
   for (int i = 0; i < kNum; i++) {
     char buf[100];
-    snprintf(buf, sizeof(buf), "%050d", i);
+    std::snprintf(buf, sizeof(buf), "%050d", i);
     ASSERT_LEVELDB_OK(Put(buf, buf));
   }
   ASSERT_EQ(0, NumTables());
@@ -268,7 +271,7 @@ TEST_F(RecoveryTest, MultipleMemTables) {
   ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
   for (int i = 0; i < kNum; i++) {
     char buf[100];
-    snprintf(buf, sizeof(buf), "%050d", i);
+    std::snprintf(buf, sizeof(buf), "%050d", i);
     ASSERT_EQ(buf, Get(buf));
   }
 }
diff --git a/db/repair.cc b/db/repair.cc
index d2a495e..97a27c6 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -372,7 +372,8 @@ class Repairer {
                     t.meta.largest);
     }
 
-    // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+    // std::fprintf(stderr,
+    //              "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
     {
       log::Writer log(file);
       std::string record;
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index b548017..79a5b86 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -346,7 +346,7 @@ static void RunConcurrent(int run) {
   const int kSize = 1000;
   for (int i = 0; i < N; i++) {
     if ((i % 100) == 0) {
-      fprintf(stderr, "Run %d of %d\n", i, N);
+      std::fprintf(stderr, "Run %d of %d\n", i, N);
     }
     TestState state(seed + 1);
     Env::Default()->Schedule(ConcurrentReader, &state);
diff --git a/db/version_set.cc b/db/version_set.cc
index f23ae14..a459587 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -703,10 +703,10 @@ class VersionSet::Builder {
           const InternalKey& prev_end = v->files_[level][i - 1]->largest;
           const InternalKey& this_begin = v->files_[level][i]->smallest;
           if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
-            fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
-                    prev_end.DebugString().c_str(),
-                    this_begin.DebugString().c_str());
-            abort();
+            std::fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
+                         prev_end.DebugString().c_str(),
+                         this_begin.DebugString().c_str());
+            std::abort();
           }
         }
       }
@@ -1100,11 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const {
 const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
   // Update code if kNumLevels changes
   static_assert(config::kNumLevels == 7, "");
-  snprintf(scratch->buffer, sizeof(scratch->buffer),
-           "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
-           int(current_->files_[1].size()), int(current_->files_[2].size()),
-           int(current_->files_[3].size()), int(current_->files_[4].size()),
-           int(current_->files_[5].size()), int(current_->files_[6].size()));
+  std::snprintf(
+      scratch->buffer, sizeof(scratch->buffer), "files[ %d %d %d %d %d %d %d ]",
+      int(current_->files_[0].size()), int(current_->files_[1].size()),
+      int(current_->files_[2].size()), int(current_->files_[3].size()),
+      int(current_->files_[4].size()), int(current_->files_[5].size()),
+      int(current_->files_[6].size()));
   return scratch->buffer;
 }
 
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index 0da4e76..e476613 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -93,7 +93,7 @@ class FileState {
       if (avail > bytes_to_copy) {
         avail = bytes_to_copy;
       }
-      memcpy(dst, blocks_[block] + block_offset, avail);
+      std::memcpy(dst, blocks_[block] + block_offset, avail);
 
       bytes_to_copy -= avail;
       dst += avail;
@@ -126,7 +126,7 @@ class FileState {
       if (avail > src_len) {
         avail = src_len;
       }
-      memcpy(blocks_.back() + offset, src, avail);
+      std::memcpy(blocks_.back() + offset, src, avail);
       src_len -= avail;
       src += avail;
       size_ += avail;
@@ -215,7 +215,7 @@ class WritableFileImpl : public WritableFile {
 
 class NoOpLogger : public Logger {
  public:
-  void Logv(const char* format, va_list ap) override {}
+  void Logv(const char* format, std::va_list ap) override {}
 };
 
 class InMemoryEnv : public EnvWrapper {
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index 3ef0393..e00895a 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -300,7 +300,7 @@ class LEVELDB_EXPORT Logger {
   virtual ~Logger();
 
   // Write an entry to the log file with the specified format.
-  virtual void Logv(const char* format, va_list ap) = 0;
+  virtual void Logv(const char* format, std::va_list ap) = 0;
 };
 
 // Identifies a locked file.
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index 7fc43ea..8fa5bb9 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -18,7 +18,7 @@ const int kNumKeys = 1100000;
 
 std::string Key1(int i) {
   char buf[100];
-  snprintf(buf, sizeof(buf), "my_key_%d", i);
+  std::snprintf(buf, sizeof(buf), "my_key_%d", i);
   return buf;
 }
 
diff --git a/table/table_test.cc b/table/table_test.cc
index 713b63e..190dd0f 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -123,7 +123,7 @@ class StringSource : public RandomAccessFile {
     if (offset + n > contents_.size()) {
       n = contents_.size() - offset;
     }
-    memcpy(scratch, &contents_[offset], n);
+    std::memcpy(scratch, &contents_[offset], n);
     *result = Slice(scratch, n);
     return Status::OK();
   }
@@ -485,13 +485,13 @@ class Harness : public testing::Test {
     Iterator* iter = constructor_->NewIterator();
     ASSERT_TRUE(!iter->Valid());
     KVMap::const_iterator model_iter = data.begin();
-    if (kVerbose) fprintf(stderr, "---\n");
+    if (kVerbose) std::fprintf(stderr, "---\n");
     for (int i = 0; i < 200; i++) {
       const int toss = rnd->Uniform(5);
       switch (toss) {
         case 0: {
           if (iter->Valid()) {
-            if (kVerbose) fprintf(stderr, "Next\n");
+            if (kVerbose) std::fprintf(stderr, "Next\n");
             iter->Next();
             ++model_iter;
             ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@@ -500,7 +500,7 @@ class Harness : public testing::Test {
         }
 
         case 1: {
-          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
+          if (kVerbose) std::fprintf(stderr, "SeekToFirst\n");
           iter->SeekToFirst();
           model_iter = data.begin();
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@@ -511,7 +511,7 @@ class Harness : public testing::Test {
           std::string key = PickRandomKey(rnd, keys);
           model_iter = data.lower_bound(key);
           if (kVerbose)
-            fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
+            std::fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
           iter->Seek(Slice(key));
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
           break;
@@ -519,7 +519,7 @@ class Harness : public testing::Test {
 
         case 3: {
           if (iter->Valid()) {
-            if (kVerbose) fprintf(stderr, "Prev\n");
+            if (kVerbose) std::fprintf(stderr, "Prev\n");
             iter->Prev();
             if (model_iter == data.begin()) {
               model_iter = data.end();  // Wrap around to invalid value
@@ -532,7 +532,7 @@ class Harness : public testing::Test {
         }
 
         case 4: {
-          if (kVerbose) fprintf(stderr, "SeekToLast\n");
+          if (kVerbose) std::fprintf(stderr, "SeekToLast\n");
           iter->SeekToLast();
           if (keys.empty()) {
             model_iter = data.end();
@@ -684,8 +684,8 @@ TEST_F(Harness, Randomized) {
     for (int num_entries = 0; num_entries < 2000;
          num_entries += (num_entries < 50 ? 1 : 200)) {
       if ((num_entries % 10) == 0) {
-        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
-                int(kNumTestArgs), num_entries);
+        std::fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+                     int(kNumTestArgs), num_entries);
       }
       for (int e = 0; e < num_entries; e++) {
         std::string v;
@@ -714,7 +714,7 @@ TEST_F(Harness, RandomizedLongDB) {
   for (int level = 0; level < config::kNumLevels; level++) {
     std::string value;
     char name[100];
-    snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
+    std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
     ASSERT_TRUE(db()->GetProperty(name, &value));
     files += atoi(value.c_str());
   }
@@ -736,8 +736,8 @@ TEST(MemTableTest, Simple) {
   Iterator* iter = memtable->NewIterator();
   iter->SeekToFirst();
   while (iter->Valid()) {
-    fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
-            iter->value().ToString().c_str());
+    std::fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
+                 iter->value().ToString().c_str());
     iter->Next();
   }
 
@@ -748,9 +748,9 @@ TEST(MemTableTest, Simple) {
 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
-    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val), (unsigned long long)(low),
-            (unsigned long long)(high));
+    std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
+                 (unsigned long long)(val), (unsigned long long)(low),
+                 (unsigned long long)(high));
   }
   return result;
 }
@@ -792,7 +792,7 @@ static bool SnappyCompressionSupported() {
 
 TEST(TableTest, ApproximateOffsetOfCompressed) {
   if (!SnappyCompressionSupported()) {
-    fprintf(stderr, "skipping compression tests\n");
+    std::fprintf(stderr, "skipping compression tests\n");
     return;
   }
 
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index bcf14dc..520473e 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -45,14 +45,14 @@ class BloomTest : public testing::Test {
   size_t FilterSize() const { return filter_.size(); }
 
   void DumpFilter() {
-    fprintf(stderr, "F(");
+    std::fprintf(stderr, "F(");
     for (size_t i = 0; i + 1 < filter_.size(); i++) {
       const unsigned int c = static_cast<unsigned int>(filter_[i]);
       for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
+        std::fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
       }
     }
-    fprintf(stderr, ")\n");
+    std::fprintf(stderr, ")\n");
   }
 
   bool Matches(const Slice& s) {
@@ -132,8 +132,9 @@ TEST_F(BloomTest, VaryingLengths) {
     // Check false positive rate
     double rate = FalsePositiveRate();
     if (kVerbose >= 1) {
-      fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate * 100.0, length, static_cast<int>(FilterSize()));
+      std::fprintf(stderr,
+                   "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
+                   rate * 100.0, length, static_cast<int>(FilterSize()));
     }
     ASSERT_LE(rate, 0.02);  // Must not be over 2%
     if (rate > 0.0125)
@@ -142,8 +143,8 @@ TEST_F(BloomTest, VaryingLengths) {
       good_filters++;
   }
   if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
-            mediocre_filters);
+    std::fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+                 mediocre_filters);
   }
   ASSERT_LE(mediocre_filters, good_filters / 5);
 }
diff --git a/util/cache.cc b/util/cache.cc
index 509e5eb..ad1e9a2 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -279,7 +279,7 @@ Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
   e->hash = hash;
   e->in_cache = false;
   e->refs = 1;  // for the returned handle.
-  memcpy(e->key_data, key.data(), key.size());
+  std::memcpy(e->key_data, key.data(), key.size());
 
   if (capacity_ > 0) {
     e->refs++;  // for the cache's reference.
diff --git a/util/env.cc b/util/env.cc
index 40e6071..a53b230 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -4,6 +4,8 @@
 
 #include "leveldb/env.h"
 
+#include <cstdarg>
+
 // This workaround can be removed when leveldb::Env::DeleteFile is removed.
 // See env.h for justification.
 #if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
@@ -38,7 +40,7 @@ FileLock::~FileLock() = default;
 
 void Log(Logger* info_log, const char* format, ...) {
   if (info_log != nullptr) {
-    va_list ap;
+    std::va_list ap;
     va_start(ap, format);
     info_log->Logv(format, ap);
     va_end(ap);
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 36f226f..29f973f 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -149,7 +149,7 @@ void CheckCloseOnExecDoesNotLeakFDs(
   if (child_pid == kForkInChildProcessReturnValue) {
     ::execv(child_argv[0], child_argv);
     std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
-    std::exit(kTextCloseOnExecHelperExecFailedCode);
+    std::std::exit(kTextCloseOnExecHelperExecFailedCode);
   }
 
   int child_status = 0;
@@ -187,11 +187,11 @@ TEST_F(EnvPosixTest, TestOpenOnRead) {
   ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file = test_dir + "/open_on_read.txt";
 
-  FILE* f = fopen(test_file.c_str(), "we");
+  FILE* f = std::fopen(test_file.c_str(), "we");
   ASSERT_TRUE(f != nullptr);
   const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
   fputs(kFileData, f);
-  fclose(f);
+  std::fclose(f);
 
   // Open test file some number above the sum of the two limits to force
   // open-on-read behavior of POSIX Env leveldb::RandomAccessFile.
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc
index 15c0274..d6822d2 100644
--- a/util/env_windows_test.cc
+++ b/util/env_windows_test.cc
@@ -29,11 +29,11 @@ TEST_F(EnvWindowsTest, TestOpenOnRead) {
   ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
   std::string test_file = test_dir + "/open_on_read.txt";
 
-  FILE* f = fopen(test_file.c_str(), "w");
+  FILE* f = std::fopen(test_file.c_str(), "w");
   ASSERT_TRUE(f != nullptr);
   const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
   fputs(kFileData, f);
-  fclose(f);
+  std::fclose(f);
 
   // Open test file some number above the sum of the two limits to force
   // leveldb::WindowsEnv to switch from mapping the file into memory
diff --git a/util/histogram.cc b/util/histogram.cc
index d110d28..7af4030 100644
--- a/util/histogram.cc
+++ b/util/histogram.cc
@@ -241,11 +241,11 @@ double Histogram::StandardDeviation() const {
 std::string Histogram::ToString() const {
   std::string r;
   char buf[200];
-  snprintf(buf, sizeof(buf), "Count: %.0f  Average: %.4f  StdDev: %.2f\n", num_,
-           Average(), StandardDeviation());
+  std::snprintf(buf, sizeof(buf), "Count: %.0f  Average: %.4f  StdDev: %.2f\n",
+                num_, Average(), StandardDeviation());
   r.append(buf);
-  snprintf(buf, sizeof(buf), "Min: %.4f  Median: %.4f  Max: %.4f\n",
-           (num_ == 0.0 ? 0.0 : min_), Median(), max_);
+  std::snprintf(buf, sizeof(buf), "Min: %.4f  Median: %.4f  Max: %.4f\n",
+                (num_ == 0.0 ? 0.0 : min_), Median(), max_);
   r.append(buf);
   r.append("------------------------------------------------------\n");
   const double mult = 100.0 / num_;
@@ -253,12 +253,12 @@ std::string Histogram::ToString() const {
   for (int b = 0; b < kNumBuckets; b++) {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
-    snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
-             ((b == 0) ? 0.0 : kBucketLimit[b - 1]),  // left
-             kBucketLimit[b],                         // right
-             buckets_[b],                             // count
-             mult * buckets_[b],                      // percentage
-             mult * sum);                             // cumulative percentage
+    std::snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+                  ((b == 0) ? 0.0 : kBucketLimit[b - 1]),  // left
+                  kBucketLimit[b],                         // right
+                  buckets_[b],                             // count
+                  mult * buckets_[b],                      // percentage
+                  mult * sum);  // cumulative percentage
     r.append(buf);
 
     // Add hash marks based on percentage; 20 marks for 100%.
diff --git a/util/logging.cc b/util/logging.cc
index 39d8551..8d6fb5b 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -16,7 +16,7 @@ namespace leveldb {
 
 void AppendNumberTo(std::string* str, uint64_t num) {
   char buf[30];
-  snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
+  std::snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(num));
   str->append(buf);
 }
 
@@ -27,8 +27,8 @@ void AppendEscapedStringTo(std::string* str, const Slice& value) {
       str->push_back(c);
     } else {
       char buf[10];
-      snprintf(buf, sizeof(buf), "\\x%02x",
-               static_cast<unsigned int>(c) & 0xff);
+      std::snprintf(buf, sizeof(buf), "\\x%02x",
+                    static_cast<unsigned int>(c) & 0xff);
       str->append(buf);
     }
   }
diff --git a/util/posix_logger.h b/util/posix_logger.h
index 28e15d1..6bbc1a0 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -30,7 +30,7 @@ class PosixLogger final : public Logger {
 
   ~PosixLogger() override { std::fclose(fp_); }
 
-  void Logv(const char* format, va_list arguments) override {
+  void Logv(const char* format, std::va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
     struct ::timeval now_timeval;
     ::gettimeofday(&now_timeval, nullptr);
@@ -62,7 +62,7 @@ class PosixLogger final : public Logger {
           (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
 
       // Print the header into the buffer.
-      int buffer_offset = snprintf(
+      int buffer_offset = std::snprintf(
           buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
           now_components.tm_year + 1900, now_components.tm_mon + 1,
           now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
@@ -98,8 +98,8 @@ class PosixLogger final : public Logger {
         }
 
         // The dynamically-allocated buffer was incorrectly sized. This should
-        // not happen, assuming a correct implementation of (v)snprintf. Fail
-        // in tests, recover by truncating the log message in production.
+        // not happen, assuming a correct implementation of std::(v)snprintf.
+        // Fail in tests, recover by truncating the log message in production.
         assert(false);
         buffer_offset = buffer_size - 1;
       }
diff --git a/util/status.cc b/util/status.cc
index 6b6528b..0559f5b 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -12,9 +12,9 @@ namespace leveldb {
 
 const char* Status::CopyState(const char* state) {
   uint32_t size;
-  memcpy(&size, state, sizeof(size));
+  std::memcpy(&size, state, sizeof(size));
   char* result = new char[size + 5];
-  memcpy(result, state, size + 5);
+  std::memcpy(result, state, size + 5);
   return result;
 }
 
@@ -24,13 +24,13 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) {
   const uint32_t len2 = static_cast<uint32_t>(msg2.size());
   const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
   char* result = new char[size + 5];
-  memcpy(result, &size, sizeof(size));
+  std::memcpy(result, &size, sizeof(size));
   result[4] = static_cast<char>(code);
-  memcpy(result + 5, msg.data(), len1);
+  std::memcpy(result + 5, msg.data(), len1);
   if (len2) {
     result[5 + len1] = ':';
     result[6 + len1] = ' ';
-    memcpy(result + 7 + len1, msg2.data(), len2);
+    std::memcpy(result + 7 + len1, msg2.data(), len2);
   }
   state_ = result;
 }
@@ -61,14 +61,14 @@ std::string Status::ToString() const {
         type = "IO error: ";
         break;
       default:
-        snprintf(tmp, sizeof(tmp),
-                 "Unknown code(%d): ", static_cast<int>(code()));
+        std::snprintf(tmp, sizeof(tmp),
+                      "Unknown code(%d): ", static_cast<int>(code()));
         type = tmp;
         break;
     }
     std::string result(type);
     uint32_t length;
-    memcpy(&length, state_, sizeof(length));
+    std::memcpy(&length, state_, sizeof(length));
     result.append(state_ + 5, length);
     return result;
   }
diff --git a/util/windows_logger.h b/util/windows_logger.h
index 9296063..26e6c7b 100644
--- a/util/windows_logger.h
+++ b/util/windows_logger.h
@@ -27,7 +27,7 @@ class WindowsLogger final : public Logger {
 
   ~WindowsLogger() override { std::fclose(fp_); }
 
-  void Logv(const char* format, va_list arguments) override {
+  void Logv(const char* format, std::va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
     SYSTEMTIME now_components;
     ::GetLocalTime(&now_components);
@@ -56,7 +56,7 @@ class WindowsLogger final : public Logger {
           (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
 
       // Print the header into the buffer.
-      int buffer_offset = snprintf(
+      int buffer_offset = std::snprintf(
           buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
           now_components.wYear, now_components.wMonth, now_components.wDay,
           now_components.wHour, now_components.wMinute, now_components.wSecond,
@@ -92,8 +92,8 @@ class WindowsLogger final : public Logger {
         }
 
         // The dynamically-allocated buffer was incorrectly sized. This should
-        // not happen, assuming a correct implementation of (v)snprintf. Fail
-        // in tests, recover by truncating the log message in production.
+        // not happen, assuming a correct implementation of std::(v)snprintf.
+        // Fail in tests, recover by truncating the log message in production.
         assert(false);
         buffer_offset = buffer_size - 1;
       }

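This sweep qualifies C library calls with std::. Those names are only guaranteed to be declared in namespace std when the corresponding <c...> headers are included; whether they also appear in the global namespace is unspecified, which is why the headers matter. A minimal standalone illustration of the convention (not leveldb code):

```cpp
#include <cstdio>   // declares std::snprintf, std::fprintf
#include <cstring>  // declares std::memcpy, std::strlen

int main() {
  char buf[32];
  std::snprintf(buf, sizeof(buf), "value=%d", 42);

  char copy[32];
  std::memcpy(copy, buf, std::strlen(buf) + 1);
  std::fprintf(stderr, "%s\n", copy);
  return 0;
}
```
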
From 5c6dd75897adc9e542a55d983e4b57406fbfb0a0 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Thu, 30 Apr 2020 01:03:12 +0000
Subject: [PATCH 135/181] Fix accidental double std:: qualifiers.

PiperOrigin-RevId: 309136120
---
 util/env_posix_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 29f973f..da264f0 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -149,7 +149,7 @@ void CheckCloseOnExecDoesNotLeakFDs(
   if (child_pid == kForkInChildProcessReturnValue) {
     ::execv(child_argv[0], child_argv);
     std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
-    std::std::exit(kTextCloseOnExecHelperExecFailedCode);
+    std::exit(kTextCloseOnExecHelperExecFailedCode);
   }
 
   int child_status = 0;

From 23b6337f69a39d16570f8a66db69b55535d59a51 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Thu, 30 Apr 2020 01:18:25 +0000
Subject: [PATCH 136/181] Fix Travis CI build.

PiperOrigin-RevId: 309138195
---
 .travis.yml | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 766fdc9..56c772d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@
 
 language: cpp
 dist: bionic
-osx_image: xcode10.3
+osx_image: xcode11.3
 
 compiler:
 - gcc
@@ -17,6 +17,12 @@ env:
 - BUILD_TYPE=Debug
 - BUILD_TYPE=RelWithDebInfo
 
+jobs:
+  allow_failures:
+  # Homebrew's GCC is currently broken on XCode 11.
+  - compiler: gcc
+    os: osx
+
 addons:
   apt:
     sources:

From 28602d36254263127c0e1b90334614abc1ee0c83 Mon Sep 17 00:00:00 2001
From: wzk784533 <wzk784533@163.com>
Date: Sat, 11 Jul 2020 13:44:11 +0800
Subject: [PATCH 137/181] avoid unnecessary memory copy

---
 db/builder.cc | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/db/builder.cc b/db/builder.cc
index 943e857..fe5cde1 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -30,11 +30,14 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
 
     TableBuilder* builder = new TableBuilder(options, file);
     meta->smallest.DecodeFrom(iter->key());
+    Slice key;
     for (; iter->Valid(); iter->Next()) {
-      Slice key = iter->key();
-      meta->largest.DecodeFrom(key);
+      key = iter->key();
       builder->Add(key, iter->value());
     }
+    if(!key.empty()) {
+      meta->largest.DecodeFrom(key);
+    }
 
     // Finish and check for builder errors
     s = builder->Finish();

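The builder change above keeps a cheap Slice to the current key inside the loop and decodes the largest key only once, after the loop, instead of calling DecodeFrom for every entry; the Slice stays valid because the iterator outlives the loop. A generic, self-contained analogue of that pattern, using std::string_view as a stand-in for Slice rather than the leveldb types:

```cpp
#include <string>
#include <string_view>
#include <vector>

// Track the last entry with a non-owning view while iterating and perform
// the expensive "decode" (here just a copy) once, after the loop. Assumes
// `entries` is sorted, as the table-building iterator's keys are, so the
// last entry is also the largest.
std::string LargestKey(const std::vector<std::string>& entries) {
  std::string_view last;
  for (const std::string& entry : entries) {
    last = entry;  // cheap per-entry bookkeeping, no copy
    // ... the real code adds the entry to the TableBuilder here ...
  }
  std::string largest;
  if (!last.empty()) {
    largest.assign(last.data(), last.size());  // the single copy at the end
  }
  return largest;
}
```
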
From 1754c12c54d3544678205930a09b142418e34181 Mon Sep 17 00:00:00 2001
From: jl0x61 <xdujlx@foxmail.com>
Date: Tue, 14 Jul 2020 19:32:03 +0800
Subject: [PATCH 138/181] update index.md

remove the return value of GetApproximateSizes from the example in index.md
---
 doc/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/index.md b/doc/index.md
index 3d9a258..4e7c5ef 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -478,7 +478,7 @@ leveldb::Range ranges[2];
 ranges[0] = leveldb::Range("a", "c");
 ranges[1] = leveldb::Range("x", "z");
 uint64_t sizes[2];
-leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes);
+db->GetApproximateSizes(ranges, 2, sizes);
 ```
 
 The preceding call will set `sizes[0]` to the approximate number of bytes of

From b7d302326961fb809d92a95ce813e2d26fe2e16e Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Wed, 2 Sep 2020 15:45:40 +0000
Subject: [PATCH 139/181] Internal cleanup migrating StatusOr.

PiperOrigin-RevId: 329720018
---
 util/testutil.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/testutil.h b/util/testutil.h
index cc67d96..e0e2d64 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -18,7 +18,7 @@ namespace test {
 MATCHER(IsOK, "") { return arg.ok(); }
 
 // Macros for testing the results of functions that return leveldb::Status or
-// util::StatusOr<T> (for any type T).
+// absl::StatusOr<T> (for any type T).
 #define EXPECT_LEVELDB_OK(expression) \
   EXPECT_THAT(expression, leveldb::test::IsOK())
 #define ASSERT_LEVELDB_OK(expression) \

From ed781070b42f368ea2c914158528848143f92684 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 27 Oct 2020 11:09:49 -0700
Subject: [PATCH 140/181] Internal test cleanup

PiperOrigin-RevId: 339287832
---
 db/db_test.cc | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index 3a45731..22ac292 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -7,6 +7,7 @@
 #include <atomic>
 #include <string>
 
+#include "testing/base/public/benchmark.h"
 #include "gtest/gtest.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
@@ -2238,7 +2239,9 @@ std::string MakeKey(unsigned int num) {
   return std::string(buf);
 }
 
-void BM_LogAndApply(int iters, int num_base_files) {
+static void BM_LogAndApply(benchmark::State& state) {
+  const int num_base_files = state.range(0);
+
   std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
   DestroyDB(dbname, Options());
 
@@ -2273,7 +2276,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
 
   uint64_t start_micros = env->NowMicros();
 
-  for (int i = 0; i < iters; i++) {
+  for (auto st : state) {
     VersionEdit vedit;
     vedit.RemoveFile(2, fnum);
     InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
@@ -2286,21 +2289,15 @@ void BM_LogAndApply(int iters, int num_base_files) {
   char buf[16];
   std::snprintf(buf, sizeof(buf), "%d", num_base_files);
   std::fprintf(stderr,
-               "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
-               buf, iters, us, ((float)us) / iters);
+               "BM_LogAndApply/%-6s   %8zu iters : %9u us (%7.0f us / iter)\n",
+               buf, state.iterations(), us, ((float)us) / state.iterations());
 }
 
+BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
-  if (argc > 1 && std::string(argv[1]) == "--benchmark") {
-    leveldb::BM_LogAndApply(1000, 1);
-    leveldb::BM_LogAndApply(1000, 100);
-    leveldb::BM_LogAndApply(1000, 10000);
-    leveldb::BM_LogAndApply(100, 100000);
-    return 0;
-  }
-
   testing::InitGoogleTest(&argc, argv);
+  RunSpecifiedBenchmarks();
   return RUN_ALL_TESTS();
 }

From 99ab4730d62444099dbd1ea9c402e15f4aad0728 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 27 Oct 2020 12:59:41 -0700
Subject: [PATCH 141/181] Use external benchmark API header

PiperOrigin-RevId: 339310928
---
 db/db_test.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index 22ac292..8cab018 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -7,8 +7,8 @@
 #include <atomic>
 #include <string>
 
-#include "testing/base/public/benchmark.h"
 #include "gtest/gtest.h"
+#include "third_party/benchmark/include/benchmark/benchmark.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
@@ -2298,6 +2298,6 @@ BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
-  RunSpecifiedBenchmarks();
+  benchmark::RunSpecifiedBenchmarks();
   return RUN_ALL_TESTS();
 }

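Patches 140 and 141 port BM_LogAndApply to the google/benchmark API: the function takes a benchmark::State&, the range-for over the state replaces the explicit iteration count, and per-run arguments are registered with BENCHMARK(...)->Arg(...). A minimal, self-contained sketch of that pattern with a hypothetical benchmark body (the include path may differ depending on how the library is vendored):

```cpp
#include <vector>

#include "benchmark/benchmark.h"

// Hypothetical benchmark illustrating the benchmark::State pattern:
// state.range(0) supplies the per-run argument and the loop over `state`
// replaces a hand-rolled iteration count.
static void BM_VectorFill(benchmark::State& state) {
  const int num_elements = static_cast<int>(state.range(0));
  for (auto _ : state) {
    std::vector<int> v;
    for (int i = 0; i < num_elements; ++i) v.push_back(i);
    benchmark::DoNotOptimize(v.data());  // keep the work from being elided
  }
}
BENCHMARK(BM_VectorFill)->Arg(1)->Arg(100)->Arg(10000);

BENCHMARK_MAIN();
```

A standalone benchmark binary can use BENCHMARK_MAIN(); db_test.cc instead keeps its own main() and calls benchmark::RunSpecifiedBenchmarks() alongside RUN_ALL_TESTS(), because it mixes googletest tests and benchmarks.
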
From 2802398c94b3b5708f111dae58ac1b738613bbf8 Mon Sep 17 00:00:00 2001
From: Sanjay Ghemawat <sanjay@google.com>
Date: Mon, 30 Nov 2020 08:32:50 -0800
Subject: [PATCH 142/181] Fix bug in filter policy documentation example.

PiperOrigin-RevId: 344817715
---
 doc/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/index.md b/doc/index.md
index 4e7c5ef..01693ad 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -438,7 +438,7 @@ class CustomFilterPolicy : public leveldb::FilterPolicy {
     for (int i = 0; i < n; i++) {
       trimmed[i] = RemoveTrailingSpaces(keys[i]);
     }
-    return builtin_policy_->CreateFilter(&trimmed[i], n, dst);
+    return builtin_policy_->CreateFilter(trimmed.data(), n, dst);
   }
 };
 ```

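The corrected documentation example passes trimmed.data(), a pointer to the vector's contiguous storage, instead of &trimmed[i], which used the loop index after it had gone out of scope. A generic analogue of handing a std::vector to a pointer-plus-count interface (Sum is a hypothetical stand-in for CreateFilter(keys, n, dst)):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical pointer-plus-count interface.
long Sum(const int* items, std::size_t n) {
  long total = 0;
  for (std::size_t i = 0; i < n; ++i) total += items[i];
  return total;
}

long SumOfCopies(const std::vector<int>& source) {
  std::vector<int> trimmed(source.begin(), source.end());
  // data() always refers to the start of the vector's storage (and is safe
  // to pass with size 0); indexing with a stale loop variable is not.
  return Sum(trimmed.data(), trimmed.size());
}
```
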
From 37d36c92f8622595aa791867775d2f4d82e45be7 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@chromium.org>
Date: Mon, 30 Nov 2020 09:57:27 -0800
Subject: [PATCH 143/181] Added google/benchmark submodule.

---
 .gitmodules           | 3 +++
 CMakeLists.txt        | 4 +++-
 third_party/benchmark | 1 +
 3 files changed, 7 insertions(+), 1 deletion(-)
 create mode 160000 third_party/benchmark

diff --git a/.gitmodules b/.gitmodules
index 5a4e85a..6e6d3f0 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
 [submodule "third_party/googletest"]
 	path = third_party/googletest
 	url = https://github.com/google/googletest.git
+[submodule "third_party/benchmark"]
+	path = third_party/benchmark
+	url = https://github.com/google/benchmark
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ae9b0f7..2cb2296 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -298,6 +298,8 @@ if(LEVELDB_BUILD_TESTS)
   # This project is tested using GoogleTest.
   add_subdirectory("third_party/googletest")
 
+  add_subdirectory("third_party/benchmark")
+
   # GoogleTest triggers a missing field initializers warning.
   if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
     set_property(TARGET gtest
@@ -318,7 +320,7 @@ if(LEVELDB_BUILD_TESTS)
 
         "${test_file}"
     )
-    target_link_libraries("${test_target_name}" leveldb gmock gtest)
+    target_link_libraries("${test_target_name}" leveldb gmock gtest benchmark)
     target_compile_definitions("${test_target_name}"
       PRIVATE
         ${LEVELDB_PLATFORM_NAME}=1
diff --git a/third_party/benchmark b/third_party/benchmark
new file mode 160000
index 0000000..bf585a2
--- /dev/null
+++ b/third_party/benchmark
@@ -0,0 +1 @@
+Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677

From b754fdca72e9382edd457c0fd81de6e1b644d789 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@chromium.org>
Date: Mon, 30 Nov 2020 10:43:24 -0800
Subject: [PATCH 144/181] Fixed fprintf of 64-bit value.

---
 db/db_test.cc | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index 8cab018..5c364a3 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -5,6 +5,7 @@
 #include "leveldb/db.h"
 
 #include <atomic>
+#include <cinttypes>
 #include <string>
 
 #include "gtest/gtest.h"
@@ -2289,7 +2290,8 @@ static void BM_LogAndApply(benchmark::State& state) {
   char buf[16];
   std::snprintf(buf, sizeof(buf), "%d", num_base_files);
   std::fprintf(stderr,
-               "BM_LogAndApply/%-6s   %8zu iters : %9u us (%7.0f us / iter)\n",
+               "BM_LogAndApply/%-6s   %8" PRIu64
+               " iters : %9u us (%7.0f us / iter)\n",
                buf, state.iterations(), us, ((float)us) / state.iterations());
 }
 

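The fix formats the 64-bit iteration count with the PRIu64 macro from <cinttypes>, which expands to the correct length modifier for the platform's uint64_t. A small standalone example of the same technique:

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  std::uint64_t iters = 123456789012345ULL;
  // PRIu64 expands to the right conversion specifier (e.g. "llu" or "lu"),
  // so the format string stays portable across LP64 and LLP64 platforms.
  std::printf("ran %" PRIu64 " iters\n", iters);
  return 0;
}
```
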
From c3b52f7db6dba54bb8c17fa0dee9e2c0d066fa92 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@chromium.org>
Date: Mon, 30 Nov 2020 09:57:27 -0800
Subject: [PATCH 145/181] Fixup for adding the third_party/benchmark submodule.

---
 third_party/benchmark | 1 +
 1 file changed, 1 insertion(+)
 create mode 160000 third_party/benchmark

diff --git a/third_party/benchmark b/third_party/benchmark
new file mode 160000
index 0000000..bf585a2
--- /dev/null
+++ b/third_party/benchmark
@@ -0,0 +1 @@
+Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677

From 6721eda0b46654d3531b4a0a00c90dc659b337d6 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 14 Dec 2020 08:34:41 -0800
Subject: [PATCH 146/181] Update Travis CI config.

PiperOrigin-RevId: 347391876
---
 .travis.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 56c772d..e34a67e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@
 
 language: cpp
 dist: bionic
-osx_image: xcode11.3
+osx_image: xcode12.2
 
 compiler:
 - gcc
@@ -32,8 +32,8 @@ addons:
     packages:
     - clang-10
     - cmake
-    - gcc-9
-    - g++-9
+    - gcc-10
+    - g++-10
     - libgoogle-perftools-dev
     - libkyotocabinet-dev
     - libsnappy-dev
@@ -43,7 +43,7 @@ addons:
     packages:
     - cmake
     - crc32c
-    - gcc@9
+    - gcc@10
     - gperftools
     - kyoto-cabinet
     - llvm@10
@@ -59,7 +59,7 @@ install:
     export PATH="$(brew --prefix llvm)/bin:$PATH";
   fi
 # /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
+- if [ "$CXX" = "g++" ]; then export CXX="g++-10" CC="gcc-10"; fi
 # /usr/bin/clang points to an older compiler on both Linux and macOS.
 #
 # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values

From 532be8530678a95da46354037d61a504c290403d Mon Sep 17 00:00:00 2001
From: Dimitris Apostolou <dimitris.apostolou@icloud.com>
Date: Thu, 17 Dec 2020 09:05:33 +0200
Subject: [PATCH 147/181] Fix insecure links

---
 CONTRIBUTING.md    |  2 +-
 README.md          |  2 +-
 doc/benchmark.html | 10 +++++-----
 doc/impl.md        |  2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a74572a..7ede021 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -32,5 +32,5 @@ the CLA.
 ## Writing Code ##
 
 If your contribution contains code, please make sure that it follows
-[the style guide](http://google.github.io/styleguide/cppguide.html).
+[the style guide](https://google.github.io/styleguide/cppguide.html).
 Otherwise we will have to ask you to make changes, and that's no fun for anyone.
diff --git a/README.md b/README.md
index 28d29c1..81144dd 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
   * Multiple changes can be made in one atomic batch.
   * Users can create a transient snapshot to get a consistent view of data.
   * Forward and backward iteration is supported over the data.
-  * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/).
+  * Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/).
   * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
 
 # Documentation
diff --git a/doc/benchmark.html b/doc/benchmark.html
index f3fd771..1e0b4ef 100644
--- a/doc/benchmark.html
+++ b/doc/benchmark.html
@@ -83,7 +83,7 @@ div.bsql {
 <p>Google, July 2011</p>
 <hr>
 
-<p>In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against <a href="http://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and <a href="http://fallabs.com/kyotocabinet/spex.html">Kyoto Cabinet's</a> (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p>
+<p>In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against <a href="https://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and <a href="https://dbmx.net/kyotocabinet/spex.html">Kyoto Cabinet's</a> (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p>
 
 <p>Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @ 2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz. (Note that LevelDB uses at most two CPUs since the benchmarks are single threaded: one to run the benchmark, and one for background compactions.) We ran the benchmarks on two machines (with identical processors), one with an Ext3 file system and one with an Ext4 file system. The machine with the Ext3 file system has a SATA Hitachi HDS721050CLA362 hard drive. The machine with the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives spin at 7200 RPM and have hard drive write-caching enabled (using `hdparm -W 1 [device]`). The numbers reported below are the median of three measurements.</p>
 
@@ -97,9 +97,9 @@ div.bsql {
 
 <h4>Custom Build Specifications</h4>
 <ul>
-<li>LevelDB: LevelDB was compiled with the <a href="http://code.google.com/p/google-perftools">tcmalloc</a> library and the <a href="http://code.google.com/p/snappy/">Snappy</a> compression library (revision 33).  Assertions were disabled.</li>
-<li>TreeDB: TreeDB was compiled using the <a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.</li>
-<li>SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive.  We also enabled SQLite's <a href="http://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li>
+<li>LevelDB: LevelDB was compiled with the <a href="https://github.com/gperftools/gperftools">tcmalloc</a> library and the <a href="https://github.com/google/snappy">Snappy</a> compression library (revision 33).  Assertions were disabled.</li>
+<li>TreeDB: TreeDB was compiled using the <a href="https://www.oberhumer.com/opensource/lzo/">LZO</a> compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.</li>
+<li>SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive.  We also enabled SQLite's <a href="https://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li>
 </ul>
 
 <h2>1. Baseline Performance</h2>
@@ -451,7 +451,7 @@ performance may very well be better with compression if it allows more
 of the working set to fit in memory.</p>
 
 <h2>Note about Ext4 Filesystems</h2>
-<p>The preceding numbers are for an ext3 file system. Synchronous writes are much slower under <a href="http://en.wikipedia.org/wiki/Ext4">ext4</a> (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of <span class="code">fsync</span> / <span class="code">msync</span> calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues <span class="code">fsync</span> calls when switching to a new file.</p>
+<p>The preceding numbers are for an ext3 file system. Synchronous writes are much slower under <a href="https://en.wikipedia.org/wiki/Ext4">ext4</a> (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of <span class="code">fsync</span> / <span class="code">msync</span> calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues <span class="code">fsync</span> calls when switching to a new file.</p>
 
 <h2>Acknowledgements</h2>
 <p>Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.</p>
diff --git a/doc/impl.md b/doc/impl.md
index 45187a2..c9bb621 100644
--- a/doc/impl.md
+++ b/doc/impl.md
@@ -1,7 +1,7 @@
 ## Files
 
 The implementation of leveldb is similar in spirit to the representation of a
-single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html).
+single [Bigtable tablet (section 5.3)](https://research.google/pubs/pub27898/).
 However the organization of the files that make up the representation is
 somewhat different and is explained below.
 

From 8cce47e450b365347769959c53b8836ef0216df9 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Mon, 11 Jan 2021 15:32:34 +0000
Subject: [PATCH 148/181] Optimize leveldb block seeks to utilize the current
 iterator location. This is beneficial when iterators are reused and seeks are
 not random but increasing. It is additionally beneficial with larger block
 sizes and keys with common prefixes.

Add a benchmark "seekordered" to db_bench that reuses iterators across
increasing seeks. Add benchmark support for counting the comparisons made and
for a common key prefix length. Change benchmark random seeds to be
reproducible for entire benchmark suite executions but unique for threads in
different benchmark runs. As a result, a benchmark suite such as
readrandom,seekrandom no longer has a 100% found ratio, since the read
benchmarks previously reused the same seed as fillrandom.

./db_bench --benchmarks=fillrandom,compact,seekordered --block_size=262144 --comparisons=1 --key_prefix=100

without this change (though with benchmark changes):
seekrandom   :      55.309 micros/op; (631820 of 1000000 found)
Comparisons: 27001049
seekordered  :       1.732 micros/op; (631882 of 1000000 found)
Comparisons: 26998402

with this change:
seekrandom   :      55.866 micros/op; (631820 of 1000000 found)
Comparisons: 26952143
seekordered  :       1.686 micros/op; (631882 of 1000000 found)
Comparisons: 25549369

For ordered seeking, this is a 5% reduction in comparisons and a 3% speedup. For random seeking (with single-use iterators), the change in comparisons and speed is under 1% and likely noise.
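
A generic sketch of the seek-narrowing idea, using a sorted vector of strictly increasing integers and an explicit cursor (an illustrative analogue, not the Block::Iter code): if the cursor's element is smaller than the target, the binary search only needs to look at or after the cursor; if it is larger, only at or before it; if it matches, no search is needed.

```cpp
#include <cstddef>
#include <vector>

// Returns the index of the first element >= target, narrowing the binary
// search using the current cursor position. Assumes `sorted` is strictly
// increasing, mirroring leveldb's unique internal keys.
std::size_t SeekFrom(const std::vector<int>& sorted, std::size_t cursor,
                     int target) {
  std::size_t left = 0;
  std::size_t right = sorted.size();
  if (cursor < sorted.size()) {
    if (sorted[cursor] < target) {
      left = cursor;       // the target can only be at or after the cursor
    } else if (sorted[cursor] > target) {
      right = cursor + 1;  // the target can only be at or before the cursor
    } else {
      return cursor;       // already positioned at the target
    }
  }
  while (left < right) {   // ordinary lower-bound search over [left, right)
    std::size_t mid = left + (right - left) / 2;
    if (sorted[mid] < target) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  return left;
}
```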

PiperOrigin-RevId: 351149832
---
 benchmarks/db_bench.cc | 168 ++++++++++++++++++++++++++++++++---------
 db/db_test.cc          |  61 +++++++++++++++
 table/block.cc         |  27 ++++++-
 3 files changed, 221 insertions(+), 35 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 288b119..7c474d8 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -4,10 +4,12 @@
 
 #include <sys/types.h>
 
+#include <atomic>
 #include <cstdio>
 #include <cstdlib>
 
 #include "leveldb/cache.h"
+#include "leveldb/comparator.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "leveldb/filter_policy.h"
@@ -34,6 +36,7 @@
 //      readmissing   -- read N missing keys in random order
 //      readhot       -- read N times in random order from 1% section of DB
 //      seekrandom    -- N random seeks
+//      seekordered   -- N ordered seeks
 //      open          -- cost of opening a DB
 //      crc32c        -- repeated crc32c of 4K of data
 //   Meta operations:
@@ -78,6 +81,9 @@ static double FLAGS_compression_ratio = 0.5;
 // Print histogram of operation timings
 static bool FLAGS_histogram = false;
 
+// Count the number of string comparisons performed
+static bool FLAGS_comparisons = false;
+
 // Number of bytes to buffer in memtable before compacting
 // (initialized to default value by "main")
 static int FLAGS_write_buffer_size = 0;
@@ -101,6 +107,9 @@ static int FLAGS_open_files = 0;
 // Negative means use default settings.
 static int FLAGS_bloom_bits = -1;
 
+// Common key prefix length.
+static int FLAGS_key_prefix = 0;
+
 // If true, do not destroy the existing database.  If you set this
 // flag and also specify a benchmark that wants a fresh database, that
 // benchmark will fail.
@@ -117,6 +126,33 @@ namespace leveldb {
 namespace {
 leveldb::Env* g_env = nullptr;
 
+class CountComparator : public Comparator {
+ public:
+  CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
+  ~CountComparator() override {}
+  int Compare(const Slice& a, const Slice& b) const {
+    count_.fetch_add(1, std::memory_order_relaxed);
+    return wrapped_->Compare(a, b);
+  }
+  const char* Name() const override { return wrapped_->Name(); }
+  void FindShortestSeparator(std::string* start,
+                             const Slice& limit) const override {
+    wrapped_->FindShortestSeparator(start, limit);
+  }
+
+  void FindShortSuccessor(std::string* key) const override {
+    return wrapped_->FindShortSuccessor(key);
+  }
+
+  size_t comparisons() const { return count_.load(std::memory_order_relaxed); }
+
+  void reset() { count_.store(0, std::memory_order_relaxed); }
+
+ private:
+  mutable std::atomic<size_t> count_ = 0;
+  const Comparator* const wrapped_;
+};
+
 // Helper for quickly generating random data.
 class RandomGenerator {
  private:
@@ -149,6 +185,26 @@ class RandomGenerator {
   }
 };
 
+class KeyBuffer {
+ public:
+  KeyBuffer() {
+    assert(FLAGS_key_prefix < sizeof(buffer_));
+    memset(buffer_, 'a', FLAGS_key_prefix);
+  }
+  KeyBuffer& operator=(KeyBuffer& other) = delete;
+  KeyBuffer(KeyBuffer& other) = delete;
+
+  void Set(int k) {
+    std::snprintf(buffer_ + FLAGS_key_prefix,
+                  sizeof(buffer_) - FLAGS_key_prefix, "%016d", k);
+  }
+
+  Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
+
+ private:
+  char buffer_[1024];
+};
+
 #if defined(__linux)
 static Slice TrimSpace(Slice s) {
   size_t start = 0;
@@ -305,7 +361,7 @@ struct ThreadState {
   Stats stats;
   SharedState* shared;
 
-  ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
+  ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
 };
 
 }  // namespace
@@ -321,9 +377,11 @@ class Benchmark {
   WriteOptions write_options_;
   int reads_;
   int heap_counter_;
+  CountComparator count_comparator_;
+  int total_thread_count_;
 
   void PrintHeader() {
-    const int kKeySize = 16;
+    const int kKeySize = 16 + FLAGS_key_prefix;
     PrintEnvironment();
     std::fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
     std::fprintf(
@@ -411,7 +469,9 @@ class Benchmark {
         value_size_(FLAGS_value_size),
         entries_per_batch_(1),
         reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-        heap_counter_(0) {
+        heap_counter_(0),
+        count_comparator_(BytewiseComparator()),
+        total_thread_count_(0) {
     std::vector<std::string> files;
     g_env->GetChildren(FLAGS_db, &files);
     for (size_t i = 0; i < files.size(); i++) {
@@ -494,6 +554,8 @@ class Benchmark {
         method = &Benchmark::ReadMissing;
       } else if (name == Slice("seekrandom")) {
         method = &Benchmark::SeekRandom;
+      } else if (name == Slice("seekordered")) {
+        method = &Benchmark::SeekOrdered;
       } else if (name == Slice("readhot")) {
         method = &Benchmark::ReadHot;
       } else if (name == Slice("readrandomsmall")) {
@@ -591,7 +653,11 @@ class Benchmark {
       arg[i].bm = this;
       arg[i].method = method;
       arg[i].shared = &shared;
-      arg[i].thread = new ThreadState(i);
+      ++total_thread_count_;
+      // Seed the thread's random state deterministically based upon thread
+      // creation across all benchmarks. This ensures that the seeds are unique
+      // but reproducible when rerunning the same set of benchmarks.
+      arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
       arg[i].thread->shared = &shared;
       g_env->StartThread(ThreadBody, &arg[i]);
     }
@@ -612,6 +678,11 @@ class Benchmark {
       arg[0].thread->stats.Merge(arg[i].thread->stats);
     }
     arg[0].thread->stats.Report(name);
+    if (FLAGS_comparisons) {
+      fprintf(stdout, "Comparisons: %ld\n", count_comparator_.comparisons());
+      count_comparator_.reset();
+      fflush(stdout);
+    }
 
     for (int i = 0; i < n; i++) {
       delete arg[i].thread;
@@ -694,6 +765,9 @@ class Benchmark {
     options.write_buffer_size = FLAGS_write_buffer_size;
     options.max_file_size = FLAGS_max_file_size;
     options.block_size = FLAGS_block_size;
+    if (FLAGS_comparisons) {
+      options.comparator = &count_comparator_;
+    }
     options.max_open_files = FLAGS_open_files;
     options.filter_policy = filter_policy_;
     options.reuse_logs = FLAGS_reuse_logs;
@@ -727,14 +801,14 @@ class Benchmark {
     WriteBatch batch;
     Status s;
     int64_t bytes = 0;
+    KeyBuffer key;
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
-        char key[100];
-        std::snprintf(key, sizeof(key), "%016d", k);
-        batch.Put(key, gen.Generate(value_size_));
-        bytes += value_size_ + strlen(key);
+        const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
+        key.Set(k);
+        batch.Put(key.slice(), gen.Generate(value_size_));
+        bytes += value_size_ + key.slice().size();
         thread->stats.FinishedSingleOp();
       }
       s = db_->Write(write_options_, &batch);
@@ -776,11 +850,11 @@ class Benchmark {
     ReadOptions options;
     std::string value;
     int found = 0;
+    KeyBuffer key;
     for (int i = 0; i < reads_; i++) {
-      char key[100];
-      const int k = thread->rand.Next() % FLAGS_num;
-      std::snprintf(key, sizeof(key), "%016d", k);
-      if (db_->Get(options, key, &value).ok()) {
+      const int k = thread->rand.Uniform(FLAGS_num);
+      key.Set(k);
+      if (db_->Get(options, key.slice(), &value).ok()) {
         found++;
       }
       thread->stats.FinishedSingleOp();
@@ -793,11 +867,12 @@ class Benchmark {
   void ReadMissing(ThreadState* thread) {
     ReadOptions options;
     std::string value;
+    KeyBuffer key;
     for (int i = 0; i < reads_; i++) {
-      char key[100];
-      const int k = thread->rand.Next() % FLAGS_num;
-      std::snprintf(key, sizeof(key), "%016d.", k);
-      db_->Get(options, key, &value);
+      const int k = thread->rand.Uniform(FLAGS_num);
+      key.Set(k);
+      Slice s = Slice(key.slice().data(), key.slice().size() - 1);
+      db_->Get(options, s, &value);
       thread->stats.FinishedSingleOp();
     }
   }
@@ -806,11 +881,11 @@ class Benchmark {
     ReadOptions options;
     std::string value;
     const int range = (FLAGS_num + 99) / 100;
+    KeyBuffer key;
     for (int i = 0; i < reads_; i++) {
-      char key[100];
-      const int k = thread->rand.Next() % range;
-      std::snprintf(key, sizeof(key), "%016d", k);
-      db_->Get(options, key, &value);
+      const int k = thread->rand.Uniform(range);
+      key.Set(k);
+      db_->Get(options, key.slice(), &value);
       thread->stats.FinishedSingleOp();
     }
   }
@@ -818,17 +893,36 @@ class Benchmark {
   void SeekRandom(ThreadState* thread) {
     ReadOptions options;
     int found = 0;
+    KeyBuffer key;
     for (int i = 0; i < reads_; i++) {
       Iterator* iter = db_->NewIterator(options);
-      char key[100];
-      const int k = thread->rand.Next() % FLAGS_num;
-      std::snprintf(key, sizeof(key), "%016d", k);
-      iter->Seek(key);
-      if (iter->Valid() && iter->key() == key) found++;
+      const int k = thread->rand.Uniform(FLAGS_num);
+      key.Set(k);
+      iter->Seek(key.slice());
+      if (iter->Valid() && iter->key() == key.slice()) found++;
       delete iter;
       thread->stats.FinishedSingleOp();
     }
     char msg[100];
+    snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+    thread->stats.AddMessage(msg);
+  }
+
+  void SeekOrdered(ThreadState* thread) {
+    ReadOptions options;
+    Iterator* iter = db_->NewIterator(options);
+    int found = 0;
+    int k = 0;
+    KeyBuffer key;
+    for (int i = 0; i < reads_; i++) {
+      k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
+      key.Set(k);
+      iter->Seek(key.slice());
+      if (iter->Valid() && iter->key() == key.slice()) found++;
+      thread->stats.FinishedSingleOp();
+    }
+    delete iter;
+    char msg[100];
     std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
     thread->stats.AddMessage(msg);
   }
@@ -837,13 +931,13 @@ class Benchmark {
     RandomGenerator gen;
     WriteBatch batch;
     Status s;
+    KeyBuffer key;
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
-        char key[100];
-        std::snprintf(key, sizeof(key), "%016d", k);
-        batch.Delete(key);
+        const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
+        key.Set(k);
+        batch.Delete(key.slice());
         thread->stats.FinishedSingleOp();
       }
       s = db_->Write(write_options_, &batch);
@@ -864,6 +958,7 @@ class Benchmark {
     } else {
       // Special thread that keeps writing until other threads are done.
       RandomGenerator gen;
+      KeyBuffer key;
       while (true) {
         {
           MutexLock l(&thread->shared->mu);
@@ -873,10 +968,10 @@ class Benchmark {
           }
         }
 
-        const int k = thread->rand.Next() % FLAGS_num;
-        char key[100];
-        std::snprintf(key, sizeof(key), "%016d", k);
-        Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
+        const int k = thread->rand.Uniform(FLAGS_num);
+        key.Set(k);
+        Status s =
+            db_->Put(write_options_, key.slice(), gen.Generate(value_size_));
         if (!s.ok()) {
           std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
           std::exit(1);
@@ -941,6 +1036,9 @@ int main(int argc, char** argv) {
     } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
                (n == 0 || n == 1)) {
       FLAGS_histogram = n;
+    } else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 &&
+               (n == 0 || n == 1)) {
+      FLAGS_comparisons = n;
     } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
                (n == 0 || n == 1)) {
       FLAGS_use_existing_db = n;
@@ -961,6 +1059,8 @@ int main(int argc, char** argv) {
       FLAGS_max_file_size = n;
     } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
       FLAGS_block_size = n;
+    } else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) {
+      FLAGS_key_prefix = n;
     } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
       FLAGS_cache_size = n;
     } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
diff --git a/db/db_test.cc b/db/db_test.cc
index 5c364a3..eb8d60c 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -965,6 +965,26 @@ TEST_F(DBTest, IterMultiWithDelete) {
   } while (ChangeOptions());
 }
 
+TEST_F(DBTest, IterMultiWithDeleteAndCompaction) {
+  do {
+    ASSERT_LEVELDB_OK(Put("b", "vb"));
+    ASSERT_LEVELDB_OK(Put("c", "vc"));
+    ASSERT_LEVELDB_OK(Put("a", "va"));
+    dbfull()->TEST_CompactMemTable();
+    ASSERT_LEVELDB_OK(Delete("b"));
+    ASSERT_EQ("NOT_FOUND", Get("b"));
+
+    Iterator* iter = db_->NewIterator(ReadOptions());
+    iter->Seek("c");
+    ASSERT_EQ(IterStatus(iter), "c->vc");
+    iter->Prev();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Seek("b");
+    ASSERT_EQ(IterStatus(iter), "c->vc");
+    delete iter;
+  } while (ChangeOptions());
+}
+
 TEST_F(DBTest, Recover) {
   do {
     ASSERT_LEVELDB_OK(Put("foo", "v1"));
@@ -2132,6 +2152,9 @@ static bool CompareIterators(int step, DB* model, DB* db,
   Iterator* dbiter = db->NewIterator(options);
   bool ok = true;
   int count = 0;
+  std::vector<std::string> seek_keys;
+  // Compare equality of all elements using Next(). Save some of the keys for
+  // comparing Seek equality.
   for (miter->SeekToFirst(), dbiter->SeekToFirst();
        ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
     count++;
@@ -2150,6 +2173,11 @@ static bool CompareIterators(int step, DB* model, DB* db,
                    EscapeString(miter->value()).c_str(),
                    EscapeString(miter->value()).c_str());
       ok = false;
+      break;
+    }
+
+    if (count % 10 == 0) {
+      seek_keys.push_back(miter->key().ToString());
     }
   }
 
@@ -2160,6 +2188,39 @@ static bool CompareIterators(int step, DB* model, DB* db,
       ok = false;
     }
   }
+
+  if (ok) {
+    // Validate iterator equality when performing seeks.
+    for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end();
+         ++kiter) {
+      miter->Seek(*kiter);
+      dbiter->Seek(*kiter);
+      if (!miter->Valid() || !dbiter->Valid()) {
+        std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n",
+                     step, miter->Valid(), dbiter->Valid());
+        ok = false;
+      }
+      if (miter->key().compare(dbiter->key()) != 0) {
+        std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n",
+                     step, EscapeString(miter->key()).c_str(),
+                     EscapeString(dbiter->key()).c_str());
+        ok = false;
+        break;
+      }
+
+      if (miter->value().compare(dbiter->value()) != 0) {
+        std::fprintf(
+            stderr,
+            "step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step,
+            EscapeString(miter->key()).c_str(),
+            EscapeString(miter->value()).c_str(),
+            EscapeString(dbiter->value()).c_str());
+        ok = false;
+        break;
+      }
+    }
+  }
+
   std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
   delete miter;
   delete dbiter;
diff --git a/table/block.cc b/table/block.cc
index 2fe89ea..3b15257 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -166,6 +166,24 @@ class Block::Iter : public Iterator {
     // with a key < target
     uint32_t left = 0;
     uint32_t right = num_restarts_ - 1;
+    int current_key_compare = 0;
+
+    if (Valid()) {
+      // If we're already scanning, use the current position as a starting
+      // point. This is beneficial if the key we're seeking to is ahead of the
+      // current position.
+      current_key_compare = Compare(key_, target);
+      if (current_key_compare < 0) {
+        // key_ is smaller than target
+        left = restart_index_;
+      } else if (current_key_compare > 0) {
+        right = restart_index_;
+      } else {
+        // We're seeking to the key we're already at.
+        return;
+      }
+    }
+
     while (left < right) {
       uint32_t mid = (left + right + 1) / 2;
       uint32_t region_offset = GetRestartPoint(mid);
@@ -189,8 +207,15 @@ class Block::Iter : public Iterator {
       }
     }
 
+    // We might be able to use our current position within the restart block.
+    // This is true if we determined the key we desire is in the current block
+    // and is after the current key.
+    assert(current_key_compare == 0 || Valid());
+    bool skip_seek = left == restart_index_ && current_key_compare < 0;
+    if (!skip_seek) {
+      SeekToRestartPoint(left);
+    }
     // Linear search (within restart block) for first key >= target
-    SeekToRestartPoint(left);
     while (true) {
       if (!ParseNextKey()) {
         return;

From 8f1861462b27727dfc5b2c4687112108e6ba88eb Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 12 Jan 2021 21:08:52 +0000
Subject: [PATCH 149/181] Sync MANIFEST before closing in db_impl when creating
 a new DB. Add logging with debugging information when failing to load a
 version set.
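
A hypothetical helper, AppendDurably, sketching the ordering this change
enforces for the new MANIFEST (Append, then Sync, then Close, checking each
Status); it is written against leveldb's WritableFile interface as used in
NewDB, but it is an illustration of the durability argument, not code from
the patch. Close() alone does not force the record onto stable storage, so
without the Sync() a crash right after DB creation could leave a torn or
empty MANIFEST.

    #include "leveldb/env.h"
    #include "leveldb/slice.h"
    #include "leveldb/status.h"

    namespace leveldb {

    // Append a record and report success only once it is durable on disk.
    Status AppendDurably(WritableFile* file, const Slice& record) {
      Status s = file->Append(record);
      if (s.ok()) {
        s = file->Sync();   // flush to stable storage before releasing the fd
      }
      if (s.ok()) {
        s = file->Close();  // Close() only after Sync(), mirroring NewDB()
      }
      return s;
    }

    }  // namespace leveldb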

PiperOrigin-RevId: 351432332
---
 db/db_impl.cc     | 5 +++++
 db/version_set.cc | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index 59b834f..1a4e459 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -196,6 +196,9 @@ Status DBImpl::NewDB() {
     std::string record;
     new_db.EncodeTo(&record);
     s = log.AddRecord(record);
+    if (s.ok()) {
+      s = file->Sync();
+    }
     if (s.ok()) {
       s = file->Close();
     }
@@ -301,6 +304,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
 
   if (!env_->FileExists(CurrentFileName(dbname_))) {
     if (options_.create_if_missing) {
+      Log(options_.info_log, "Creating DB %s since it was missing.",
+          dbname_.c_str());
       s = NewDB();
       if (!s.ok()) {
         return s;
diff --git a/db/version_set.cc b/db/version_set.cc
index a459587..1963353 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -898,6 +898,7 @@ Status VersionSet::Recover(bool* save_manifest) {
   uint64_t log_number = 0;
   uint64_t prev_log_number = 0;
   Builder builder(this, current_);
+  int read_records = 0;
 
   {
     LogReporter reporter;
@@ -907,6 +908,7 @@ Status VersionSet::Recover(bool* save_manifest) {
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
+      ++read_records;
       VersionEdit edit;
       s = edit.DecodeFrom(record);
       if (s.ok()) {
@@ -981,6 +983,10 @@ Status VersionSet::Recover(bool* save_manifest) {
     } else {
       *save_manifest = true;
     }
+  } else {
+    std::string error = s.ToString();
+    Log(options_->info_log, "Error recovering version set with %d records: %s",
+        read_records, error.c_str());
   }
 
   return s;

From 1998c0ef15f0fb64994e165230473337f041fd8c Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Tue, 12 Jan 2021 21:54:35 +0000
Subject: [PATCH 150/181] Fix build errors.

PiperOrigin-RevId: 351442409
---
 benchmarks/db_bench.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index 7c474d8..b362569 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -130,7 +130,7 @@ class CountComparator : public Comparator {
  public:
   CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
   ~CountComparator() override {}
-  int Compare(const Slice& a, const Slice& b) const {
+  int Compare(const Slice& a, const Slice& b) const override {
     count_.fetch_add(1, std::memory_order_relaxed);
     return wrapped_->Compare(a, b);
   }
@@ -149,7 +149,7 @@ class CountComparator : public Comparator {
   void reset() { count_.store(0, std::memory_order_relaxed); }
 
  private:
-  mutable std::atomic<size_t> count_ = 0;
+  mutable std::atomic<size_t> count_{0};
   const Comparator* const wrapped_;
 };
 

From 4a919ea4f7cf2b78bb1403e40b4888f592610c5a Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 25 Jan 2021 17:02:58 +0000
Subject: [PATCH 151/181] IWYU fixes in db/c.cc.

Fixes https://github.com/google/leveldb/issues/872

PiperOrigin-RevId: 353657701
---
 db/c.cc | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/db/c.cc b/db/c.cc
index b5c9251..8bdde38 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -4,6 +4,8 @@
 
 #include "leveldb/c.h"
 
+#include <string.h>
+
 #include <cstdint>
 #include <cstdlib>
 
@@ -119,7 +121,7 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
     size_t len;
     char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
     dst->append(filter, len);
-    free(filter);
+    std::free(filter);
   }
 
   bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
@@ -150,14 +152,15 @@ static bool SaveError(char** errptr, const Status& s) {
     *errptr = strdup(s.ToString().c_str());
   } else {
     // TODO(sanjay): Merge with existing error?
-    free(*errptr);
+    std::free(*errptr);
     *errptr = strdup(s.ToString().c_str());
   }
   return true;
 }
 
 static char* CopyString(const std::string& str) {
-  char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
+  char* result =
+      reinterpret_cast<char*>(std::malloc(sizeof(char) * str.size()));
   std::memcpy(result, str.data(), sizeof(char) * str.size());
   return result;
 }
@@ -547,13 +550,13 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
     return nullptr;
   }
 
-  char* buffer = static_cast<char*>(malloc(result.size() + 1));
+  char* buffer = static_cast<char*>(std::malloc(result.size() + 1));
   std::memcpy(buffer, result.data(), result.size());
   buffer[result.size()] = '\0';
   return buffer;
 }
 
-void leveldb_free(void* ptr) { free(ptr); }
+void leveldb_free(void* ptr) { std::free(ptr); }
 
 int leveldb_major_version() { return kMajorVersion; }
 

From 2a47801868e223fd10af272ed9fbd8b699711aae Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Tue, 16 Feb 2021 15:38:44 -0800
Subject: [PATCH 152/181] Use partial path to benchmark/benchmark.h.

Using the partial path offers more flexibility to projects that
may check out google/benchmark to a different location.

PiperOrigin-RevId: 357819911
---
 db/db_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/db_test.cc b/db/db_test.cc
index eb8d60c..908b41d 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -9,7 +9,7 @@
 #include <string>
 
 #include "gtest/gtest.h"
-#include "third_party/benchmark/include/benchmark/benchmark.h"
+#include "benchmark/benchmark.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"

From 37aaf2fccd8db3b18bb303bfdb25b455f5c75c51 Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Wed, 17 Feb 2021 10:18:12 -0800
Subject: [PATCH 153/181] Fix fprintf format string.

Using %zu for size_t instead of %ld.
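
For reference, a tiny standalone example (not part of the patch) of why
%zu is the right conversion for size_t: %ld only works where size_t and
long happen to share a width, which is not the case on 64-bit Windows.

    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t comparisons = 42;
      // %zu is the standard conversion specifier for size_t values.
      std::fprintf(stdout, "Comparisons: %zu\n", comparisons);
      return 0;
    }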

PiperOrigin-RevId: 357976882
---
 benchmarks/db_bench.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc
index b362569..429a61a 100644
--- a/benchmarks/db_bench.cc
+++ b/benchmarks/db_bench.cc
@@ -679,7 +679,7 @@ class Benchmark {
     }
     arg[0].thread->stats.Report(name);
     if (FLAGS_comparisons) {
-      fprintf(stdout, "Comparisons: %ld\n", count_comparator_.comparisons());
+      fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons());
       count_comparator_.reset();
       fflush(stdout);
     }

From 99b3c03b3284f5886f9ef9a4ef703d57373e61be Mon Sep 17 00:00:00 2001
From: Chris Mumford <cmumford@google.com>
Date: Tue, 23 Feb 2021 12:51:40 -0800
Subject: [PATCH 154/181] Change version to 1.23.

PiperOrigin-RevId: 359111035
---
 CMakeLists.txt       | 2 +-
 include/leveldb/db.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7ecf317..f8285b8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,7 +4,7 @@
 
 cmake_minimum_required(VERSION 3.9)
 # Keep the version below in sync with the one in db.h
-project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
+project(leveldb VERSION 1.23.0 LANGUAGES C CXX)
 
 # C standard can be overridden when this is used as a sub-project.
 if(NOT CMAKE_C_STANDARD)
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 2a995ec..a13d147 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -16,7 +16,7 @@ namespace leveldb {
 
 // Update CMakeLists.txt if you change these
 static const int kMajorVersion = 1;
-static const int kMinorVersion = 22;
+static const int kMinorVersion = 23;
 
 struct Options;
 struct ReadOptions;

From 24bcf7f7ceeb4534e8e3fd6a7e543e41b568e251 Mon Sep 17 00:00:00 2001
From: Paul Beusterien <paulbeusterien@google.com>
Date: Fri, 19 Feb 2021 13:17:38 -0800
Subject: [PATCH 155/181] Don't include C++ headers in extern C
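
A minimal sketch (absolute_value is made up, nothing here is from c.h) of
the failure this ordering avoids: declarations inside an extern "C" block
get C linkage, and C linkage rules out the overloads and templates that
C++ headers rely on, so the includes have to come before the block opens.

    extern "C" {
    int absolute_value(int v);        // plain C declarations are fine here
    // long absolute_value(long v);   // would not compile: functions with C
    //                                // linkage cannot be overloaded
    }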

---
 include/leveldb/c.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index 02c79ba..62e1f64 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -40,16 +40,16 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_C_H_
 #define STORAGE_LEVELDB_INCLUDE_C_H_
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include <stdarg.h>
 #include <stddef.h>
 #include <stdint.h>
 
 #include "leveldb/export.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /* Exported types */
 
 typedef struct leveldb_t leveldb_t;

From 9e8500518f7612e35afbbf55c82a74e26895c2ed Mon Sep 17 00:00:00 2001
From: Jayice <1185430411@qq.com>
Date: Sun, 28 Mar 2021 16:38:37 +0800
Subject: [PATCH 156/181] fix typo in port_example.h

---
 port/port_example.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/port/port_example.h b/port/port_example.h
index a665910..704aa24 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -55,7 +55,7 @@ class CondVar {
   void Signal();
 
   // Wake up all waiting threads.
-  void SignallAll();
+  void SignalAll();
 };
 
 // ------------------ Compression -------------------

From e2ad7dad5415e3d77b97eaf7beecb7e756937b17 Mon Sep 17 00:00:00 2001
From: mayingchun <mayingchun321@gmail.com>
Date: Mon, 5 Apr 2021 13:58:20 +0800
Subject: [PATCH 157/181] delete an unnecessary forward declaration

---
 db/skiplist.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/db/skiplist.h b/db/skiplist.h
index a59b45b..00f4132 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -36,8 +36,6 @@
 
 namespace leveldb {
 
-class Arena;
-
 template <typename Key, class Comparator>
 class SkipList {
  private:

From f6d094e994d54d8e536cf7c36fafd0f5f1af61f9 Mon Sep 17 00:00:00 2001
From: Raynol Menezes <62543741+raynolmenezes@users.noreply.github.com>
Date: Fri, 16 Apr 2021 13:00:59 +0530
Subject: [PATCH 158/181] Update log_reader.h

---
 db/log_reader.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/log_reader.h b/db/log_reader.h
index 75d53f7..ba711f8 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -24,7 +24,7 @@ class Reader {
    public:
     virtual ~Reporter();
 
-    // Some corruption was detected.  "size" is the approximate number
+    // Some corruption was detected.  "bytes" is the approximate number
     // of bytes dropped due to the corruption.
     virtual void Corruption(size_t bytes, const Status& status) = 0;
   };

From 1ca4f5b466c84063d61c350abe2c04f88d656e33 Mon Sep 17 00:00:00 2001
From: mwish <anmmscs_maple@qq.com>
Date: Sun, 2 May 2021 12:31:40 +0800
Subject: [PATCH 159/181] [Init] initial commit

---
 util/env_posix.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index d84cd1e..e6a5743 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -108,7 +108,7 @@ class Limiter {
 class PosixSequentialFile final : public SequentialFile {
  public:
   PosixSequentialFile(std::string filename, int fd)
-      : fd_(fd), filename_(filename) {}
+      : fd_(fd), filename_(std::move(filename)) {}
   ~PosixSequentialFile() override { close(fd_); }
 
   Status Read(size_t n, Slice* result, char* scratch) override {

From dbf24d9a0c3e91345281d1c8c9263e31fefadc36 Mon Sep 17 00:00:00 2001
From: ehds <ehds@qq.com>
Date: Sat, 8 May 2021 13:48:39 +0800
Subject: [PATCH 160/181] Make table cache non-copyable

---
 db/table_cache.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/db/table_cache.h b/db/table_cache.h
index aac9bfc..db8a123 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -22,6 +22,10 @@ class Env;
 class TableCache {
  public:
   TableCache(const std::string& dbname, const Options& options, int entries);
+
+  TableCache(const TableCache&) = delete;
+  TableCache& operator=(const TableCache&) = delete;
+
   ~TableCache();
 
   // Return an iterator for the specified file number (the corresponding

From 3806fbc23c6b4a84b2abe26bb650e1b3d059438f Mon Sep 17 00:00:00 2001
From: LazyWolfLin <LazyWolfLin@gmail.com>
Date: Thu, 18 Feb 2021 11:04:10 +0800
Subject: [PATCH 161/181] Small fix.

Use Random::OneIn() instead of the original modulo expression.
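
A small self-contained sketch of the equivalence being relied on; TinyRandom
is an illustration, not leveldb's util/random.h, but OneIn(n) simply names
the (Next() % n) == 0 test, so RandomHeight() behaves exactly as before and
only reads better.

    #include <cstdint>

    class TinyRandom {
     public:
      explicit TinyRandom(uint32_t seed) : seed_(seed == 0 ? 1 : seed) {}

      // Minimal "minstd" linear congruential generator step.
      uint32_t Next() {
        seed_ = static_cast<uint32_t>(
            (static_cast<uint64_t>(seed_) * 48271) % 2147483647);
        return seed_;
      }

      // True roughly once every n calls; identical to the old expression.
      bool OneIn(int n) { return (Next() % n) == 0; }

     private:
      uint32_t seed_;
    };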
---
 db/skiplist.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/skiplist.h b/db/skiplist.h
index a59b45b..f716834 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -243,7 +243,7 @@ int SkipList<Key, Comparator>::RandomHeight() {
   // Increase height with probability 1 in kBranching
   static const unsigned int kBranching = 4;
   int height = 1;
-  while (height < kMaxHeight && ((rnd_.Next() % kBranching) == 0)) {
+  while (height < kMaxHeight && rnd_.OneIn(kBranching)) {
     height++;
   }
   assert(height > 0);

From f6fe2ec5616823da11d3a36674e94131047f9210 Mon Sep 17 00:00:00 2001
From: Victor Costan <pwnall@chromium.org>
Date: Mon, 17 May 2021 18:11:07 -0700
Subject: [PATCH 162/181] Roll third-party dependencies.

---
 third_party/benchmark  | 2 +-
 third_party/googletest | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/third_party/benchmark b/third_party/benchmark
index bf585a2..7d0d906 160000
--- a/third_party/benchmark
+++ b/third_party/benchmark
@@ -1 +1 @@
-Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677
+Subproject commit 7d0d9061d83b663ce05d9de5da3d5865a3845b79
diff --git a/third_party/googletest b/third_party/googletest
index c27aceb..662fe38 160000
--- a/third_party/googletest
+++ b/third_party/googletest
@@ -1 +1 @@
-Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed
+Subproject commit 662fe38e44900c007eccb65a5d2ea19df7bd520e

From 13e3c4efc66b8d7317c7648766a930b5d7e48aa7 Mon Sep 17 00:00:00 2001
From: Sanjay Ghemawat <sanjay@google.com>
Date: Thu, 20 May 2021 19:02:41 +0000
Subject: [PATCH 163/181] Fix compactions that could end up breaking a run of
 the same user key across multiple files.

As reported in GitHub issue #339, it is incorrect to split the
same user key across multiple compacted files, since doing so causes
tombstones and newer versions to be dropped, thereby exposing obsolete
data. There was a fix for #339, but it did not fully solve the
problem: it checked for boundary problems in the first level
being compacted, but not in the second. The remaining gap was revealed
by GitHub issue #887.

We now adjust boundaries to avoid splitting user keys in both the
first level and the second level.
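
A simplified, self-contained sketch of the boundary expansion that
AddBoundaryInputs now applies to both inputs_[0] and inputs_[1]. FileInfo
and the plain user-key strings below are stand-ins for the real
FileMetaData/InternalKey machinery, so this mirrors the intent rather than
the exact implementation: keep pulling in any file that starts with the
user key the chosen set currently ends with, so no user key is split
across a compaction boundary.

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for leveldb's FileMetaData.
    struct FileInfo {
      std::string smallest_user_key;
      std::string largest_user_key;
    };

    // Expand `inputs` until no file in `level_files` begins with the same
    // user key that `inputs` currently ends with.
    void ExpandToUserKeyBoundary(const std::vector<FileInfo>& level_files,
                                 std::vector<const FileInfo*>* inputs) {
      bool grew = !inputs->empty();
      while (grew) {
        grew = false;
        std::string largest = (*inputs)[0]->largest_user_key;
        for (const FileInfo* f : *inputs) {
          largest = std::max(largest, f->largest_user_key);
        }
        for (const FileInfo& candidate : level_files) {
          const bool chosen = std::find(inputs->begin(), inputs->end(),
                                        &candidate) != inputs->end();
          if (!chosen && candidate.smallest_user_key == largest) {
            inputs->push_back(&candidate);  // boundary file: same user key
            grew = true;
          }
        }
      }
    }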

PiperOrigin-RevId: 374921082
---
 db/version_set.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/db/version_set.cc b/db/version_set.cc
index 1963353..8d85fce 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1392,6 +1392,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
 
   current_->GetOverlappingInputs(level + 1, &smallest, &largest,
                                  &c->inputs_[1]);
+  AddBoundaryInputs(icmp_, current_->files_[level + 1], &c->inputs_[1]);
 
   // Get entire range covered by compaction
   InternalKey all_start, all_limit;
@@ -1414,6 +1415,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
       std::vector<FileMetaData*> expanded1;
       current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
                                      &expanded1);
+      AddBoundaryInputs(icmp_, current_->files_[level + 1], &expanded1);
       if (expanded1.size() == c->inputs_[1].size()) {
         Log(options_->info_log,
             "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",

From 5d94ad4d95c09d3ac203ddaf9922e55e730706a8 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 24 May 2021 23:28:59 +0000
Subject: [PATCH 164/181] Update Travis CI config.

Xcode (drives macOS image) : 12.2 => 12.5
Clang                      : 10 => 12
GCC                        : 10 => 11
PiperOrigin-RevId: 375582717
---
 .travis.yml | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index e34a67e..ad59b19 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@
 
 language: cpp
 dist: bionic
-osx_image: xcode12.2
+osx_image: xcode12.5
 
 compiler:
 - gcc
@@ -26,14 +26,14 @@ jobs:
 addons:
   apt:
     sources:
-    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main'
+    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-12 main'
       key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
     - sourceline: 'ppa:ubuntu-toolchain-r/test'
     packages:
-    - clang-10
+    - clang-12
     - cmake
-    - gcc-10
-    - g++-10
+    - gcc-11
+    - g++-11
     - libgoogle-perftools-dev
     - libkyotocabinet-dev
     - libsnappy-dev
@@ -43,10 +43,10 @@ addons:
     packages:
     - cmake
     - crc32c
-    - gcc@10
+    - gcc@11
     - gperftools
     - kyoto-cabinet
-    - llvm@10
+    - llvm@12
     - ninja
     - snappy
     - sqlite3
@@ -59,14 +59,14 @@ install:
     export PATH="$(brew --prefix llvm)/bin:$PATH";
   fi
 # /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-10" CC="gcc-10"; fi
+- if [ "$CXX" = "g++" ]; then export CXX="g++-11" CC="gcc-11"; fi
 # /usr/bin/clang points to an older compiler on both Linux and macOS.
 #
 # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
 # below don't work on macOS. Fortunately, the path change above makes the
 # default values (clang and clang++) resolve to the correct compiler on macOS.
 - if [ "$TRAVIS_OS_NAME" = "linux" ]; then
-    if [ "$CXX" = "clang++" ]; then export CXX="clang++-10" CC="clang-10"; fi;
+    if [ "$CXX" = "clang++" ]; then export CXX="clang++-12" CC="clang-12"; fi;
   fi
 - echo ${CC}
 - echo ${CXX}

From 8949158f5d7264444e5b04530c92e9cc524499c4 Mon Sep 17 00:00:00 2001
From: wineway <wangyuweihx@gmail.com>
Date: Thu, 1 Jul 2021 20:52:01 +0800
Subject: [PATCH 165/181] Fix the random-access-file test exhausting the mmap
 limit with the wrong limit constant

---
 util/env_posix_test.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index da264f0..34bda62 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -243,8 +243,8 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
   // Exhaust the RandomAccessFile mmap limit. This way, the test
   // RandomAccessFile instance below is backed by a file descriptor, not by an
   // mmap region.
-  leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr};
-  for (int i = 0; i < kReadOnlyFileLimit; i++) {
+  leveldb::RandomAccessFile* mmapped_files[kMMapLimit];
+  for (int i = 0; i < kMMapLimit; i++) {
     ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
   }
 
@@ -253,7 +253,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
   CheckCloseOnExecDoesNotLeakFDs(open_fds);
   delete file;
 
-  for (int i = 0; i < kReadOnlyFileLimit; i++) {
+  for (int i = 0; i < kMMapLimit; i++) {
     delete mmapped_files[i];
   }
   ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));

From 8e62cc51246612ff8ea30c3eeffb2407807e5525 Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Tue, 3 Aug 2021 00:25:26 +0000
Subject: [PATCH 166/181] Remove the `/` prefix from the recovery_test test
 file to prevent a double `/`.

PiperOrigin-RevId: 388341429
---
 db/recovery_test.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 3db817e..6c5d42e 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -18,7 +18,7 @@ namespace leveldb {
 class RecoveryTest : public testing::Test {
  public:
   RecoveryTest() : env_(Env::Default()), db_(nullptr) {
-    dbname_ = testing::TempDir() + "/recovery_test";
+    dbname_ = testing::TempDir() + "recovery_test";
     DestroyDB(dbname_, Options());
     Open();
   }

From 54340b4a1020737e17ae4efacc31afeb53022be9 Mon Sep 17 00:00:00 2001
From: ehds <ehds@qq.com>
Date: Sun, 8 Aug 2021 22:24:19 +0800
Subject: [PATCH 167/181] Fix comments position

---
 db/version_set.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/db/version_set.h b/db/version_set.h
index 69f3d70..ea0c925 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -59,9 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
 
 class Version {
  public:
-  // Lookup the value for key.  If found, store it in *val and
-  // return OK.  Else return a non-OK status.  Fills *stats.
-  // REQUIRES: lock is not held
   struct GetStats {
     FileMetaData* seek_file;
     int seek_file_level;
@@ -72,6 +69,9 @@ class Version {
   // REQUIRES: This version has been saved (see VersionSet::SaveTo)
   void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
 
+  // Lookup the value for key.  If found, store it in *val and
+  // return OK.  Else return a non-OK status.  Fills *stats.
+  // REQUIRES: lock is not held
   Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
              GetStats* stats);
 

From 5783a79309bfcd2089147bd474f796347e4a2d1e Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Thu, 2 Sep 2021 21:25:03 +0000
Subject: [PATCH 168/181] Switch CI to GitHub Actions.

PiperOrigin-RevId: 394542401
---
 .appveyor.yml               |  36 -------------
 .github/workflows/build.yml | 101 ++++++++++++++++++++++++++++++++++++
 .travis.yml                 |  88 -------------------------------
 README.md                   |   3 +-
 4 files changed, 102 insertions(+), 126 deletions(-)
 delete mode 100644 .appveyor.yml
 create mode 100644 .github/workflows/build.yml
 delete mode 100644 .travis.yml

diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index 448f183..0000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Build matrix / environment variables are explained on:
-# https://www.appveyor.com/docs/appveyor-yml/
-# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
-
-version: "{build}"
-
-environment:
-  matrix:
-    # AppVeyor currently has no custom job name feature.
-    # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
-    - JOB: Visual Studio 2019
-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
-      CMAKE_GENERATOR: Visual Studio 16 2019
-
-platform:
-  - x86
-  - x64
-
-configuration:
-  - RelWithDebInfo
-  - Debug
-
-build_script:
-  - git submodule update --init --recursive
-  - mkdir build
-  - cd build
-  - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32")
-      else (set CMAKE_GENERATOR_PLATFORM="%platform%")
-  - cmake --version
-  - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%"
-      -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
-  - cmake --build . --config "%CONFIGURATION%"
-  - cd ..
-
-test_script:
-  - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..efb81ee
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,101 @@
+# Copyright 2021 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+name: ci
+on: [push, pull_request]
+
+permissions:
+  contents: read
+
+jobs:
+  build-and-test:
+    name:  >-
+      CI
+      ${{ matrix.os }}
+      ${{ matrix.compiler }}
+      ${{ matrix.optimized && 'release' || 'debug' }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        compiler: [clang, gcc, msvc]
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        optimized: [true, false]
+        exclude:
+        # MSVC only works on Windows.
+        - os: ubuntu-latest
+          compiler: msvc
+        - os: macos-latest
+          compiler: msvc
+        # Not testing with GCC on macOS.
+        - os: macos-latest
+          compiler: gcc
+        # Only testing with MSVC on Windows.
+        - os: windows-latest
+          compiler: clang
+        - os: windows-latest
+          compiler: gcc
+        include:
+        - compiler: clang
+          CC: clang
+          CXX: clang++
+        - compiler: gcc
+          CC: gcc
+          CXX: g++
+        - compiler: msvc
+          CC:
+          CXX:
+
+    env:
+      CMAKE_BUILD_DIR: ${{ github.workspace }}/build
+      CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }}
+      CC: ${{ matrix.CC }}
+      CXX: ${{ matrix.CXX }}
+      BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }}
+      BINARY_PATH: >-
+        ${{ format(
+        startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/',
+        github.workspace,
+        matrix.optimized && 'RelWithDebInfo' || 'Debug') }}
+
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        submodules: true
+
+    - name: Install dependencies on Linux
+      if: ${{ runner.os == 'Linux' }}
+      run: |
+        sudo apt-get update
+        sudo apt-get install libgoogle-perftools-dev libkyotocabinet-dev \
+            libsnappy-dev libsqlite3-dev
+
+    - name: Generate build config
+      run: >-
+        cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}"
+        -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }}
+        -DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/
+
+    - name: Build
+      run: >-
+        cmake --build "${{ env.CMAKE_BUILD_DIR }}"
+        --config "${{ env.CMAKE_BUILD_TYPE }}"
+
+    - name: Run Tests
+      working-directory: ${{ github.workspace }}/build
+      run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose
+
+    - name: Run LevelDB Benchmarks
+      run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }}
+
+    - name: Run SQLite Benchmarks
+      if: ${{ runner.os != 'Windows' }}
+      run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }}
+
+    - name: Run Kyoto Cabinet Benchmarks
+      if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }}
+      run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }}
+
+    - name: Test CMake installation
+      run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index ad59b19..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-# Build matrix / environment variables are explained on:
-# http://about.travis-ci.org/docs/user/build-configuration/
-# This file can be validated on: http://lint.travis-ci.org/
-
-language: cpp
-dist: bionic
-osx_image: xcode12.5
-
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-
-env:
-- BUILD_TYPE=Debug
-- BUILD_TYPE=RelWithDebInfo
-
-jobs:
-  allow_failures:
-  # Homebrew's GCC is currently broken on XCode 11.
-  - compiler: gcc
-    os: osx
-
-addons:
-  apt:
-    sources:
-    - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-12 main'
-      key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
-    - sourceline: 'ppa:ubuntu-toolchain-r/test'
-    packages:
-    - clang-12
-    - cmake
-    - gcc-11
-    - g++-11
-    - libgoogle-perftools-dev
-    - libkyotocabinet-dev
-    - libsnappy-dev
-    - libsqlite3-dev
-    - ninja-build
-  homebrew:
-    packages:
-    - cmake
-    - crc32c
-    - gcc@11
-    - gperftools
-    - kyoto-cabinet
-    - llvm@12
-    - ninja
-    - snappy
-    - sqlite3
-    update: true
-
-install:
-# The following Homebrew packages aren't linked by default, and need to be
-# prepended to the path explicitly.
-- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
-    export PATH="$(brew --prefix llvm)/bin:$PATH";
-  fi
-# /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-11" CC="gcc-11"; fi
-# /usr/bin/clang points to an older compiler on both Linux and macOS.
-#
-# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
-# below don't work on macOS. Fortunately, the path change above makes the
-# default values (clang and clang++) resolve to the correct compiler on macOS.
-- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
-    if [ "$CXX" = "clang++" ]; then export CXX="clang++-12" CC="clang-12"; fi;
-  fi
-- echo ${CC}
-- echo ${CXX}
-- ${CXX} --version
-- cmake --version
-
-before_script:
-- mkdir -p build && cd build
-- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
-    -DCMAKE_INSTALL_PREFIX=$HOME/.local
-- cmake --build .
-- cd ..
-
-script:
-- cd build && ctest --verbose && cd ..
-- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
-- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
-- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
-- cd build && cmake --build . --target install
diff --git a/README.md b/README.md
index 81144dd..3c4d14d 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,6 @@
 **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
 
-[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
-[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
+[![ci](https://github.com/google/leveldb/actions/workflows/build.yml/badge.svg)](https://github.com/google/leveldb/actions/workflows/build.yml)
 
 Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
 

From 11aafab31f96ba0a07ac5b2d5275f289e17c7814 Mon Sep 17 00:00:00 2001
From: zzt <zhuangzhutao@gmail.com>
Date: Fri, 3 Sep 2021 11:18:31 +0800
Subject: [PATCH 169/181] Fix version_set.cc comments typo

Fix a typo in the comment of the FindLargestKey function
---
 db/version_set.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 8d85fce..da38bbb 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1304,7 +1304,7 @@ Compaction* VersionSet::PickCompaction() {
   return c;
 }
 
-// Finds the largest key in a vector of files. Returns true if files it not
+// Finds the largest key in a vector of files. Returns true if files is not
 // empty.
 bool FindLargestKey(const InternalKeyComparator& icmp,
                     const std::vector<FileMetaData*>& files,

From c5d5174a66f02e66d8e30c21ff4761214d8e4d6d Mon Sep 17 00:00:00 2001
From: leveldb Team <no-reply@google.com>
Date: Fri, 10 Sep 2021 00:45:26 +0000
Subject: [PATCH 170/181] Get env_posix.cc building under Fuchsia.

PiperOrigin-RevId: 395824737
---
 util/env_posix.cc | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index d84cd1e..24b1c4c 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -6,7 +6,9 @@
 #include <fcntl.h>
 #include <pthread.h>
 #include <sys/mman.h>
+#ifndef __Fuchsia__
 #include <sys/resource.h>
+#endif
 #include <sys/stat.h>
 #include <sys/time.h>
 #include <sys/types.h>
@@ -757,6 +759,10 @@ int MaxOpenFiles() {
   if (g_open_read_only_file_limit >= 0) {
     return g_open_read_only_file_limit;
   }
+#ifdef __Fuchsia__
+  // Fuchsia doesn't implement getrlimit.
+  g_open_read_only_file_limit = 50;
+#else
   struct ::rlimit rlim;
   if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
     // getrlimit failed, fallback to hard-coded default.
@@ -767,6 +773,7 @@ int MaxOpenFiles() {
     // Allow use of 20% of available file descriptors for read-only files.
     g_open_read_only_file_limit = rlim.rlim_cur / 5;
   }
+#endif
   return g_open_read_only_file_limit;
 }
 

From 68d14a723a23eac5e53d4643890f27651eb2df28 Mon Sep 17 00:00:00 2001
From: "Dylan K. Taylor" <dktapps@pmmp.io>
Date: Sat, 9 Oct 2021 16:21:57 +0100
Subject: [PATCH 171/181] Prevent handle used for LOG from being inherited by
 subprocesses

I recently encountered a problem with this because Windows doesn't allow
files to be deleted when there are open handles to them.

Other files opened by leveldb are not affected because by and large they
are using CreateFileA, which does not allow inheritance when
lpSecurityAttributes is null (ref:
https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea)

However, fopen() _does_ allow inheritance, and it needs to be expressly
disabled.
https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen?view=msvc-160
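
A Windows-only sketch of the difference (assuming the Microsoft CRT, where
the nonstandard "N" mode flag maps to _O_NOINHERIT); only the mode string
changes, but the handle for LOG is then kept out of child processes, so
the file stays deletable while they run.

    #ifdef _WIN32
    #include <cstdio>

    int main() {
      // Default mode: the underlying handle is inheritable, so a child
      // process spawned with handle inheritance can hold the file open
      // and block deletion on Windows.
      std::FILE* inheritable = std::fopen("LOG.old", "w");

      // "N" (_O_NOINHERIT) keeps the handle out of child processes.
      std::FILE* not_inherited = std::fopen("LOG", "wN");

      if (inheritable) std::fclose(inheritable);
      if (not_inherited) std::fclose(not_inherited);
      return 0;
    }
    #else
    int main() { return 0; }  // the "N" flag is a Microsoft CRT extension
    #endif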
---
 util/env_windows.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/env_windows.cc b/util/env_windows.cc
index 449f564..84905df 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -622,7 +622,7 @@ class WindowsEnv : public Env {
   }
 
   Status NewLogger(const std::string& filename, Logger** result) override {
-    std::FILE* fp = std::fopen(filename.c_str(), "w");
+    std::FILE* fp = std::fopen(filename.c_str(), "wN");
     if (fp == nullptr) {
       *result = nullptr;
       return WindowsError(filename, ::GetLastError());

From d7da5d9d353cf3d865109fc1aac8e587f6086ef5 Mon Sep 17 00:00:00 2001
From: xiong-ang <xiong.ang@foxmail.com>
Date: Fri, 22 Oct 2021 18:00:57 +0800
Subject: [PATCH 172/181] fix some trifling points

---
 doc/index.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/doc/index.md b/doc/index.md
index 01693ad..56967c7 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -345,7 +345,7 @@ non-NULL, it is used to cache frequently used uncompressed block contents.
 #include "leveldb/cache.h"
 
 leveldb::Options options;
-options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100M cache capacity
 leveldb::DB* db;
 leveldb::DB::Open(options, name, &db);
 ... use the db ...
@@ -369,6 +369,7 @@ leveldb::Iterator* it = db->NewIterator(options);
 for (it->SeekToFirst(); it->Valid(); it->Next()) {
   ...
 }
+delete it;
 ```
 
 ### Key Layout
@@ -424,21 +425,21 @@ spaces. For example:
 ```c++
 class CustomFilterPolicy : public leveldb::FilterPolicy {
  private:
-  FilterPolicy* builtin_policy_;
+  leveldb::FilterPolicy* builtin_policy_;
 
  public:
-  CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {}
+  CustomFilterPolicy() : builtin_policy_(leveldb::NewBloomFilterPolicy(10)) {}
   ~CustomFilterPolicy() { delete builtin_policy_; }
 
   const char* Name() const { return "IgnoreTrailingSpacesFilter"; }
 
-  void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+  void CreateFilter(const leveldb::Slice* keys, int n, std::string* dst) const {
     // Use builtin bloom filter code after removing trailing spaces
-    std::vector<Slice> trimmed(n);
+    std::vector<leveldb::Slice> trimmed(n);
     for (int i = 0; i < n; i++) {
       trimmed[i] = RemoveTrailingSpaces(keys[i]);
     }
-    return builtin_policy_->CreateFilter(trimmed.data(), n, dst);
+    builtin_policy_->CreateFilter(trimmed.data(), n, dst);
   }
 };
 ```

From dd6658754f85f54058be416e1b43150e39d8ffa5 Mon Sep 17 00:00:00 2001
From: Felipe Oliveira Carvalho <felipekde@gmail.com>
Date: Mon, 15 Nov 2021 00:36:57 +0100
Subject: [PATCH 173/181] Remove <pthread.h> include and find_package() from
 build files

---
 util/env_posix.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 24b1c4c..9ac03f8 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -4,7 +4,6 @@
 
 #include <dirent.h>
 #include <fcntl.h>
-#include <pthread.h>
 #include <sys/mman.h>
 #ifndef __Fuchsia__
 #include <sys/resource.h>

From 42d00a80cc0bc776f19325e9043fcf4e7892ec81 Mon Sep 17 00:00:00 2001
From: Eric Wang <wangchaogo1990@gmail.com>
Date: Sun, 5 Dec 2021 11:44:55 +0800
Subject: [PATCH 174/181] Remove redundant code: SetNextFile has already been
 called earlier in this function

---
 db/version_set.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 8d85fce..f457e26 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -806,7 +806,6 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
     // first call to LogAndApply (when opening the database).
     assert(descriptor_file_ == nullptr);
     new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
-    edit->SetNextFile(next_file_number_);
     s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
     if (s.ok()) {
       descriptor_log_ = new log::Writer(descriptor_file_);

From 335876a1335c765f818ae10d9c4d18f563cdfce5 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 22 Dec 2021 19:12:56 +0000
Subject: [PATCH 175/181] Add invariant checks to Limiter in Env
 implementations.

PiperOrigin-RevId: 417853172
---
 util/env_posix.cc   | 33 ++++++++++++++++++++++++++++++---
 util/env_windows.cc | 24 ++++++++++++++++++++++--
 2 files changed, 52 insertions(+), 5 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 9ac03f8..8b8d9c8 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -73,7 +73,14 @@ Status PosixError(const std::string& context, int error_number) {
 class Limiter {
  public:
   // Limit maximum number of resources to |max_acquires|.
-  Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+  Limiter(int max_acquires)
+      :
+#if !defined(NDEBUG)
+        max_acquires_(max_acquires),
+#endif  // !defined(NDEBUG)
+        acquires_allowed_(max_acquires) {
+    assert(max_acquires >= 0);
+  }
 
   Limiter(const Limiter&) = delete;
   Limiter operator=(const Limiter&) = delete;
@@ -86,15 +93,35 @@ class Limiter {
 
     if (old_acquires_allowed > 0) return true;
 
-    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+    int pre_increment_acquires_allowed =
+        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+
+    // Silence compiler warnings about unused arguments when NDEBUG is defined.
+    (void)pre_increment_acquires_allowed;
+    // If the check below fails, Release() was called more times than acquire.
+    assert(pre_increment_acquires_allowed < max_acquires_);
+
     return false;
   }
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
+  void Release() {
+    int old_acquires_allowed =
+        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+
+    // Silence compiler warnings about unused arguments when NDEBUG is defined.
+    (void)old_acquires_allowed;
+    // If the check below fails, Release() was called more times than acquire.
+    assert(old_acquires_allowed < max_acquires_);
+  }
 
  private:
+#if !defined(NDEBUG)
+  // Catches an excessive number of Release() calls.
+  const int max_acquires_;
+#endif  // !defined(NDEBUG)
+
   // The number of available resources.
   //
   // This is a counter and is not tied to the invariants of any other class, so
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 84905df..9ffcd07 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -114,7 +114,14 @@ class ScopedHandle {
 class Limiter {
  public:
   // Limit maximum number of resources to |max_acquires|.
-  Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+  Limiter(int max_acquires)
+      :
+#if !defined(NDEBUG)
+        max_acquires_(max_acquires),
+#endif  // !defined(NDEBUG)
+        acquires_allowed_(max_acquires) {
+    assert(max_acquires >= 0);
+  }
 
   Limiter(const Limiter&) = delete;
   Limiter operator=(const Limiter&) = delete;
@@ -133,9 +140,22 @@ class Limiter {
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
+  void Release() {
+    int old_acquires_allowed =
+        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+
+    // Silence compiler warnings about unused arguments when NDEBUG is defined.
+    (void)old_acquires_allowed;
+    // If the check below fails, Release() was called more times than acquire.
+    assert(old_acquires_allowed < max_acquires_);
+  }
 
  private:
+#if !defined(NDEBUG)
+  // Catches an excessive number of Release() calls.
+  const int max_acquires_;
+#endif  // !defined(NDEBUG)
+
   // The number of available resources.
   //
   // This is a counter and is not tied to the invariants of any other class, so

From b2801ee1a0e3a5c0c393dc04eef63691f79ed694 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Wed, 29 Dec 2021 03:48:42 +0000
Subject: [PATCH 176/181] Extract benchmark from db_test.cc.

The benchmark in db/db_test.cc is extracted to its own file,
benchmarks/db_bench_log.cc.

PiperOrigin-RevId: 418713499
---
 CMakeLists.txt             | 14 +++---
 benchmarks/db_bench_log.cc | 92 ++++++++++++++++++++++++++++++++++++++
 db/db_test.cc              | 64 --------------------------
 3 files changed, 99 insertions(+), 71 deletions(-)
 create mode 100644 benchmarks/db_bench_log.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f8285b8..7690302 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -298,11 +298,6 @@ if(LEVELDB_BUILD_TESTS)
   # This project is tested using GoogleTest.
   add_subdirectory("third_party/googletest")
 
-  # This project uses Google benchmark for benchmarking.
-  set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
-  set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
-  add_subdirectory("third_party/benchmark")
-
   # GoogleTest triggers a missing field initializers warning.
   if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
     set_property(TARGET gtest
@@ -323,7 +318,7 @@ if(LEVELDB_BUILD_TESTS)
 
         "${test_file}"
     )
-    target_link_libraries("${test_target_name}" leveldb gmock gtest benchmark)
+    target_link_libraries("${test_target_name}" leveldb gmock gtest)
     target_compile_definitions("${test_target_name}"
       PRIVATE
         ${LEVELDB_PLATFORM_NAME}=1
@@ -386,6 +381,11 @@ if(LEVELDB_BUILD_TESTS)
 endif(LEVELDB_BUILD_TESTS)
 
 if(LEVELDB_BUILD_BENCHMARKS)
+  # This project uses Google benchmark for benchmarking.
+  set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+  set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+  add_subdirectory("third_party/benchmark")
+
   function(leveldb_benchmark bench_file)
     get_filename_component(bench_target_name "${bench_file}" NAME_WE)
 
@@ -400,7 +400,7 @@ if(LEVELDB_BUILD_BENCHMARKS)
 
         "${bench_file}"
     )
-    target_link_libraries("${bench_target_name}" leveldb gmock gtest)
+    target_link_libraries("${bench_target_name}" leveldb gmock gtest benchmark)
     target_compile_definitions("${bench_target_name}"
       PRIVATE
         ${LEVELDB_PLATFORM_NAME}=1
diff --git a/benchmarks/db_bench_log.cc b/benchmarks/db_bench_log.cc
new file mode 100644
index 0000000..a1845bf
--- /dev/null
+++ b/benchmarks/db_bench_log.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cinttypes>
+#include <cstdio>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "benchmark/benchmark.h"
+#include "db/version_set.h"
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/options.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+namespace {
+
+std::string MakeKey(unsigned int num) {
+  char buf[30];
+  std::snprintf(buf, sizeof(buf), "%016u", num);
+  return std::string(buf);
+}
+
+void BM_LogAndApply(benchmark::State& state) {
+  const int num_base_files = state.range(0);
+
+  std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
+  DestroyDB(dbname, Options());
+
+  DB* db = nullptr;
+  Options opts;
+  opts.create_if_missing = true;
+  Status s = DB::Open(opts, dbname, &db);
+  ASSERT_LEVELDB_OK(s);
+  ASSERT_TRUE(db != nullptr);
+
+  delete db;
+  db = nullptr;
+
+  Env* env = Env::Default();
+
+  port::Mutex mu;
+  MutexLock l(&mu);
+
+  InternalKeyComparator cmp(BytewiseComparator());
+  Options options;
+  VersionSet vset(dbname, &options, nullptr, &cmp);
+  bool save_manifest;
+  ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
+  VersionEdit vbase;
+  uint64_t fnum = 1;
+  for (int i = 0; i < num_base_files; i++) {
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
+  }
+  ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
+
+  uint64_t start_micros = env->NowMicros();
+
+  for (auto st : state) {
+    VersionEdit vedit;
+    vedit.RemoveFile(2, fnum);
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
+    vset.LogAndApply(&vedit, &mu);
+  }
+
+  uint64_t stop_micros = env->NowMicros();
+  unsigned int us = stop_micros - start_micros;
+  char buf[16];
+  std::snprintf(buf, sizeof(buf), "%d", num_base_files);
+  std::fprintf(stderr,
+               "BM_LogAndApply/%-6s   %8" PRIu64
+               " iters : %9u us (%7.0f us / iter)\n",
+               buf, state.iterations(), us, ((float)us) / state.iterations());
+}
+
+BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
+
+}  // namespace
+
+}  // namespace leveldb
+
+BENCHMARK_MAIN();
diff --git a/db/db_test.cc b/db/db_test.cc
index 908b41d..7f22688 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -9,7 +9,6 @@
 #include <string>
 
 #include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
@@ -2295,72 +2294,9 @@ TEST_F(DBTest, Randomized) {
   } while (ChangeOptions());
 }
 
-std::string MakeKey(unsigned int num) {
-  char buf[30];
-  std::snprintf(buf, sizeof(buf), "%016u", num);
-  return std::string(buf);
-}
-
-static void BM_LogAndApply(benchmark::State& state) {
-  const int num_base_files = state.range(0);
-
-  std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
-  DestroyDB(dbname, Options());
-
-  DB* db = nullptr;
-  Options opts;
-  opts.create_if_missing = true;
-  Status s = DB::Open(opts, dbname, &db);
-  ASSERT_LEVELDB_OK(s);
-  ASSERT_TRUE(db != nullptr);
-
-  delete db;
-  db = nullptr;
-
-  Env* env = Env::Default();
-
-  port::Mutex mu;
-  MutexLock l(&mu);
-
-  InternalKeyComparator cmp(BytewiseComparator());
-  Options options;
-  VersionSet vset(dbname, &options, nullptr, &cmp);
-  bool save_manifest;
-  ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
-  VersionEdit vbase;
-  uint64_t fnum = 1;
-  for (int i = 0; i < num_base_files; i++) {
-    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
-    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
-  }
-  ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
-
-  uint64_t start_micros = env->NowMicros();
-
-  for (auto st : state) {
-    VersionEdit vedit;
-    vedit.RemoveFile(2, fnum);
-    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
-    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
-    vset.LogAndApply(&vedit, &mu);
-  }
-  uint64_t stop_micros = env->NowMicros();
-  unsigned int us = stop_micros - start_micros;
-  char buf[16];
-  std::snprintf(buf, sizeof(buf), "%d", num_base_files);
-  std::fprintf(stderr,
-               "BM_LogAndApply/%-6s   %8" PRIu64
-               " iters : %9u us (%7.0f us / iter)\n",
-               buf, state.iterations(), us, ((float)us) / state.iterations());
-}
-
-BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
 }  // namespace leveldb
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
   return RUN_ALL_TESTS();
 }

From 7a2f64ed504f510425183fa225ae80c671f0145f Mon Sep 17 00:00:00 2001
From: Shawn Zhong <github@shawnzhong.com>
Date: Thu, 30 Dec 2021 18:33:55 -0600
Subject: [PATCH 177/181] Update env_posix.cc

---
 util/env_posix.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/util/env_posix.cc b/util/env_posix.cc
index 8b8d9c8..d8d793a 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -870,7 +870,7 @@ class SingletonEnv {
  public:
   SingletonEnv() {
 #if !defined(NDEBUG)
-    env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+    env_initialized_.store(true, std::memory_order_relaxed);
 #endif  // !defined(NDEBUG)
     static_assert(sizeof(env_storage_) >= sizeof(EnvType),
                   "env_storage_ will not fit the Env");
@@ -887,7 +887,7 @@ class SingletonEnv {
 
   static void AssertEnvNotInitialized() {
 #if !defined(NDEBUG)
-    assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+    assert(!env_initialized_.load(std::memory_order_relaxed));
 #endif  // !defined(NDEBUG)
   }
 

From 8f464e7f68fd9d50ed39b2866ef8dac9c837439d Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Mon, 3 Jan 2022 20:57:56 +0000
Subject: [PATCH 178/181] Remove main() from most tests.

This gives some flexibility to embedders.

Currently, embedders have to build a binary for each test file.

After this CL, embedders can still choose to have a binary for each test
file, by linking each test file with a googletest target that includes
main() (usually "gtest_main"). Embedders can also choose to build a
single binary for almost all test files, and link with a googletest
target that includes main(). The latter is more convenient for projects
that have very few test binaries, like Chromium.

PiperOrigin-RevId: 419470798
---
 CMakeLists.txt                | 88 +++++++++++++++++++++--------------
 db/autocompact_test.cc        |  5 --
 db/corruption_test.cc         |  5 --
 db/db_test.cc                 |  5 --
 db/dbformat_test.cc           |  5 --
 db/fault_injection_test.cc    |  5 --
 db/filename_test.cc           |  5 --
 db/log_test.cc                |  5 --
 db/recovery_test.cc           |  5 --
 db/skiplist_test.cc           |  5 --
 db/version_edit_test.cc       |  5 --
 db/version_set_test.cc        |  5 --
 db/write_batch_test.cc        |  5 --
 helpers/memenv/memenv_test.cc |  5 --
 issues/issue178_test.cc       |  5 --
 issues/issue200_test.cc       |  5 --
 issues/issue320_test.cc       |  5 --
 table/filter_block_test.cc    |  5 --
 table/table_test.cc           |  5 --
 util/arena_test.cc            |  5 --
 util/bloom_test.cc            |  5 --
 util/cache_test.cc            |  5 --
 util/coding_test.cc           |  5 --
 util/crc32c_test.cc           |  5 --
 util/env_test.cc              |  5 --
 util/hash_test.cc             |  5 --
 util/logging_test.cc          |  5 --
 util/no_destructor_test.cc    |  5 --
 util/status_test.cc           |  5 --
 29 files changed, 54 insertions(+), 174 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7690302..b829c94 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -306,6 +306,60 @@ if(LEVELDB_BUILD_TESTS)
         APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
   endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
 
+  add_executable(leveldb_tests "")
+  target_sources(leveldb_tests
+    PRIVATE
+      # "db/fault_injection_test.cc"
+      # "issues/issue178_test.cc"
+      # "issues/issue200_test.cc"
+      # "issues/issue320_test.cc"
+      "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+      # "util/env_test.cc"
+      "util/status_test.cc"
+      "util/no_destructor_test.cc"
+      "util/testutil.cc"
+      "util/testutil.h"
+  )
+  if(NOT BUILD_SHARED_LIBS)
+    target_sources(leveldb_tests
+      PRIVATE
+        "db/autocompact_test.cc"
+        "db/corruption_test.cc"
+        "db/db_test.cc"
+        "db/dbformat_test.cc"
+        "db/filename_test.cc"
+        "db/log_test.cc"
+        "db/recovery_test.cc"
+        "db/skiplist_test.cc"
+        "db/version_edit_test.cc"
+        "db/version_set_test.cc"
+        "db/write_batch_test.cc"
+        "helpers/memenv/memenv_test.cc"
+        "table/filter_block_test.cc"
+        "table/table_test.cc"
+        "util/arena_test.cc"
+        "util/bloom_test.cc"
+        "util/cache_test.cc"
+        "util/coding_test.cc"
+        "util/crc32c_test.cc"
+        "util/hash_test.cc"
+        "util/logging_test.cc"
+    )
+  endif(NOT BUILD_SHARED_LIBS)
+  target_link_libraries(leveldb_tests leveldb gmock gtest gtest_main)
+  target_compile_definitions(leveldb_tests
+    PRIVATE
+      ${LEVELDB_PLATFORM_NAME}=1
+  )
+  if (NOT HAVE_CXX17_HAS_INCLUDE)
+    target_compile_definitions(leveldb_tests
+      PRIVATE
+        LEVELDB_HAS_PORT_CONFIG_H=1
+    )
+  endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+  add_test(NAME "leveldb_tests" COMMAND "leveldb_tests")
+
   function(leveldb_test test_file)
     get_filename_component(test_target_name "${test_file}" NAME_WE)
 
@@ -334,42 +388,8 @@ if(LEVELDB_BUILD_TESTS)
   endfunction(leveldb_test)
 
   leveldb_test("db/c_test.c")
-  leveldb_test("db/fault_injection_test.cc")
-
-  leveldb_test("issues/issue178_test.cc")
-  leveldb_test("issues/issue200_test.cc")
-  leveldb_test("issues/issue320_test.cc")
-
-  leveldb_test("util/env_test.cc")
-  leveldb_test("util/status_test.cc")
-  leveldb_test("util/no_destructor_test.cc")
 
   if(NOT BUILD_SHARED_LIBS)
-    leveldb_test("db/autocompact_test.cc")
-    leveldb_test("db/corruption_test.cc")
-    leveldb_test("db/db_test.cc")
-    leveldb_test("db/dbformat_test.cc")
-    leveldb_test("db/filename_test.cc")
-    leveldb_test("db/log_test.cc")
-    leveldb_test("db/recovery_test.cc")
-    leveldb_test("db/skiplist_test.cc")
-    leveldb_test("db/version_edit_test.cc")
-    leveldb_test("db/version_set_test.cc")
-    leveldb_test("db/write_batch_test.cc")
-
-    leveldb_test("helpers/memenv/memenv_test.cc")
-
-    leveldb_test("table/filter_block_test.cc")
-    leveldb_test("table/table_test.cc")
-
-    leveldb_test("util/arena_test.cc")
-    leveldb_test("util/bloom_test.cc")
-    leveldb_test("util/cache_test.cc")
-    leveldb_test("util/coding_test.cc")
-    leveldb_test("util/crc32c_test.cc")
-    leveldb_test("util/hash_test.cc")
-    leveldb_test("util/logging_test.cc")
-
     # TODO(costan): This test also uses
     #               "util/env_{posix|windows}_test_helper.h"
     if (WIN32)
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index 3b7241b..69341e3 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -108,8 +108,3 @@ TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
 TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index a31f448..dc7da76 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -360,8 +360,3 @@ TEST_F(CorruptionTest, UnrelatedKeys) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/db_test.cc b/db/db_test.cc
index 7f22688..9bd6e14 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2295,8 +2295,3 @@ TEST_F(DBTest, Randomized) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 4a11c4a..7f3f81a 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -126,8 +126,3 @@ TEST(FormatTest, InternalKeyDebugString) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 6eebafa..ef864a4 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -548,8 +548,3 @@ TEST_F(FaultInjectionTest, FaultTestWithLogReuse) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/filename_test.cc b/db/filename_test.cc
index f291d72..9ac0111 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -125,8 +125,3 @@ TEST(FileNameTest, Construction) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/log_test.cc b/db/log_test.cc
index 346b19c..d55d4dd 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -556,8 +556,3 @@ TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
 
 }  // namespace log
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 6c5d42e..1d9f621 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -332,8 +332,3 @@ TEST_F(RecoveryTest, ManifestMissing) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 79a5b86..1d355cb 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -366,8 +366,3 @@ TEST(SkipTest, Concurrent4) { RunConcurrent(4); }
 TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index acafab0..a108c15 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -39,8 +39,3 @@ TEST(VersionEditTest, EncodeDecode) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index dee6b4c..64bb983 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -329,8 +329,3 @@ TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 64df9b8..1a3ea8f 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -130,8 +130,3 @@ TEST(WriteBatchTest, ApproximateSize) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 3f03cb6..909a0ca 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -257,8 +257,3 @@ TEST_F(MemEnvTest, DBTest) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index 8fa5bb9..5cd5862 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -83,8 +83,3 @@ TEST(Issue178, Test) {
 }
 
 }  // anonymous namespace
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc
index 4eba23a..959b371 100644
--- a/issues/issue200_test.cc
+++ b/issues/issue200_test.cc
@@ -52,8 +52,3 @@ TEST(Issue200, Test) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index c08296a..9d7fa7b 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -124,8 +124,3 @@ TEST(Issue320, Test) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 91a6be2..3ee41cf 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -120,8 +120,3 @@ TEST_F(FilterBlockTest, MultiChunk) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/table/table_test.cc b/table/table_test.cc
index 190dd0f..7f0f998 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -827,8 +827,3 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 90226fe..3e2011e 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -59,8 +59,3 @@ TEST(ArenaTest, Simple) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 520473e..9f11108 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -152,8 +152,3 @@ TEST_F(BloomTest, VaryingLengths) {
 // Different bits-per-byte
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 79cfc27..e68da34 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -222,8 +222,3 @@ TEST_F(CacheTest, ZeroSizeCache) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/coding_test.cc b/util/coding_test.cc
index aa6c748..cceda14 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -191,8 +191,3 @@ TEST(Coding, Strings) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 647e561..2fe1c41 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -54,8 +54,3 @@ TEST(CRC, Mask) {
 
 }  // namespace crc32c
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/env_test.cc b/util/env_test.cc
index 491ef43..fc69d71 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -233,8 +233,3 @@ TEST_F(EnvTest, ReopenAppendableFile) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/hash_test.cc b/util/hash_test.cc
index 6d6771f..0ea5977 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -39,8 +39,3 @@ TEST(HASH, SignedUnsignedIssue) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/logging_test.cc b/util/logging_test.cc
index 24e1fe9..1746c57 100644
--- a/util/logging_test.cc
+++ b/util/logging_test.cc
@@ -138,8 +138,3 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
index 68fdfee..e3602cc 100644
--- a/util/no_destructor_test.cc
+++ b/util/no_destructor_test.cc
@@ -42,8 +42,3 @@ TEST(NoDestructorTest, StaticInstance) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/util/status_test.cc b/util/status_test.cc
index 914b386..dbf5faa 100644
--- a/util/status_test.cc
+++ b/util/status_test.cc
@@ -37,8 +37,3 @@ TEST(Status, MoveConstructor) {
 }
 
 }  // namespace leveldb
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}

From 0e8aa26c4e9325f04e186defca123f7d4837791f Mon Sep 17 00:00:00 2001
From: Dimitris Apostolou <dimitris.apostolou@icloud.com>
Date: Wed, 5 Jan 2022 11:04:16 +0200
Subject: [PATCH 179/181] Fix typos

---
 db/snapshot.h       | 2 +-
 util/env_posix.cc   | 4 ++--
 util/env_windows.cc | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/db/snapshot.h b/db/snapshot.h
index 9f1d664..817bb7b 100644
--- a/db/snapshot.h
+++ b/db/snapshot.h
@@ -25,7 +25,7 @@ class SnapshotImpl : public Snapshot {
   friend class SnapshotList;
 
   // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
-  // implementation operates on the next/previous fields direcly.
+  // implementation operates on the next/previous fields directly.
   SnapshotImpl* prev_;
   SnapshotImpl* next_;
 
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 8b8d9c8..8405909 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -242,7 +242,7 @@ class PosixMmapReadableFile final : public RandomAccessFile {
   // over the ownership of the region.
   //
   // |mmap_limiter| must outlive this instance. The caller must have already
-  // aquired the right to use one mmap region, which will be released when this
+  // acquired the right to use one mmap region, which will be released when this
   // instance is destroyed.
   PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
                         Limiter* mmap_limiter)
@@ -756,7 +756,7 @@ class PosixEnv : public Env {
   // Instances are constructed on the thread calling Schedule() and used on the
   // background thread.
   //
-  // This structure is thread-safe beacuse it is immutable.
+  // This structure is thread-safe because it is immutable.
   struct BackgroundWorkItem {
     explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
         : function(function), arg(arg) {}
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 9ffcd07..c6d439c 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -681,7 +681,7 @@ class WindowsEnv : public Env {
   // Instances are constructed on the thread calling Schedule() and used on the
   // background thread.
   //
-  // This structure is thread-safe beacuse it is immutable.
+  // This structure is thread-safe because it is immutable.
   struct BackgroundWorkItem {
     explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
         : function(function), arg(arg) {}

From 87b3a371b1a12ed17dae3b80239a84b6bbecd570 Mon Sep 17 00:00:00 2001
From: xindubawukong <xindubawukong@gmail.com>
Date: Thu, 6 Jan 2022 03:11:11 +0800
Subject: [PATCH 180/181] remove unused private declarations from cache.h

---
 include/leveldb/cache.h | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h
index 98c95ac..a94c683 100644
--- a/include/leveldb/cache.h
+++ b/include/leveldb/cache.h
@@ -96,14 +96,6 @@ class LEVELDB_EXPORT Cache {
   // Return an estimate of the combined charges of all elements stored in the
   // cache.
   virtual size_t TotalCharge() const = 0;
-
- private:
-  void LRU_Remove(Handle* e);
-  void LRU_Append(Handle* e);
-  void Unref(Handle* e);
-
-  struct Rep;
-  Rep* rep_;
 };
 
 }  // namespace leveldb

From 7ee3889a6137075560e64a7ad4289c10d4cdafc9 Mon Sep 17 00:00:00 2001
From: Victor Costan <costan@google.com>
Date: Sun, 9 Jan 2022 03:04:29 +0000
Subject: [PATCH 181/181] VersionSet::Builder::Apply() does not mutate its
 argument.

PiperOrigin-RevId: 420533763
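
A minimal, illustrative sketch of the const-correctness idea behind this
change (Edit and Builder below are stand-ins, not LevelDB's actual
VersionEdit/VersionSet::Builder): Apply() only reads from *edit, so taking a
pointer-to-const documents that guarantee and lets callers pass const objects.

    #include <utility>
    #include <vector>

    // Stand-in for a read-only edit record.
    struct Edit {
      std::vector<std::pair<int, int>> compact_pointers;  // (level, key id)
    };

    class Builder {
     public:
      // Reads from *edit but never modifies it, hence the const parameter.
      void Apply(const Edit* edit) {
        for (const auto& cp : edit->compact_pointers) {
          last_compaction_level_ = cp.first;
        }
      }

     private:
      int last_compaction_level_ = 0;
    };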
---
 db/version_set.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index 8d85fce..597e226 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -626,7 +626,7 @@ class VersionSet::Builder {
   }
 
   // Apply all of the edits in *edit to the current state.
-  void Apply(VersionEdit* edit) {
+  void Apply(const VersionEdit* edit) {
     // Update compaction pointers
     for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
       const int level = edit->compact_pointers_[i].first;