// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <sqlite3.h>

#include <cstdio>
#include <cstdlib>

#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"

// Comma-separated list of operations to run in the specified order
//   Actual benchmarks:
//
//   fillseq        -- write N values in sequential key order in async mode
//   fillseqsync    -- write N/100 values in sequential key order in sync mode
//   fillseqbatch   -- batch write N values in sequential key order in async mode
//   fillrandom     -- write N values in random key order in async mode
//   fillrandsync   -- write N/100 values in random key order in sync mode
//   fillrandbatch  -- batch write N values in random key order in async mode
//   overwrite      -- overwrite N values in random key order in async mode
//   overwritebatch -- batch overwrite N values in random key order in async mode
//   fillrand100K   -- write N/1000 100K values in random order in async mode
//   fillseq100K    -- write N/1000 100K values in sequential order in async mode
//   readseq        -- read N times sequentially
//   readrandom     -- read N times in random order
//   readrand100K   -- read N/1000 100K values in random order in async mode
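//
// Example invocation (flag parsing is in main() below; the binary name may
// vary with the build setup):
//   ./db_bench_sqlite3 --benchmarks=fillseq,readrandom --num=100000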
static const char* FLAGS_benchmarks =
    "fillseq,"
    "fillseqsync,"
    "fillseqbatch,"
    "fillrandom,"
    "fillrandsync,"
    "fillrandbatch,"
    "overwrite,"
    "overwritebatch,"
    "readrandom,"
    "readseq,"
    "fillrand100K,"
    "fillseq100K,"
    "readseq,"
    "readrand100K,";

// Number of key/values to place in database
static int FLAGS_num = 1000000;

// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;

// Size of each value
static int FLAGS_value_size = 100;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// Arrange to generate values that shrink to this fraction of
// their original size after compression
static double FLAGS_compression_ratio = 0.5;

// Page size. Default 1 KB.
static int FLAGS_page_size = 1024;

// Number of pages.
// Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB.
static int FLAGS_num_pages = 4096;

// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
static bool FLAGS_use_existing_db = false;

// If true, the SQLite table has ROWIDs.
static bool FLAGS_use_rowids = false;

// If true, batched write and read operations are wrapped in a transaction.
static bool FLAGS_transaction = true;

// If true, we enable Write-Ahead Logging
static bool FLAGS_WAL_enabled = true;

// Use the db with the following name.
static const char* FLAGS_db = nullptr;

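// Error-checking helpers: each prints a diagnostic and aborts the benchmark
// process on failure.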
inline static void ExecErrorCheck(int status, char* err_msg) {
  if (status != SQLITE_OK) {
    fprintf(stderr, "SQL error: %s\n", err_msg);
    sqlite3_free(err_msg);
    exit(1);
  }
}

inline static void StepErrorCheck(int status) {
  if (status != SQLITE_DONE) {
    fprintf(stderr, "SQL step error: status = %d\n", status);
    exit(1);
  }
}

inline static void ErrorCheck(int status) {
  if (status != SQLITE_OK) {
    fprintf(stderr, "sqlite3 error: status = %d\n", status);
    exit(1);
  }
}

inline static void WalCheckpoint(sqlite3* db_) {
  // Flush all writes to disk
  if (FLAGS_WAL_enabled) {
    sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
                              nullptr);
  }
}

namespace leveldb {

// Helper for quickly generating random data.
namespace {
class RandomGenerator {
 private:
  std::string data_;
  int pos_;

 public:
  RandomGenerator() {
    // We use a limited amount of data over and over again and ensure
    // that it is larger than the compression window (32KB), and also
    // large enough to serve all typical value sizes we want to write.
    Random rnd(301);
    std::string piece;
    while (data_.size() < 1048576) {
      // Add a short fragment that is as compressible as specified
      // by FLAGS_compression_ratio.
      test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
      data_.append(piece);
    }
    pos_ = 0;
  }

  Slice Generate(int len) {
    if (pos_ + len > data_.size()) {
      pos_ = 0;
      assert(len < data_.size());
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

static Slice TrimSpace(Slice s) {
  int start = 0;
  while (start < s.size() && isspace(s[start])) {
    start++;
  }
  int limit = s.size();
  while (limit > start && isspace(s[limit - 1])) {
    limit--;
  }
  return Slice(s.data() + start, limit - start);
}

}  // namespace

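// Benchmark harness: creates SQLite databases under the Env test directory,
// runs each benchmark named in FLAGS_benchmarks in order, and reports
// per-operation timings.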
class Benchmark {
 private:
  sqlite3* db_;
  int db_num_;
  int num_;
  int reads_;
  double start_;
  double last_op_finish_;
  int64_t bytes_;
  std::string message_;
  Histogram hist_;
  RandomGenerator gen_;
  Random rand_;

  // State kept for progress messages
  int done_;
  int next_report_;  // When to report next

  void PrintHeader() {
    const int kKeySize = 16;
    PrintEnvironment();
    fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
    fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
    fprintf(stdout, "Entries: %d\n", num_);
    fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
             1048576.0));
    PrintWarnings();
    fprintf(stdout, "------------------------------------------------\n");
  }

  void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
    fprintf(
        stdout,
        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
    fprintf(stdout,
            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
  }

  void PrintEnvironment() {
    fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);

#if defined(__linux)
    time_t now = time(nullptr);
    fprintf(stderr, "Date: %s", ctime(&now));  // ctime() adds newline

    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
    if (cpuinfo != nullptr) {
      char line[1000];
      int num_cpus = 0;
      std::string cpu_type;
      std::string cache_size;
      while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
        const char* sep = strchr(line, ':');
        if (sep == nullptr) {
          continue;
        }
        Slice key = TrimSpace(Slice(line, sep - 1 - line));
        Slice val = TrimSpace(Slice(sep + 1));
        if (key == "model name") {
          ++num_cpus;
          cpu_type = val.ToString();
        } else if (key == "cache size") {
          cache_size = val.ToString();
        }
      }
      fclose(cpuinfo);
      fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
      fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
    }
#endif
  }

  void Start() {
    start_ = Env::Default()->NowMicros() * 1e-6;
    bytes_ = 0;
    message_.clear();
    last_op_finish_ = start_;
    hist_.Clear();
    done_ = 0;
    next_report_ = 100;
  }

  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = Env::Default()->NowMicros() * 1e-6;
      double micros = (now - last_op_finish_) * 1e6;
      hist_.Add(micros);
      if (micros > 20000) {
        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
        fflush(stderr);
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      if (next_report_ < 1000)
        next_report_ += 100;
      else if (next_report_ < 5000)
        next_report_ += 500;
      else if (next_report_ < 10000)
        next_report_ += 1000;
      else if (next_report_ < 50000)
        next_report_ += 5000;
      else if (next_report_ < 100000)
        next_report_ += 10000;
      else if (next_report_ < 500000)
        next_report_ += 50000;
      else
        next_report_ += 100000;
      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
      fflush(stderr);
    }
  }

  void Stop(const Slice& name) {
    double finish = Env::Default()->NowMicros() * 1e-6;

    // Pretend at least one op was done in case we are running a benchmark
    // that does not call FinishedSingleOp().
    if (done_ < 1) done_ = 1;

    if (bytes_ > 0) {
      char rate[100];
      snprintf(rate, sizeof(rate), "%6.1f MB/s",
               (bytes_ / 1048576.0) / (finish - start_));
      if (!message_.empty()) {
        message_ = std::string(rate) + " " + message_;
      } else {
        message_ = rate;
      }
    }

    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
            message_.c_str());
    if (FLAGS_histogram) {
      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
    }
    fflush(stdout);
  }

 public:
  enum Order { SEQUENTIAL, RANDOM };
  enum DBState { FRESH, EXISTING };

  Benchmark()
      : db_(nullptr),
        db_num_(0),
        num_(FLAGS_num),
        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
        bytes_(0),
        rand_(301) {
    std::vector<std::string> files;
    std::string test_dir;
    Env::Default()->GetTestDirectory(&test_dir);
    Env::Default()->GetChildren(test_dir, &files);
    if (!FLAGS_use_existing_db) {
      for (int i = 0; i < files.size(); i++) {
        if (Slice(files[i]).starts_with("dbbench_sqlite3")) {
          std::string file_name(test_dir);
          file_name += "/";
          file_name += files[i];
          Env::Default()->RemoveFile(file_name.c_str());
        }
      }
    }
  }

  ~Benchmark() {
    int status = sqlite3_close(db_);
    ErrorCheck(status);
  }

  void Run() {
    PrintHeader();
    Open();

    const char* benchmarks = FLAGS_benchmarks;
    while (benchmarks != nullptr) {
      const char* sep = strchr(benchmarks, ',');
      Slice name;
      if (sep == nullptr) {
        name = benchmarks;
        benchmarks = nullptr;
      } else {
        name = Slice(benchmarks, sep - benchmarks);
        benchmarks = sep + 1;
      }

      bytes_ = 0;
      Start();

      bool known = true;
      bool write_sync = false;
      if (name == Slice("fillseq")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseqbatch")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandom")) {
        Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandbatch")) {
        Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("overwrite")) {
        Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("overwritebatch")) {
        Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandsync")) {
        write_sync = true;
        Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseqsync")) {
        write_sync = true;
        Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrand100K")) {
        Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseq100K")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("readseq")) {
        ReadSequential();
      } else if (name == Slice("readrandom")) {
        Read(RANDOM, 1);
      } else if (name == Slice("readrand100K")) {
        int n = reads_;
        reads_ /= 1000;
        Read(RANDOM, 1);
        reads_ = n;
      } else {
        known = false;
        if (name != Slice()) {  // No error message for empty name
          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
        }
      }
      if (known) {
        Stop(name);
      }
    }
  }

  void Open() {
    assert(db_ == nullptr);

    int status;
    char file_name[100];
    char* err_msg = nullptr;
    db_num_++;

    // Open database
    std::string tmp_dir;
    Env::Default()->GetTestDirectory(&tmp_dir);
    snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
             tmp_dir.c_str(), db_num_);
    status = sqlite3_open(file_name, &db_);
    if (status) {
      fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
      exit(1);
    }

    // Change SQLite cache size
    char cache_size[100];
    snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
             FLAGS_num_pages);
    status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
    ExecErrorCheck(status, err_msg);

    // FLAGS_page_size is defaulted to 1024
    if (FLAGS_page_size != 1024) {
      char page_size[100];
      snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
               FLAGS_page_size);
      status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
      ExecErrorCheck(status, err_msg);
    }

    // Change journal mode to WAL if WAL enabled flag is on
    if (FLAGS_WAL_enabled) {
      std::string WAL_stmt = "PRAGMA journal_mode = WAL";

      // LevelDB's default cache size is a combined 4 MB
      std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
      status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
      ExecErrorCheck(status, err_msg);
      status =
          sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
      ExecErrorCheck(status, err_msg);
    }

    // Change locking mode to exclusive and create tables/index for database
    std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
    std::string create_stmt =
        "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
    if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
    std::string stmt_array[] = {locking_stmt, create_stmt};
    int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
    for (int i = 0; i < stmt_array_length; i++) {
      status =
          sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
      ExecErrorCheck(status, err_msg);
    }
  }

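  // Writes num_entries values of value_size bytes each through a prepared
  // REPLACE statement. Keys are 16-byte zero-padded decimal strings; when
  // entries_per_batch > 1 and FLAGS_transaction is set, each batch runs
  // inside a single transaction.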
  void Write(bool write_sync, Order order, DBState state, int num_entries,
             int value_size, int entries_per_batch) {
    // Create new database if state == FRESH
    if (state == FRESH) {
      if (FLAGS_use_existing_db) {
        message_ = "skipping (--use_existing_db is true)";
        return;
      }
      sqlite3_close(db_);
      db_ = nullptr;
      Open();
      Start();
    }

    if (num_entries != num_) {
      char msg[100];
      snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
      message_ = msg;
    }

    char* err_msg = nullptr;
    int status;

    sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
    std::string replace_str = "REPLACE INTO test (key, value) VALUES (?, ?)";
    std::string begin_trans_str = "BEGIN TRANSACTION;";
    std::string end_trans_str = "END TRANSACTION;";

    // Check for synchronous flag in options
    std::string sync_stmt =
        (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
    status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
    ExecErrorCheck(status, err_msg);

    // Preparing sqlite3 statements
    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
                                nullptr);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                &begin_trans_stmt, nullptr);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
                                nullptr);
    ErrorCheck(status);

    bool transaction = (entries_per_batch > 1);
    for (int i = 0; i < num_entries; i += entries_per_batch) {
      // Begin write transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(begin_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(begin_trans_stmt);
        ErrorCheck(status);
      }

      // Create and execute SQL statements
      for (int j = 0; j < entries_per_batch; j++) {
        const char* value = gen_.Generate(value_size).data();

        // Create values for key-value pair
        const int k =
            (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
        char key[100];
        snprintf(key, sizeof(key), "%016d", k);

        // Bind KV values into replace_stmt
        status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
        ErrorCheck(status);
        status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
                                   SQLITE_STATIC);
        ErrorCheck(status);

        // Execute replace_stmt
        bytes_ += value_size + strlen(key);
        status = sqlite3_step(replace_stmt);
        StepErrorCheck(status);

        // Reset SQLite statement for another use
        status = sqlite3_clear_bindings(replace_stmt);
        ErrorCheck(status);
        status = sqlite3_reset(replace_stmt);
        ErrorCheck(status);

        FinishedSingleOp();
      }

      // End write transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(end_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(end_trans_stmt);
        ErrorCheck(status);
      }
    }

    status = sqlite3_finalize(replace_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(begin_trans_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(end_trans_stmt);
    ErrorCheck(status);
  }

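  // Performs reads_ point lookups through a prepared SELECT statement, with
  // keys chosen sequentially or at random according to order; batches of more
  // than one lookup are wrapped in a transaction when FLAGS_transaction is
  // set.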
  void Read(Order order, int entries_per_batch) {
    int status;
    sqlite3_stmt *read_stmt, *begin_trans_stmt, *end_trans_stmt;

    std::string read_str = "SELECT * FROM test WHERE key = ?";
    std::string begin_trans_str = "BEGIN TRANSACTION;";
    std::string end_trans_str = "END TRANSACTION;";

    // Preparing sqlite3 statements
    status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                &begin_trans_stmt, nullptr);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
                                nullptr);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
    ErrorCheck(status);

    bool transaction = (entries_per_batch > 1);
    for (int i = 0; i < reads_; i += entries_per_batch) {
      // Begin read transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(begin_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(begin_trans_stmt);
        ErrorCheck(status);
      }

      // Create and execute SQL statements
      for (int j = 0; j < entries_per_batch; j++) {
        // Create key value
        char key[100];
        int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
        snprintf(key, sizeof(key), "%016d", k);

        // Bind key value into read_stmt
        status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
        ErrorCheck(status);

        // Execute read statement
        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
        }
        StepErrorCheck(status);

        // Reset SQLite statement for another use
        status = sqlite3_clear_bindings(read_stmt);
        ErrorCheck(status);
        status = sqlite3_reset(read_stmt);
        ErrorCheck(status);
        FinishedSingleOp();
      }

      // End read transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(end_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(end_trans_stmt);
        ErrorCheck(status);
      }
    }

    status = sqlite3_finalize(read_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(begin_trans_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(end_trans_stmt);
    ErrorCheck(status);
  }

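  // Scans the table in key order, accumulating the key/value bytes read and
  // stopping after at most reads_ rows.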
  void ReadSequential() {
    int status;
    sqlite3_stmt* pStmt;
    std::string read_str = "SELECT * FROM test ORDER BY key";

    status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
    ErrorCheck(status);
    for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
      // Result columns are zero-based: 0 is the key, 1 is the value.
      bytes_ += sqlite3_column_bytes(pStmt, 0) + sqlite3_column_bytes(pStmt, 1);
      FinishedSingleOp();
    }

    status = sqlite3_finalize(pStmt);
    ErrorCheck(status);
  }
};

}  // namespace leveldb

int main(int argc, char** argv) {
  std::string default_db_path;
  for (int i = 1; i < argc; i++) {
    double d;
    int n;
    char junk;
    if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
      FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
    } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_histogram = n;
    } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
      FLAGS_compression_ratio = d;
    } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_use_existing_db = n;
    } else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_use_rowids = n;
    } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
      FLAGS_num = n;
    } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
      FLAGS_reads = n;
    } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
      FLAGS_value_size = n;
    } else if (leveldb::Slice(argv[i]) == leveldb::Slice("--no_transaction")) {
      FLAGS_transaction = false;
    } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) {
      FLAGS_page_size = n;
    } else if (sscanf(argv[i], "--num_pages=%d%c", &n, &junk) == 1) {
      FLAGS_num_pages = n;
    } else if (sscanf(argv[i], "--WAL_enabled=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_WAL_enabled = n;
    } else if (strncmp(argv[i], "--db=", 5) == 0) {
      FLAGS_db = argv[i] + 5;
    } else {
      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
      exit(1);
    }
  }

  // Choose a location for the test database if none given with --db=<path>
  if (FLAGS_db == nullptr) {
    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
    default_db_path += "/dbbench";
    FLAGS_db = default_db_path.c_str();
  }

  leveldb::Benchmark benchmark;
  benchmark.Run();
  return 0;
}