Mirror of https://github.com/google/leveldb.git
e3584f9c28
- Fix for issue 33 (non-null-terminated result from leveldb_property_value()).
- Support for running multiple instances of a benchmark in parallel.
- Reduce lock contention on Get():
  (1) Do not hold the lock while searching memtables.
  (2) Shard block and table caches 16-ways.
  Benchmark for evaluating this change:
  $ db_bench --benchmarks=fillseq1,readrandom --threads=$n
  (fillseq1 is a small hack to make sure fillseq runs once regardless of the
  number of threads specified on the command line.)

git-svn-id: https://leveldb.googlecode.com/svn/trunk@49 62dab493-f737-651d-591e-8d6aee1b9529
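The "shard block and table caches 16-ways" item above boils down to splitting one mutex-protected cache into several independently locked shards chosen by key hash, so concurrent Get() calls rarely fight over the same lock. The class below is a minimal sketch of that idea only, not the real sharded cache in util/cache.cc; the name ShardedCacheSketch and the plain std::unordered_map standing in for a proper LRU structure are illustrative assumptions.

// Minimal sketch of 16-way cache sharding (illustrative only, not leveldb code).
#include <array>
#include <mutex>
#include <string>
#include <unordered_map>

class ShardedCacheSketch {
 public:
  // Insert or overwrite a key; only the owning shard's mutex is taken.
  void Insert(const std::string& key, int value) {
    Shard& s = ShardFor(key);
    std::lock_guard<std::mutex> guard(s.mu);
    s.table[key] = value;
  }

  // Returns true and fills *value if key is present; again only one shard locks.
  bool Lookup(const std::string& key, int* value) {
    Shard& s = ShardFor(key);
    std::lock_guard<std::mutex> guard(s.mu);
    auto it = s.table.find(key);
    if (it == s.table.end()) return false;
    *value = it->second;
    return true;
  }

 private:
  static constexpr int kNumShards = 16;  // "16-ways", as in the commit message.

  struct Shard {
    std::mutex mu;
    std::unordered_map<std::string, int> table;  // Stand-in for a real LRU list.
  };

  // Hash the key and pick one of the 16 shards.
  Shard& ShardFor(const std::string& key) {
    return shards_[std::hash<std::string>()(key) % kNumShards];
  }

  std::array<Shard, kNumShards> shards_;
};

With 16 shards, two threads hitting different keys contend only when the keys hash to the same shard, which is how the sharding is meant to reduce lock contention in the multi-threaded readrandom benchmark above.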
187 lines
4.6 KiB
C++
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/cache.h"

#include <vector>
#include "util/coding.h"
#include "util/testharness.h"

namespace leveldb {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }

class CacheTest {
 public:
  static CacheTest* current_;

  // Deletion callback passed to Cache::Insert(); records every dropped
  // entry so tests can check what the cache deleted and in what order.
  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  Cache* cache_;

  CacheTest() : cache_(NewLRUCache(kCacheSize)) {
    current_ = this;
  }

  ~CacheTest() {
    delete cache_;
  }

  // Returns the value stored under key, or -1 if the key is not cached.
  int Lookup(int key) {
    Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
    const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
    if (handle != NULL) {
      cache_->Release(handle);
    }
    return r;
  }

  // Inserts key->value with the given charge and immediately releases the
  // returned handle, leaving the entry owned solely by the cache.
  void Insert(int key, int value, int charge = 1) {
    cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
                                   &CacheTest::Deleter));
  }

  void Erase(int key) {
    cache_->Erase(EncodeKey(key));
  }
};
CacheTest* CacheTest::current_;

TEST(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
}

TEST(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0, deleted_keys_.size());

  cache_->Release(h1);
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1, deleted_keys_.size());

  cache_->Release(h2);
  ASSERT_EQ(2, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
}

TEST(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000+i, 2000+i);
    ASSERT_EQ(2000+i, Lookup(1000+i));
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2*kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000+index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000+i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
}

TEST(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

}  // namespace leveldb

int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}