mirror of
https://github.com/libp2p/go-libp2p-peerstore.git
synced 2025-01-15 02:40:07 +08:00
add tests for new GC routines.
parent 35444fbc6a
commit 3a85498ae8
pstoreds/ds_gc_test.go | 141 additions (new file)
@@ -0,0 +1,141 @@
package pstoreds

import (
	"testing"
	"time"

	"github.com/ipfs/go-datastore/query"
	"github.com/libp2p/go-libp2p-peerstore"
	"github.com/libp2p/go-libp2p-peerstore/test"
)

var lookaheadQuery = query.Query{Prefix: gcLookaheadBase.String(), KeysOnly: true}

type testProbe struct {
	t  *testing.T
	ab peerstore.AddrBook
}

func (tp *testProbe) countLookaheadEntries() (i int) {
	results, err := tp.ab.(*dsAddrBook).ds.Query(lookaheadQuery)
	if err != nil {
		tp.t.Fatal(err)
	}

	defer results.Close()
	for range results.Next() {
		i++
	}
	return i
}
func (tp *testProbe) clearCache() {
	for _, k := range tp.ab.(*dsAddrBook).cache.Keys() {
		tp.ab.(*dsAddrBook).cache.Remove(k)
	}
}

func TestGCLookahead(t *testing.T) {
	opts := DefaultOpts()

	// effectively disable automatic GC for this test.
	opts.GCInitialDelay = 90 * time.Hour
	opts.GCLookaheadInterval = 10 * time.Second
	opts.GCPruneInterval = 1 * time.Minute

	factory := addressBookFactory(t, badgerStore, opts)
	ab, closeFn := factory()
	defer closeFn()

	tp := &testProbe{t, ab}

	ids := test.GeneratePeerIDs(10)
	addrs := test.GenerateAddrs(100)

	// lookahead is 10 seconds, so these entries will be outside the lookahead window.
	ab.AddAddrs(ids[0], addrs[:10], time.Hour)
	ab.AddAddrs(ids[1], addrs[10:20], time.Hour)
	ab.AddAddrs(ids[2], addrs[20:30], time.Hour)
	ab.(*dsAddrBook).populateLookahead()
	if i := tp.countLookaheadEntries(); i != 0 {
		t.Errorf("expected no GC lookahead entries, got: %v", i)
	}

	// Purge the cache, to exercise a different path in the lookahead cycle.
	tp.clearCache()

	// change addresses of a peer to have a 1-second TTL, placing them in the lookahead window.
	ab.UpdateAddrs(ids[1], time.Hour, time.Second)
	ab.(*dsAddrBook).populateLookahead()
	if i := tp.countLookaheadEntries(); i != 1 {
		t.Errorf("expected 1 GC lookahead entry, got: %v", i)
	}

	// change addresses of another peer to have a 5-second TTL, placing them in the lookahead window.
	ab.UpdateAddrs(ids[2], time.Hour, 5*time.Second)
	ab.(*dsAddrBook).populateLookahead()
	if i := tp.countLookaheadEntries(); i != 2 {
		t.Errorf("expected 2 GC lookahead entries, got: %v", i)
	}
}

func TestGCPurging(t *testing.T) {
	opts := DefaultOpts()

	// effectively disable automatic GC for this test.
	opts.GCInitialDelay = 90 * time.Hour
	opts.GCLookaheadInterval = 12 * time.Second
	opts.GCPruneInterval = 1 * time.Minute

	factory := addressBookFactory(t, badgerStore, opts)
	ab, closeFn := factory()
	defer closeFn()

	tp := &testProbe{t, ab}

	ids := test.GeneratePeerIDs(10)
	addrs := test.GenerateAddrs(100)

	// place all addresses within the lookahead window, but stagger their TTLs.
	ab.AddAddrs(ids[0], addrs[:10], 1*time.Second)
	ab.AddAddrs(ids[1], addrs[30:40], 1*time.Second)
	ab.AddAddrs(ids[2], addrs[60:70], 1*time.Second)

	ab.AddAddrs(ids[0], addrs[10:20], 4*time.Second)
	ab.AddAddrs(ids[1], addrs[40:50], 4*time.Second)

	ab.AddAddrs(ids[0], addrs[20:30], 10*time.Second)
	ab.AddAddrs(ids[1], addrs[50:60], 10*time.Second)

	ab.(*dsAddrBook).populateLookahead()
	if i := tp.countLookaheadEntries(); i != 3 {
		t.Errorf("expected 3 GC lookahead entries, got: %v", i)
	}

	<-time.After(2 * time.Second)
	ab.(*dsAddrBook).purgeCycle()

	if i := tp.countLookaheadEntries(); i != 2 {
		t.Errorf("expected 2 GC lookahead entries, got: %v", i)
	}

	// Purge the cache, to exercise a different path in the purge cycle.
	tp.clearCache()

	<-time.After(5 * time.Second)
	ab.(*dsAddrBook).purgeCycle()

	if i := tp.countLookaheadEntries(); i != 2 {
		t.Errorf("expected 2 GC lookahead entries, got: %v", i)
	}

	<-time.After(5 * time.Second)
	ab.(*dsAddrBook).purgeCycle()

	if i := tp.countLookaheadEntries(); i != 0 {
		t.Errorf("expected 0 GC lookahead entries, got: %v", i)
	}

	if i := len(ab.PeersWithAddrs()); i != 0 {
		t.Errorf("expected 0 entries in database, got: %v", i)
	}
}
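For context, countLookaheadEntries above counts GC lookahead records by issuing a keys-only, prefix-scoped query against the address book's datastore. The following standalone sketch shows that same query pattern using go-datastore's in-memory map store; the prefix string, the main wrapper, and the empty store are illustrative assumptions, not taken from this commit.

package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/query"
)

func main() {
	// Illustrative only: count every key stored under a prefix, mirroring how
	// countLookaheadEntries walks the lookahead region. The store is empty here,
	// so the count is zero; the prefix is hypothetical.
	store := ds.NewMapDatastore()

	q := query.Query{Prefix: "/peers/gc", KeysOnly: true}
	results, err := store.Query(q)
	if err != nil {
		panic(err)
	}
	defer results.Close()

	count := 0
	for range results.Next() {
		count++
	}
	fmt.Println("entries under prefix:", count)
}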
@@ -1,12 +1,9 @@
 package test
 
 import (
-	"fmt"
 	"testing"
 	"time"
 
-	peer "github.com/libp2p/go-libp2p-peer"
-	pt "github.com/libp2p/go-libp2p-peer/test"
 	pstore "github.com/libp2p/go-libp2p-peerstore"
 	ma "github.com/multiformats/go-multiaddr"
 )
@@ -39,27 +36,11 @@ func TestAddrBook(t *testing.T, factory AddrBookFactory) {
 	}
 }
 
-func generateAddrs(count int) []ma.Multiaddr {
-	var addrs = make([]ma.Multiaddr, count)
-	for i := 0; i < count; i++ {
-		addrs[i] = multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i))
-	}
-	return addrs
-}
-
-func generatePeerIds(count int) []peer.ID {
-	var ids = make([]peer.ID, count)
-	for i := 0; i < count; i++ {
-		ids[i], _ = pt.RandPeerID()
-	}
-	return ids
-}
-
 func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
 	return func(t *testing.T) {
 		t.Run("add a single address", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
-			addrs := generateAddrs(1)
+			id := GeneratePeerIDs(1)[0]
+			addrs := GenerateAddrs(1)
 
 			ab.AddAddr(id, addrs[0], time.Hour)
 
@@ -67,8 +48,8 @@ func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
 		})
 
 		t.Run("idempotent add single address", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
-			addrs := generateAddrs(1)
+			id := GeneratePeerIDs(1)[0]
+			addrs := GenerateAddrs(1)
 
 			ab.AddAddr(id, addrs[0], time.Hour)
 			ab.AddAddr(id, addrs[0], time.Hour)
@@ -77,16 +58,16 @@ func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
 		})
 
 		t.Run("add multiple addresses", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
-			addrs := generateAddrs(3)
+			id := GeneratePeerIDs(1)[0]
+			addrs := GenerateAddrs(3)
 
 			ab.AddAddrs(id, addrs, time.Hour)
 			testHas(t, addrs, ab.Addrs(id))
 		})
 
 		t.Run("idempotent add multiple addresses", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
-			addrs := generateAddrs(3)
+			id := GeneratePeerIDs(1)[0]
+			addrs := GenerateAddrs(3)
 
 			ab.AddAddrs(id, addrs, time.Hour)
 			ab.AddAddrs(id, addrs, time.Hour)
@@ -95,8 +76,8 @@ func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
 		})
 
 		t.Run("adding an existing address with a later expiration extends its ttl", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
-			addrs := generateAddrs(3)
+			id := GeneratePeerIDs(1)[0]
+			addrs := GenerateAddrs(3)
 
 			ab.AddAddrs(id, addrs, time.Second)
 
@@ -112,8 +93,8 @@ func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
 
 func testClearWorks(ab pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
-		ids := generatePeerIds(2)
-		addrs := generateAddrs(5)
+		ids := GeneratePeerIDs(2)
+		addrs := GenerateAddrs(5)
 
 		ab.AddAddrs(ids[0], addrs[0:3], time.Hour)
 		ab.AddAddrs(ids[1], addrs[3:], time.Hour)
@@ -133,29 +114,36 @@ func testClearWorks(ab pstore.AddrBook) func(t *testing.T) {
 
 func testSetNegativeTTLClears(m pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
-		id := generatePeerIds(1)[0]
-		addr := generateAddrs(1)[0]
+		id := GeneratePeerIDs(1)[0]
+		addrs := GenerateAddrs(100)
 
-		m.SetAddr(id, addr, time.Hour)
-		testHas(t, []ma.Multiaddr{addr}, m.Addrs(id))
+		m.SetAddrs(id, addrs, time.Hour)
+		testHas(t, addrs, m.Addrs(id))
 
-		m.SetAddr(id, addr, -1)
-		testHas(t, nil, m.Addrs(id))
+		// remove two addresses.
+		m.SetAddr(id, addrs[50], -1)
+		m.SetAddr(id, addrs[75], -1)
+
+		// calculate the survivors
+		survivors := append(addrs[0:50], addrs[51:]...)
+		survivors = append(survivors[0:74], survivors[75:]...)
+
+		testHas(t, survivors, m.Addrs(id))
 	}
 }
 
 func testUpdateTTLs(m pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
 		t.Run("update ttl of peer with no addrs", func(t *testing.T) {
-			id := generatePeerIds(1)[0]
+			id := GeneratePeerIDs(1)[0]
 
 			// Shouldn't panic.
 			m.UpdateAddrs(id, time.Hour, time.Minute)
 		})
 
 		t.Run("update ttls successfully", func(t *testing.T) {
-			ids := generatePeerIds(2)
-			addrs1, addrs2 := generateAddrs(2), generateAddrs(2)
+			ids := GeneratePeerIDs(2)
+			addrs1, addrs2 := GenerateAddrs(2), GenerateAddrs(2)
 
 			// set two keys with different ttls for each peer.
 			m.SetAddr(ids[0], addrs1[0], time.Hour)
@@ -200,7 +188,7 @@ func testUpdateTTLs(m pstore.AddrBook) func(t *testing.T) {
 
 func testNilAddrsDontBreak(m pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
-		id := generatePeerIds(1)[0]
+		id := GeneratePeerIDs(1)[0]
 
 		m.SetAddr(id, nil, time.Hour)
 		m.AddAddr(id, nil, time.Hour)
@@ -209,9 +197,9 @@ func testNilAddrsDontBreak(m pstore.AddrBook) func(t *testing.T) {
 
 func testAddressesExpire(m pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
-		ids := generatePeerIds(2)
-		addrs1 := generateAddrs(3)
-		addrs2 := generateAddrs(2)
+		ids := GeneratePeerIDs(2)
+		addrs1 := GenerateAddrs(3)
+		addrs2 := GenerateAddrs(2)
 
 		m.AddAddrs(ids[0], addrs1, time.Hour)
 		m.AddAddrs(ids[1], addrs2, time.Hour)
@@ -254,8 +242,8 @@ func testAddressesExpire(m pstore.AddrBook) func(t *testing.T) {
 
 func testClearWithIterator(m pstore.AddrBook) func(t *testing.T) {
 	return func(t *testing.T) {
-		ids := generatePeerIds(2)
-		addrs := generateAddrs(100)
+		ids := GeneratePeerIDs(2)
+		addrs := GenerateAddrs(100)
 
 		// Add the peers with 50 addresses each.
 		m.AddAddrs(ids[0], addrs[:50], pstore.PermanentAddrTTL)
@@ -292,8 +280,8 @@ func testPeersWithAddrs(m pstore.AddrBook) func(t *testing.T) {
 		})
 
 		t.Run("non-empty addrbook", func(t *testing.T) {
-			ids := generatePeerIds(2)
-			addrs := generateAddrs(10)
+			ids := GeneratePeerIDs(2)
+			addrs := GenerateAddrs(10)
 
 			m.AddAddrs(ids[0], addrs[:5], pstore.PermanentAddrTTL)
 			m.AddAddrs(ids[1], addrs[5:], pstore.PermanentAddrTTL)
@@ -36,7 +36,7 @@ func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, variant string)
 	// Start all test peer producing goroutines, where each produces peers with as many
 	// multiaddrs as the n field in the param struct.
 	for _, p := range params {
-		go addressProducer(ctx, b, p.ch, p.n)
+		go AddressProducer(ctx, b, p.ch, p.n)
 	}
 
 	// So tests are always run in the same order.
@@ -10,7 +10,7 @@ import (
 	ma "github.com/multiformats/go-multiaddr"
 )
 
-func multiaddr(m string) ma.Multiaddr {
+func Multiaddr(m string) ma.Multiaddr {
 	maddr, err := ma.NewMultiaddr(m)
 	if err != nil {
 		panic(err)
@@ -23,7 +23,7 @@ type peerpair struct {
 	Addr []ma.Multiaddr
 }
 
-func randomPeer(b *testing.B, addrCount int) *peerpair {
+func RandomPeer(b *testing.B, addrCount int) *peerpair {
 	var (
 		pid peer.ID
 		err error
@@ -44,11 +44,11 @@ func randomPeer(b *testing.B, addrCount int) *peerpair {
 	return &peerpair{pid, addrs}
 }
 
-func addressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair, addrsPerPeer int) {
+func AddressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair, addrsPerPeer int) {
 	b.Helper()
 	defer close(addrs)
 	for {
-		p := randomPeer(b, addrsPerPeer)
+		p := RandomPeer(b, addrsPerPeer)
 		select {
 		case addrs <- p:
 		case <-ctx.Done():
@@ -56,3 +56,19 @@ func addressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair, addrsPerPeer int) {
 		}
 	}
 }
+
+func GenerateAddrs(count int) []ma.Multiaddr {
+	var addrs = make([]ma.Multiaddr, count)
+	for i := 0; i < count; i++ {
+		addrs[i] = Multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i))
+	}
+	return addrs
+}
+
+func GeneratePeerIDs(count int) []peer.ID {
+	var ids = make([]peer.ID, count)
+	for i := 0; i < count; i++ {
+		ids[i], _ = pt.RandPeerID()
+	}
+	return ids
+}
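As a usage note, the helpers exported above (GeneratePeerIDs, GenerateAddrs, Multiaddr) can now be consumed from other packages, which is exactly what the new pstoreds GC test does. A minimal hypothetical sketch of such a consumer follows; the package name, test name, and assertions are invented for illustration and are not part of this commit.

package example_test

import (
	"testing"

	"github.com/libp2p/go-libp2p-peerstore/test"
)

// TestFixtureHelpersSketch is a hypothetical consumer of the exported fixture helpers.
func TestFixtureHelpersSketch(t *testing.T) {
	ids := test.GeneratePeerIDs(4)
	addrs := test.GenerateAddrs(4)
	if len(ids) != 4 || len(addrs) != 4 {
		t.Fatalf("expected 4 peer IDs and 4 multiaddrs, got %d and %d", len(ids), len(addrs))
	}
	// test.Multiaddr panics on malformed input, so only a well-formed string is passed here.
	if a := test.Multiaddr("/ip4/1.1.1.1/tcp/1111"); a == nil {
		t.Fatal("expected a non-nil multiaddr")
	}
}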