mirror of https://github.com/libp2p/go-libp2p-peerstore.git
increase test coverage.
commit 4037390c42
parent 7ae2621803
@@ -321,7 +321,6 @@ func (mgr *dsAddrBook) ClearAddrs(p peer.ID) {
 	if e, ok := mgr.cache.Peek(p.Pretty()); ok {
 		mgr.cache.Remove(p.Pretty())
 		keys, _, _ := keysAndAddrs(p, e.([]ma.Multiaddr))
-
 		deleteFn = func() error {
 			return mgr.dbDelete(keys)
 		}
@@ -122,10 +122,25 @@ func TestBadgerDsPeerstore(t *testing.T) {
 }
 
 func TestBadgerDsAddrBook(t *testing.T) {
-	opts := DefaultOpts()
-	opts.TTLInterval = 100 * time.Microsecond
+	t.Run("Cacheful", func(t *testing.T) {
+		t.Parallel()
 
-	pt.TestAddrBook(t, addressBookFactory(t, opts))
+		opts := DefaultOpts()
+		opts.TTLInterval = 100 * time.Microsecond
+		opts.CacheSize = 1024
+
+		pt.TestAddrBook(t, addressBookFactory(t, opts))
+	})
+
+	t.Run("Cacheless", func(t *testing.T) {
+		t.Parallel()
+
+		opts := DefaultOpts()
+		opts.TTLInterval = 100 * time.Microsecond
+		opts.CacheSize = 0
+
+		pt.TestAddrBook(t, addressBookFactory(t, opts))
+	})
 }
 
 func BenchmarkBadgerDsPeerstore(b *testing.B) {
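A note on the restructuring above: the options are now built inside each t.Run closure, after t.Parallel(), so the Cacheful and Cacheless variants each get their own Opts value instead of sharing one built at the top of the test. A minimal, self-contained sketch of that pattern follows; the exampleOpts struct and TestVariants name are stand-ins for illustration, not part of this package.

package example

import (
	"testing"
	"time"
)

// exampleOpts is a stand-in for the package's real Options type; illustrative only.
type exampleOpts struct {
	TTLInterval time.Duration
	CacheSize   int
}

func TestVariants(t *testing.T) {
	for _, tc := range []struct {
		name      string
		cacheSize int
	}{
		{"Cacheful", 1024},
		{"Cacheless", 0},
	} {
		tc := tc // capture the range variable for the parallel closure
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Build per-subtest options inside the closure, as the diff does,
			// so the parallel variants cannot race on shared state.
			opts := exampleOpts{TTLInterval: 100 * time.Microsecond, CacheSize: tc.cacheSize}
			_ = opts // exercise the system under test with opts here
		})
	}
}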
@@ -18,6 +18,8 @@ var addressBookSuite = map[string]func(book pstore.AddrBook) func(*testing.T){
 	"UpdateTTLs":         testUpdateTTLs,
 	"NilAddrsDontBreak":  testNilAddrsDontBreak,
 	"AddressesExpire":    testAddressesExpire,
+	"ClearWithIter":      testClearWithIterator,
+	"PeersWithAddresses": testPeersWithAddrs,
 }
 
 type AddrBookFactory func() (pstore.AddrBook, func())
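The two new entries above register testClearWithIterator and testPeersWithAddrs (added in the next hunk) with the shared address-book suite. The driver that consumes this map is not part of the diff; below is a minimal sketch, assuming pt.TestAddrBook simply iterates addressBookSuite and uses the AddrBookFactory to build a fresh store per case, with the package's existing imports. The real driver may differ, for example in cleanup or parallelism.

// Sketch of a suite driver over the map and factory types shown above. It is
// written as if it lived in the same test package, so addressBookSuite and
// AddrBookFactory refer to the identifiers from this diff, not new definitions.
func TestAddrBook(t *testing.T, factory AddrBookFactory) {
	for name, subtest := range addressBookSuite {
		// A fresh AddrBook per case keeps the new ClearWithIter and
		// PeersWithAddresses entries independent of earlier ones.
		ab, closeFn := factory()
		t.Run(name, subtest(ab))
		closeFn()
	}
}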
@@ -234,6 +236,59 @@ func testAddressesExpire(m pstore.AddrBook) func(t *testing.T) {
 	}
 }
 
+func testClearWithIterator(m pstore.AddrBook) func(t *testing.T) {
+	return func(t *testing.T) {
+		ids := generatePeerIds(2)
+		addrs := generateAddrs(100)
+
+		// Add the peers with 50 addresses each.
+		m.AddAddrs(ids[0], addrs[:50], pstore.PermanentAddrTTL)
+		m.AddAddrs(ids[1], addrs[50:], pstore.PermanentAddrTTL)
+
+		if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 100 {
+			t.Fatal("expected pstore to contain both peers with all their maddrs")
+		}
+
+		// Since we don't fetch these peers, they won't be present in cache.
+
+		m.ClearAddrs(ids[0])
+		if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 50 {
+			t.Fatal("expected pstore to contain only addrs of peer 2")
+		}
+
+		m.ClearAddrs(ids[1])
+		if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 0 {
+			t.Fatal("expected pstore to contain no addresses")
+		}
+	}
+}
+
+func testPeersWithAddrs(m pstore.AddrBook) func(t *testing.T) {
+	return func(t *testing.T) {
+		// cannot run in parallel as the store is modified.
+		// go runs sequentially in the specified order
+		// see https://blog.golang.org/subtests
+
+		t.Run("empty addrbook", func(t *testing.T) {
+			if peers := m.PeersWithAddrs(); len(peers) != 0 {
+				t.Fatal("expected to find no peers")
+			}
+		})
+
+		t.Run("non-empty addrbook", func(t *testing.T) {
+			ids := generatePeerIds(2)
+			addrs := generateAddrs(10)
+
+			m.AddAddrs(ids[0], addrs[:5], pstore.PermanentAddrTTL)
+			m.AddAddrs(ids[1], addrs[5:], pstore.PermanentAddrTTL)
+
+			if peers := m.PeersWithAddrs(); len(peers) != 2 {
+				t.Fatal("expected to find 2 peers")
+			}
+		})
+	}
+}
+
 func testHas(t *testing.T, exp, act []ma.Multiaddr) {
 	t.Helper()
 	if len(exp) != len(act) {
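The new tests rely on two helpers that fall outside this diff, generateAddrs and generatePeerIds. The sketch below shows the shape such helpers commonly take in libp2p test code; the function names are prefixed with "example" to make clear they are illustrative, and the import paths assume the current libp2p module layout rather than the vintage of this commit, so the real suite may differ.

package example

import (
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto" // assumed import paths; older releases used go-libp2p-crypto / go-libp2p-peer
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// exampleGenerateAddrs returns count distinct multiaddrs; illustrative only.
func exampleGenerateAddrs(count int) []ma.Multiaddr {
	addrs := make([]ma.Multiaddr, 0, count)
	for i := 0; i < count; i++ {
		// Distinct TCP ports keep every multiaddr unique.
		addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)))
	}
	return addrs
}

// exampleGeneratePeerIds derives count peer IDs from fresh Ed25519 keys; illustrative only.
func exampleGeneratePeerIds(count int) []peer.ID {
	ids := make([]peer.ID, 0, count)
	for i := 0; i < count; i++ {
		priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
		if err != nil {
			panic(err)
		}
		id, err := peer.IDFromPrivateKey(priv)
		if err != nil {
			panic(err)
		}
		ids = append(ids, id)
	}
	return ids
}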