Merge pull request #32 from raulk/keybook-tests

* Introduce tests for KeyBook to increase coverage.
* Change usages of []peer.ID to peer.IDSlice, which supports sorting (see the sketch below).
* Restructure AddrBook tests into subtests.
* Use explicit import aliases throughout.
Raúl Kripalani 2018-09-07 19:01:48 +01:00 committed by GitHub
commit ee1756a6b3
16 changed files with 362 additions and 268 deletions
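
A note on the IDSlice change: peer.IDSlice is a named slice type in go-libp2p-peer that implements sort.Interface, which is what lets the new KeyBook suite call sort.Sort on the returned IDs. The following is a minimal sketch of the type as assumed by this PR; treat it as illustrative rather than a verbatim copy of the upstream source.

// Sketch of the sortable slice type (assumed definition, for illustration only).
type IDSlice []ID

func (s IDSlice) Len() int           { return len(s) }
func (s IDSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s IDSlice) Less(i, j int) bool { return string(s[i]) < string(s[j]) }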


@@ -1 +1 @@
1.5.0: QmXkA9vNhpuTjHRedDioHj39oBEtt72JfEtQFPECUxHXx4
2.0.0: QmRad2sSzE6BmqFbwgpMB39Zv45Vng9A7kvizdZhFVrwoU


@@ -2,13 +2,12 @@ package peerstore
import (
"context"
"errors"
"math"
"time"
"github.com/pkg/errors"
ic "github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
@@ -66,7 +65,7 @@ type Peerstore interface {
SupportsProtocols(peer.ID, ...string) ([]string, error)
// Peers returns all of the peer IDs stored across all inner stores.
Peers() []peer.ID
Peers() peer.IDSlice
}
type PeerMetadata interface {
@@ -111,7 +110,7 @@ type AddrBook interface {
ClearAddrs(p peer.ID)
// PeersWithAddrs returns all of the peer IDs stored in the AddrBook
PeersWithAddrs() []peer.ID
PeersWithAddrs() peer.IDSlice
}
// KeyBook tracks the keys of Peers.
@@ -130,5 +129,5 @@ type KeyBook interface {
AddPrivKey(peer.ID, ic.PrivKey) error
// PeersWithKeys returns all the peer IDs stored in the KeyBook
PeersWithKeys() []peer.ID
PeersWithKeys() peer.IDSlice
}
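
With Peers(), PeersWithAddrs() and PeersWithKeys() all returning peer.IDSlice, callers can sort the result in place. A small hedged usage sketch (sortedPeers is a hypothetical helper, not part of this PR):

import "sort"

// sortedPeers returns the peer IDs known to ps in a deterministic order.
func sortedPeers(ps Peerstore) peer.IDSlice {
	ids := ps.Peers() // peer.IDSlice rather than []peer.ID after this change
	sort.Sort(ids)    // works because IDSlice implements sort.Interface
	return ids
}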


@@ -77,6 +77,6 @@
"license": "MIT",
"name": "go-libp2p-peerstore",
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
"version": "1.5.0"
"version": "2.0.0"
}


@@ -4,7 +4,7 @@ import (
"fmt"
"sync"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
)
var _ Peerstore = (*peerstore)(nil)
@@ -31,7 +31,7 @@ func NewPeerstore(kb KeyBook, ab AddrBook, md PeerMetadata) Peerstore {
}
}
func (ps *peerstore) Peers() []peer.ID {
func (ps *peerstore) Peers() peer.IDSlice {
set := map[peer.ID]struct{}{}
for _, p := range ps.PeersWithKeys() {
set[p] = struct{}{}
@@ -40,7 +40,7 @@ func (ps *peerstore) Peers() []peer.ID {
set[p] = struct{}{}
}
pps := make([]peer.ID, 0, len(set))
pps := make(peer.IDSlice, 0, len(set))
for p := range set {
pps = append(pps, p)
}
@@ -132,7 +132,7 @@ func (ps *peerstore) SupportsProtocols(p peer.ID, protos ...string) ([]string, e
return out, nil
}
func PeerInfos(ps Peerstore, peers []peer.ID) []PeerInfo {
func PeerInfos(ps Peerstore, peers peer.IDSlice) []PeerInfo {
pi := make([]PeerInfo, len(peers))
for i, p := range peers {
pi[i] = ps.PeerInfo(p)
@@ -140,8 +140,8 @@ func PeerInfos(ps Peerstore, peers []peer.ID) []PeerInfo {
return pi
}
func PeerInfoIDs(pis []PeerInfo) []peer.ID {
ps := make([]peer.ID, len(pis))
func PeerInfoIDs(pis []PeerInfo) peer.IDSlice {
ps := make(peer.IDSlice, len(pis))
for i, pi := range pis {
ps[i] = pi.ID
}


@@ -5,16 +5,16 @@ import (
"sync"
"time"
"github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
query "github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
pstoremem "github.com/libp2p/go-libp2p-peerstore/pstoremem"
)
var log = logging.Logger("peerstore/ds")
@@ -193,12 +193,12 @@ func (mgr *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
}
// Peers returns all of the peer IDs for which the AddrBook has addresses.
func (mgr *dsAddrBook) PeersWithAddrs() []peer.ID {
func (mgr *dsAddrBook) PeersWithAddrs() peer.IDSlice {
q := query.Query{KeysOnly: true}
results, err := mgr.ds.Query(q)
if err != nil {
log.Error(err)
return []peer.ID{}
return peer.IDSlice{}
}
idset := make(map[peer.ID]struct{})
@@ -211,7 +211,7 @@ func (mgr *dsAddrBook) PeersWithAddrs() []peer.ID {
idset[id] = struct{}{}
}
ids := make([]peer.ID, 0, len(idset))
ids := make(peer.IDSlice, 0, len(idset))
for id := range idset {
ids = append(ids, id)
}


@@ -7,14 +7,14 @@ import (
"testing"
"time"
"github.com/ipfs/go-datastore"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-ds-badger"
"github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/test"
pstore "github.com/libp2p/go-libp2p-peerstore"
pt "github.com/libp2p/go-libp2p-peerstore/test"
)
func setupBadgerDatastore(t testing.TB) (datastore.Batching, func()) {
func setupBadgerDatastore(t testing.TB) (ds.Batching, func()) {
dataPath, err := ioutil.TempDir(os.TempDir(), "badger")
if err != nil {
t.Fatal(err)
@@ -30,8 +30,8 @@ func setupBadgerDatastore(t testing.TB) (datastore.Batching, func()) {
return ds, closer
}
func newPeerstoreFactory(tb testing.TB) test.PeerstoreFactory {
return func() (peerstore.Peerstore, func()) {
func newPeerstoreFactory(tb testing.TB) pt.PeerstoreFactory {
return func() (pstore.Peerstore, func()) {
ds, closeFunc := setupBadgerDatastore(tb)
ps, err := NewPeerstore(context.Background(), ds)
@@ -44,11 +44,11 @@ func newPeerstoreFactory(tb testing.TB) test.PeerstoreFactory {
}
func TestBadgerDsPeerstore(t *testing.T) {
test.TestPeerstore(t, newPeerstoreFactory(t))
pt.TestPeerstore(t, newPeerstoreFactory(t))
}
func TestBadgerDsAddrBook(t *testing.T) {
test.TestAddrBook(t, func() (peerstore.AddrBook, func()) {
pt.TestAddrBook(t, func() (pstore.AddrBook, func()) {
ds, closeDB := setupBadgerDatastore(t)
mgr, err := NewAddrBook(context.Background(), ds, 100*time.Microsecond)
@@ -65,5 +65,5 @@ func TestBadgerDsAddrBook(t *testing.T) {
}
func BenchmarkBadgerDsPeerstore(b *testing.B) {
test.BenchmarkPeerstore(b, newPeerstoreFactory(b))
pt.BenchmarkPeerstore(b, newPeerstoreFactory(b))
}


@@ -4,14 +4,14 @@ import (
"context"
"time"
"github.com/ipfs/go-datastore"
ds "github.com/ipfs/go-datastore"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
pstoremem "github.com/libp2p/go-libp2p-peerstore/pstoremem"
)
// NewPeerstore creates a peerstore backed by the provided persistent datastore.
func NewPeerstore(ctx context.Context, ds datastore.Batching) (pstore.Peerstore, error) {
func NewPeerstore(ctx context.Context, ds ds.Batching) (pstore.Peerstore, error) {
addrBook, err := NewAddrBook(ctx, ds, time.Second)
if err != nil {
return nil, err


@@ -7,11 +7,11 @@ import (
"time"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/addr"
addr "github.com/libp2p/go-libp2p-peerstore/addr"
)
var log = logging.Logger("peerstore")
@@ -45,14 +45,14 @@ func NewAddrBook() pstore.AddrBook {
}
}
func (mab *memoryAddrBook) PeersWithAddrs() []peer.ID {
func (mab *memoryAddrBook) PeersWithAddrs() peer.IDSlice {
mab.addrmu.Lock()
defer mab.addrmu.Unlock()
if mab.addrs == nil {
return nil
}
pids := make([]peer.ID, 0, len(mab.addrs))
pids := make(peer.IDSlice, 0, len(mab.addrs))
for pid := range mab.addrs {
pids = append(pids, pid)
}


@@ -4,23 +4,29 @@ import (
"testing"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/test"
pt "github.com/libp2p/go-libp2p-peerstore/test"
)
func TestInMemoryPeerstore(t *testing.T) {
test.TestPeerstore(t, func() (pstore.Peerstore, func()) {
pt.TestPeerstore(t, func() (pstore.Peerstore, func()) {
return NewPeerstore(), nil
})
}
func TestInMemoryAddrBook(t *testing.T) {
test.TestAddrBook(t, func() (pstore.AddrBook, func()) {
pt.TestAddrBook(t, func() (pstore.AddrBook, func()) {
return NewAddrBook(), nil
})
}
func TestInMemoryKeyBook(t *testing.T) {
pt.TestKeyBook(t, func() (pstore.KeyBook, func()) {
return NewKeyBook(), nil
})
}
func BenchmarkInMemoryPeerstore(b *testing.B) {
test.BenchmarkPeerstore(b, func() (pstore.Peerstore, func()) {
pt.BenchmarkPeerstore(b, func() (pstore.Peerstore, func()) {
return NewPeerstore(), nil
})
}


@@ -5,7 +5,7 @@ import (
"sync"
ic "github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
)
@@ -26,9 +26,9 @@ func NewKeyBook() pstore.KeyBook {
}
}
func (mkb *memoryKeyBook) PeersWithKeys() []peer.ID {
func (mkb *memoryKeyBook) PeersWithKeys() peer.IDSlice {
mkb.RLock()
ps := make([]peer.ID, 0, len(mkb.pks)+len(mkb.sks))
ps := make(peer.IDSlice, 0, len(mkb.pks)+len(mkb.sks))
for p := range mkb.pks {
ps = append(ps, p)
}


@@ -3,8 +3,7 @@ package pstoremem
import (
"sync"
"github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
)


@@ -1,17 +1,18 @@
package test
import (
"fmt"
"testing"
"time"
"github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
peer "github.com/libp2p/go-libp2p-peer"
pt "github.com/libp2p/go-libp2p-peer/test"
pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
var addressBookSuite = map[string]func(book pstore.AddrBook) func(*testing.T){
"Addresses": testAddresses,
"AddAddress": testAddAddress,
"Clear": testClearWorks,
"SetNegativeTTLClears": testSetNegativeTTLClears,
"UpdateTTLs": testUpdateTTLs,
@@ -36,227 +37,203 @@ func TestAddrBook(t *testing.T, factory AddrBookFactory) {
}
}
func testAddresses(m pstore.AddrBook) func(*testing.T) {
func generateAddrs(count int) []ma.Multiaddr {
var addrs = make([]ma.Multiaddr, count)
for i := 0; i < count; i++ {
addrs[i] = multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i))
}
return addrs
}
func generatePeerIds(count int) []peer.ID {
var ids = make([]peer.ID, count)
for i := 0; i < count; i++ {
ids[i], _ = pt.RandPeerID()
}
return ids
}
func testAddAddress(ab pstore.AddrBook) func(*testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")
t.Run("add a single address", func(t *testing.T) {
id := generatePeerIds(1)[0]
addrs := generateAddrs(1)
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")
ab.AddAddr(id, addrs[0], time.Hour)
ttl := time.Hour
m.AddAddr(id1, ma11, ttl)
testHas(t, addrs, ab.Addrs(id))
})
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl)
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl) // idempotency
t.Run("idempotent add single address", func(t *testing.T) {
id := generatePeerIds(1)[0]
addrs := generateAddrs(1)
m.AddAddr(id3, ma31, ttl)
m.AddAddr(id3, ma32, ttl)
m.AddAddr(id3, ma33, ttl)
m.AddAddr(id3, ma33, ttl) // idempotency
m.AddAddr(id3, ma33, ttl)
ab.AddAddr(id, addrs[0], time.Hour)
ab.AddAddr(id, addrs[0], time.Hour)
m.AddAddrs(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // multiple
testHas(t, addrs, ab.Addrs(id))
})
m.AddAddrs(id5, []ma.Multiaddr{ma21, ma22}, ttl) // clearing
m.AddAddrs(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // clearing
m.ClearAddrs(id5)
m.AddAddrs(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ttl) // clearing
t.Run("add multiple addresses", func(t *testing.T) {
id := generatePeerIds(1)[0]
addrs := generateAddrs(3)
// test the Addresses return value
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
testHas(t, []ma.Multiaddr{ma31, ma32, ma33}, m.Addrs(id3))
testHas(t, []ma.Multiaddr{ma41, ma42, ma43, ma44}, m.Addrs(id4))
testHas(t, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, m.Addrs(id5))
ab.AddAddrs(id, addrs, time.Hour)
testHas(t, addrs, ab.Addrs(id))
})
t.Run("idempotent add multiple addresses", func(t *testing.T) {
id := generatePeerIds(1)[0]
addrs := generateAddrs(3)
ab.AddAddrs(id, addrs, time.Hour)
ab.AddAddrs(id, addrs, time.Hour)
testHas(t, addrs, ab.Addrs(id))
})
}
}
func testClearWorks(m pstore.AddrBook) func(t *testing.T) {
func testClearWorks(ab pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
ids := generatePeerIds(2)
addrs := generateAddrs(5)
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
ab.AddAddrs(ids[0], addrs[0:3], time.Hour)
ab.AddAddrs(ids[1], addrs[3:], time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
testHas(t, addrs[0:3], ab.Addrs(ids[0]))
testHas(t, addrs[3:], ab.Addrs(ids[1]))
m.ClearAddrs(id1)
m.ClearAddrs(id2)
ab.ClearAddrs(ids[0])
testHas(t, nil, ab.Addrs(ids[0]))
testHas(t, addrs[3:], ab.Addrs(ids[1]))
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
ab.ClearAddrs(ids[1])
testHas(t, nil, ab.Addrs(ids[0]))
testHas(t, nil, ab.Addrs(ids[1]))
}
}
func testSetNegativeTTLClears(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
id := generatePeerIds(1)[0]
addr := generateAddrs(1)[0]
m.SetAddr(id1, ma11, time.Hour)
m.SetAddr(id, addr, time.Hour)
testHas(t, []ma.Multiaddr{addr}, m.Addrs(id))
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
m.SetAddr(id1, ma11, -1)
testHas(t, nil, m.Addrs(id1))
m.SetAddr(id, addr, -1)
testHas(t, nil, m.Addrs(id))
}
}
func testUpdateTTLs(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/1.2.3.1/tcp/1112")
ma21 := MA(t, "/ip4/1.2.3.1/tcp/1121")
ma22 := MA(t, "/ip4/1.2.3.1/tcp/1122")
t.Run("update ttl of peer with no addrs", func(t *testing.T) {
id := generatePeerIds(1)[0]
// Shouldn't panic.
m.UpdateAddrs(id1, time.Hour, time.Minute)
// Shouldn't panic.
m.UpdateAddrs(id, time.Hour, time.Minute)
})
m.SetAddr(id1, ma11, time.Hour)
m.SetAddr(id1, ma12, time.Minute)
t.Run("update ttls successfully", func(t *testing.T) {
ids := generatePeerIds(2)
addrs1, addrs2 := generateAddrs(2), generateAddrs(2)
// Shouldn't panic.
m.UpdateAddrs(id2, time.Hour, time.Minute)
// set two keys with different ttls for each peer.
m.SetAddr(ids[0], addrs1[0], time.Hour)
m.SetAddr(ids[0], addrs1[1], time.Minute)
m.SetAddr(ids[1], addrs2[0], time.Hour)
m.SetAddr(ids[1], addrs2[1], time.Minute)
m.SetAddr(id2, ma21, time.Hour)
m.SetAddr(id2, ma22, time.Minute)
// Sanity check.
testHas(t, addrs1, m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
// Will only affect addrs1[0].
m.UpdateAddrs(ids[0], time.Hour, time.Second)
m.UpdateAddrs(id1, time.Hour, time.Second)
// No immediate effect.
testHas(t, addrs1, m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
// After a wait, addrs[0] is gone.
time.Sleep(1200 * time.Millisecond)
testHas(t, addrs1[1:2], m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
time.Sleep(1200 * time.Millisecond)
// Will only affect addrs2[0].
m.UpdateAddrs(ids[1], time.Hour, time.Second)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
// No immediate effect.
testHas(t, addrs1[1:2], m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
m.UpdateAddrs(id2, time.Hour, time.Second)
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
// First addrs is gone in both.
testHas(t, addrs1[1:], m.Addrs(ids[0]))
testHas(t, addrs2[1:], m.Addrs(ids[1]))
})
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma22}, m.Addrs(id2))
}
}
func testNilAddrsDontBreak(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
m.SetAddr(id1, nil, time.Hour)
m.AddAddr(id1, nil, time.Hour)
id := generatePeerIds(1)[0]
m.SetAddr(id, nil, time.Hour)
m.AddAddr(id, nil, time.Hour)
}
}
func testAddressesExpire(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
ids := generatePeerIds(2)
addrs1 := generateAddrs(3)
addrs2 := generateAddrs(2)
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
m.AddAddrs(ids[0], addrs1, time.Hour)
m.AddAddrs(ids[1], addrs2, time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
testHas(t, addrs1, m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
m.SetAddr(id1, ma11, 2*time.Hour)
m.SetAddr(id1, ma12, 2*time.Hour)
m.SetAddr(id1, ma13, 2*time.Hour)
m.SetAddr(id2, ma24, 2*time.Hour)
m.SetAddr(id2, ma25, 2*time.Hour)
m.AddAddrs(ids[0], addrs1, 2*time.Hour)
m.AddAddrs(ids[1], addrs2, 2*time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
testHas(t, addrs1, m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
m.SetAddr(id1, ma11, time.Millisecond)
m.SetAddr(ids[0], addrs1[0], time.Millisecond)
<-time.After(time.Millisecond * 5)
testHas(t, []ma.Multiaddr{ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
testHas(t, addrs1[1:3], m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
m.SetAddr(id1, ma13, time.Millisecond)
m.SetAddr(ids[0], addrs1[2], time.Millisecond)
<-time.After(time.Millisecond * 5)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
testHas(t, addrs1[1:2], m.Addrs(ids[0]))
testHas(t, addrs2, m.Addrs(ids[1]))
m.SetAddr(id2, ma24, time.Millisecond)
m.SetAddr(ids[1], addrs2[0], time.Millisecond)
<-time.After(time.Millisecond * 5)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma25}, m.Addrs(id2))
testHas(t, addrs1[1:2], m.Addrs(ids[0]))
testHas(t, addrs2[1:], m.Addrs(ids[1]))
m.SetAddr(id2, ma25, time.Millisecond)
m.SetAddr(ids[1], addrs2[1], time.Millisecond)
<-time.After(time.Millisecond * 5)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
testHas(t, addrs1[1:2], m.Addrs(ids[0]))
testHas(t, nil, m.Addrs(ids[1]))
m.SetAddr(id1, ma12, time.Millisecond)
m.SetAddr(ids[0], addrs1[1], time.Millisecond)
<-time.After(time.Millisecond * 5)
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
testHas(t, nil, m.Addrs(ids[0]))
testHas(t, nil, m.Addrs(ids[1]))
}
}
func IDS(t *testing.T, ids string) peer.ID {
t.Helper()
id, err := peer.IDB58Decode(ids)
if err != nil {
t.Fatalf("id %q is bad: %s", ids, err)
}
return id
}
func MA(t *testing.T, m string) ma.Multiaddr {
t.Helper()
maddr, err := ma.NewMultiaddr(m)
if err != nil {
t.Fatal(err)
}
return maddr
}
func testHas(t *testing.T, exp, act []ma.Multiaddr) {
t.Helper()
if len(exp) != len(act) {

test/keybook_suite.go (new file, 154 lines)

@@ -0,0 +1,154 @@
package test
import (
"sort"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
pt "github.com/libp2p/go-libp2p-peer/test"
pstore "github.com/libp2p/go-libp2p-peerstore"
)
var keyBookSuite = map[string]func(kb pstore.KeyBook) func(*testing.T){
"AddGetPrivKey": testKeybookPrivKey,
"AddGetPubKey": testKeyBookPubKey,
"PeersWithKeys": testKeyBookPeers,
"PubKeyAddedOnRetrieve": testInlinedPubKeyAddedOnRetrieve,
}
type KeyBookFactory func() (pstore.KeyBook, func())
func TestKeyBook(t *testing.T, factory KeyBookFactory) {
for name, test := range keyBookSuite {
// Create a new peerstore.
kb, closeFunc := factory()
// Run the test.
t.Run(name, test(kb))
// Cleanup.
if closeFunc != nil {
closeFunc()
}
}
}
func testKeybookPrivKey(kb pstore.KeyBook) func(t *testing.T) {
return func(t *testing.T) {
if peers := kb.PeersWithKeys(); len(peers) > 0 {
t.Error("expected peers to be empty on init")
}
priv, _, err := pt.RandTestKeyPair(512)
if err != nil {
t.Error(err)
}
id, err := peer.IDFromPrivateKey(priv)
if err != nil {
t.Error(err)
}
err = kb.AddPrivKey(id, priv)
if err != nil {
t.Error(err)
}
if res := kb.PrivKey(id); !priv.Equals(res) {
t.Error("retrieved private key did not match stored private key")
}
if peers := kb.PeersWithKeys(); len(peers) != 1 || peers[0] != id {
t.Error("list of peers did not include test peer")
}
}
}
func testKeyBookPubKey(kb pstore.KeyBook) func(t *testing.T) {
return func(t *testing.T) {
if peers := kb.PeersWithKeys(); len(peers) > 0 {
t.Error("expected peers to be empty on init")
}
_, pub, err := pt.RandTestKeyPair(512)
if err != nil {
t.Error(err)
}
id, err := peer.IDFromPublicKey(pub)
if err != nil {
t.Error(err)
}
err = kb.AddPubKey(id, pub)
if err != nil {
t.Error(err)
}
if res := kb.PubKey(id); !pub.Equals(res) {
t.Error("retrieved public key did not match stored public key")
}
if peers := kb.PeersWithKeys(); len(peers) != 1 || peers[0] != id {
t.Error("list of peers did not include test peer")
}
}
}
func testKeyBookPeers(kb pstore.KeyBook) func(t *testing.T) {
return func(t *testing.T) {
if peers := kb.PeersWithKeys(); len(peers) > 0 {
t.Error("expected peers to be empty on init")
}
var peers peer.IDSlice
for i := 0; i < 10; i++ {
// Add a public key.
_, pub, _ := pt.RandTestKeyPair(512)
p1, _ := peer.IDFromPublicKey(pub)
kb.AddPubKey(p1, pub)
// Add a private key.
priv, _, _ := pt.RandTestKeyPair(512)
p2, _ := peer.IDFromPrivateKey(priv)
kb.AddPrivKey(p2, priv)
peers = append(peers, []peer.ID{p1, p2}...)
}
kbPeers := kb.PeersWithKeys()
sort.Sort(kbPeers)
sort.Sort(peers)
for i, p := range kbPeers {
if p != peers[i] {
t.Errorf("mismatch of peer at index %d", i)
}
}
}
}
func testInlinedPubKeyAddedOnRetrieve(kb pstore.KeyBook) func(t *testing.T) {
return func(t *testing.T) {
if peers := kb.PeersWithKeys(); len(peers) > 0 {
t.Error("expected peers to be empty on init")
}
// Key small enough for inlining.
_, pub, err := pt.RandTestKeyPair(32)
if err != nil {
t.Error(err)
}
id, err := peer.IDFromPublicKey(pub)
if err != nil {
t.Error(err)
}
pubKey := kb.PubKey(id)
if !pubKey.Equals(pub) {
t.Error("mismatch between original public key and keybook-calculated one")
}
}
}
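
Any KeyBook implementation can reuse this suite by handing TestKeyBook a factory, just as the in-memory peerstore tests do earlier in this diff. A hedged sketch for a hypothetical third-party implementation (NewMyKeyBook is a placeholder constructor, not part of this PR):

import (
	"testing"

	pstore "github.com/libp2p/go-libp2p-peerstore"
	pt "github.com/libp2p/go-libp2p-peerstore/test"
)

func TestMyKeyBook(t *testing.T) {
	pt.TestKeyBook(t, func() (pstore.KeyBook, func()) {
		// Return the KeyBook under test plus an optional cleanup func (nil if none).
		return NewMyKeyBook(), nil
	})
}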


@@ -8,8 +8,8 @@ import (
"testing"
"time"
"github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
pstore "github.com/libp2p/go-libp2p-peerstore"
@@ -299,8 +299,7 @@ func benchmarkPeerstore(ps pstore.Peerstore) func(*testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := <-addrs
pid := peer.ID(pp.ID)
ps.AddAddr(pid, pp.Addr, pstore.PermanentAddrTTL)
ps.AddAddr(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
}
cancel()
}


@@ -1,36 +0,0 @@
package test
import (
"io"
"math/rand"
"time"
ci "github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
mh "github.com/multiformats/go-multihash"
)
func timeSeededRand() io.Reader {
return rand.New(rand.NewSource(time.Now().UnixNano()))
}
func RandPeerID() (peer.ID, error) {
buf := make([]byte, 16)
if _, err := io.ReadFull(timeSeededRand(), buf); err != nil {
return "", err
}
h, err := mh.Sum(buf, mh.SHA2_256, -1)
if err != nil {
return "", err
}
return peer.ID(h), nil
}
func RandTestKeyPair(bits int) (ci.PrivKey, ci.PubKey, error) {
return ci.GenerateKeyPairWithReader(ci.RSA, bits, timeSeededRand())
}
func SeededTestKeyPair(seed int64) (ci.PrivKey, ci.PubKey, error) {
return ci.GenerateKeyPairWithReader(ci.RSA, 512, rand.New(rand.NewSource(seed)))
}


@@ -2,45 +2,33 @@ package test
import (
"context"
cr "crypto/rand"
"fmt"
"testing"
"github.com/mr-tron/base58/base58"
peer "github.com/libp2p/go-libp2p-peer"
pt "github.com/libp2p/go-libp2p-peer/test"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
)
type peerpair struct {
ID string
ID peer.ID
Addr ma.Multiaddr
}
func randomPeer(b *testing.B) *peerpair {
buf := make([]byte, 50)
var pid peer.ID
var err error
var addr ma.Multiaddr
for {
n, err := cr.Read(buf)
if err != nil {
b.Fatal(err)
}
if n > 0 {
break
}
}
id, err := mh.Encode(buf, mh.SHA2_256)
if err != nil {
b.Fatal(err)
}
b58ID := base58.Encode(id)
addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/6666/ipfs/%s", b58ID))
if err != nil {
if pid, err = pt.RandPeerID(); err != nil {
b.Fatal(err)
}
return &peerpair{b58ID, addr}
if addr, err = ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/6666/ipfs/%s", pid.Pretty())); err != nil {
b.Fatal(err)
}
return &peerpair{pid, addr}
}
func addressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair) {
@@ -54,3 +42,11 @@ func addressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair) {
}
}
}
func multiaddr(m string) ma.Multiaddr {
maddr, err := ma.NewMultiaddr(m)
if err != nil {
panic(err)
}
return maddr
}