
refactor tests into suites.

This enables each implementation to subject itself to a common test
suite.
Raúl Kripalani 2018-08-30 13:44:50 +01:00
parent aeae977598
commit eaa2ccbf1e
8 changed files with 690 additions and 646 deletions
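
With the suites extracted into the new test package, any AddrBook or Peerstore implementation, in-tree or external, can subject itself to the same battery of tests by supplying a factory. A minimal sketch of an out-of-tree consumer, assuming a hypothetical mybook package with a NewMyAddrBook constructor:

package mybook

import (
	"testing"

	"github.com/libp2p/go-libp2p-peerstore"
	"github.com/libp2p/go-libp2p-peerstore/test"
)

func TestMyAddrBook(t *testing.T) {
	test.TestAddrMgr(t, func() (peerstore.AddrBook, func()) {
		// NewMyAddrBook is hypothetical; a nil closer means no cleanup is needed.
		return NewMyAddrBook(), nil
	})
}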

Deleted file (307 lines removed)

@@ -1,307 +0,0 @@
package peerstore
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
"github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
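// IDS decodes a base58-encoded peer ID, failing the test if the string is invalid.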
func IDS(t *testing.T, ids string) peer.ID {
t.Helper()
id, err := peer.IDB58Decode(ids)
if err != nil {
t.Fatalf("id %q is bad: %s", ids, err)
}
return id
}
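// MA parses a multiaddr string, failing the test on error.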
func MA(t *testing.T, m string) ma.Multiaddr {
t.Helper()
maddr, err := ma.NewMultiaddr(m)
if err != nil {
t.Fatal(err)
}
return maddr
}
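// testHas asserts that act contains exactly the addresses in exp, in any order.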
func testHas(t *testing.T, exp, act []ma.Multiaddr) {
t.Helper()
if len(exp) != len(act) {
t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act))
}
for _, a := range exp {
found := false
for _, b := range act {
if a.Equal(b) {
found = true
break
}
}
if !found {
t.Fatalf("expected address %s not found", a)
}
}
}
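// setupBadgerDatastore creates a Badger-backed datastore in a temporary directory
// and returns it with a cleanup function that closes it and removes the directory.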
func setupBadgerDatastore(t testing.TB) (datastore.Batching, func()) {
dataPath, err := ioutil.TempDir(os.TempDir(), "badger")
if err != nil {
t.Fatal(err)
}
ds, err := badger.NewDatastore(dataPath, nil)
if err != nil {
t.Fatal(err)
}
closer := func() {
ds.Close()
os.RemoveAll(dataPath)
}
return ds, closer
}
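// setupDatastoreAddrManager builds a DatastoreAddrManager on top of a fresh Badger
// datastore and returns it with a cleanup function.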
func setupDatastoreAddrManager(t *testing.T) (*DatastoreAddrManager, func()) {
ds, closeDB := setupBadgerDatastore(t)
mgr, err := NewDatastoreAddrManager(context.Background(), ds, 100*time.Microsecond)
if err != nil {
t.Fatal(err)
}
closer := func() {
mgr.Stop()
closeDB()
}
return mgr, closer
}
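// runTestWithAddrManagers runs the given test against both AddrBook implementations:
// the in-memory AddrManager and the datastore-backed one.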
func runTestWithAddrManagers(t *testing.T, test func(*testing.T, AddrBook)) {
t.Log("AddrManager")
mgr1 := &AddrManager{}
test(t, mgr1)
t.Log("DatastoreAddrManager")
mgr2, closer2 := setupDatastoreAddrManager(t)
defer closer2()
test(t, mgr2)
}
func testAddresses(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")
ttl := time.Hour
m.AddAddr(id1, ma11, ttl)
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl)
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl) // idempotency
m.AddAddr(id3, ma31, ttl)
m.AddAddr(id3, ma32, ttl)
m.AddAddr(id3, ma33, ttl)
m.AddAddr(id3, ma33, ttl) // idempotency
m.AddAddr(id3, ma33, ttl)
m.AddAddrs(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // multiple
m.AddAddrs(id5, []ma.Multiaddr{ma21, ma22}, ttl) // clearing
m.AddAddrs(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // clearing
m.ClearAddrs(id5)
m.AddAddrs(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ttl) // clearing
// test the Addresses return value
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
testHas(t, []ma.Multiaddr{ma31, ma32, ma33}, m.Addrs(id3))
testHas(t, []ma.Multiaddr{ma41, ma42, ma43, ma44}, m.Addrs(id4))
testHas(t, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, m.Addrs(id5))
}
func TestAddresses(t *testing.T) {
runTestWithAddrManagers(t, testAddresses)
}
func testAddressesExpire(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma11, 2*time.Hour)
m.SetAddr(id1, ma12, 2*time.Hour)
m.SetAddr(id1, ma13, 2*time.Hour)
m.SetAddr(id2, ma24, 2*time.Hour)
m.SetAddr(id2, ma25, 2*time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma11, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma13, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id2, ma24, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma25}, m.Addrs(id2))
m.SetAddr(id2, ma25, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
m.SetAddr(id1, ma12, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
}
func TestAddressesExpire(t *testing.T) {
runTestWithAddrManagers(t, testAddressesExpire)
}
func testClearWorks(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.ClearAddrs(id1)
m.ClearAddrs(id2)
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
}
func TestClearWorks(t *testing.T) {
runTestWithAddrManagers(t, testClearWorks)
}
func testSetNegativeTTLClears(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
m.SetAddr(id1, ma11, time.Hour)
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
m.SetAddr(id1, ma11, -1)
testHas(t, nil, m.Addrs(id1))
}
func TestSetNegativeTTLClears(t *testing.T) {
runTestWithAddrManagers(t, testSetNegativeTTLClears)
}
func testUpdateTTLs(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/1.2.3.1/tcp/1112")
ma21 := MA(t, "/ip4/1.2.3.1/tcp/1121")
ma22 := MA(t, "/ip4/1.2.3.1/tcp/1122")
// Shouldn't panic.
m.UpdateAddrs(id1, time.Hour, time.Minute)
m.SetAddr(id1, ma11, time.Hour)
m.SetAddr(id1, ma12, time.Minute)
// Shouldn't panic.
m.UpdateAddrs(id2, time.Hour, time.Minute)
m.SetAddr(id2, ma21, time.Hour)
m.SetAddr(id2, ma22, time.Minute)
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
m.UpdateAddrs(id1, time.Hour, time.Second)
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
m.UpdateAddrs(id2, time.Hour, time.Second)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma22}, m.Addrs(id2))
}
func TestUpdateTTLs(t *testing.T) {
runTestWithAddrManagers(t, testUpdateTTLs)
}
func testNilAddrsDontBreak(t *testing.T, m AddrBook) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
m.SetAddr(id1, nil, time.Hour)
m.AddAddr(id1, nil, time.Hour)
}
func TestNilAddrsDontBreak(t *testing.T) {
runTestWithAddrManagers(t, testNilAddrsDontBreak)
}

ds/ds_test.go (new file, 68 lines)

@@ -0,0 +1,68 @@
package ds
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-ds-badger"
"github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/test"
)
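// setupBadgerDatastore creates a Badger-backed datastore in a temporary directory
// and returns it with a cleanup function.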
func setupBadgerDatastore(t testing.TB) (datastore.Batching, func()) {
dataPath, err := ioutil.TempDir(os.TempDir(), "badger")
if err != nil {
t.Fatal(err)
}
ds, err := badger.NewDatastore(dataPath, nil)
if err != nil {
t.Fatal(err)
}
closer := func() {
ds.Close()
os.RemoveAll(dataPath)
}
return ds, closer
}
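// newPeerstoreFactory returns a factory that builds a Badger-backed peerstore
// for the shared test suite.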
func newPeerstoreFactory(tb testing.TB) test.PeerstoreFactory {
return func() (peerstore.Peerstore, func()) {
ds, closeFunc := setupBadgerDatastore(tb)
ps, err := NewPeerstore(context.Background(), ds)
if err != nil {
tb.Fatal(err)
}
return ps, closeFunc
}
}
func TestBadgerDsPeerstore(t *testing.T) {
test.TestPeerstore(t, newPeerstoreFactory(t))
}
func TestBadgerDsAddrBook(t *testing.T) {
test.TestAddrMgr(t, func() (peerstore.AddrBook, func()) {
ds, closeDB := setupBadgerDatastore(t)
mgr, err := NewAddrManager(context.Background(), ds, 100*time.Microsecond)
if err != nil {
t.Fatal(err)
}
closeFunc := func() {
mgr.Stop()
closeDB()
}
return mgr, closeFunc
})
}
func BenchmarkBadgerDsPeerstore(b *testing.B) {
test.BenchmarkPeerstore(b, newPeerstoreFactory(b))
}

inmem_test.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package peerstore
import (
"testing"
"github.com/libp2p/go-libp2p-peerstore/test"
)
func TestInMemoryPeerstore(t *testing.T) {
test.TestPeerstore(t, func() (Peerstore, func()) {
return NewPeerstore(), nil
})
}
func TestInMemoryAddrMgr(t *testing.T) {
test.TestAddrMgr(t, func() (AddrBook, func()) {
return &AddrManager{}, nil
})
}
func BenchmarkInMemoryPeerstore(b *testing.B) {
test.BenchmarkPeerstore(b, func() (Peerstore, func()) {
return NewPeerstore(), nil
})
}

Deleted file (337 lines removed)

@@ -1,337 +0,0 @@
package peerstore
import (
"context"
"fmt"
"math/rand"
"sort"
"testing"
"time"
"github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
dspstore "github.com/libp2p/go-libp2p-peerstore/ds"
ma "github.com/multiformats/go-multiaddr"
)
func getAddrs(t *testing.T, n int) []ma.Multiaddr {
var addrs []ma.Multiaddr
for i := 0; i < n; i++ {
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, a)
}
return addrs
}
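// runTestWithPeerstores runs testFunc against both the in-memory and the
// datastore-backed peerstore.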
func runTestWithPeerstores(t *testing.T, testFunc func(*testing.T, Peerstore)) {
t.Helper()
t.Log("NewPeerstore")
ps1 := NewPeerstore()
testFunc(t, ps1)
t.Log("NewPeerstoreDatastore")
ps2, closer2 := setupDatastorePeerstore(t)
defer closer2()
testFunc(t, ps2)
}
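// setupDatastorePeerstore builds a datastore-backed peerstore over Badger and
// returns it with a cleanup function.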
func setupDatastorePeerstore(t testing.TB) (Peerstore, func()) {
ds, closeDB := setupBadgerDatastore(t)
ctx, cancel := context.WithCancel(context.Background())
ps, err := dspstore.NewPeerstore(ctx, ds)
if err != nil {
t.Fatal(err)
}
closer := func() {
cancel()
closeDB()
}
return ps, closer
}
func testAddrStream(t *testing.T, ps Peerstore) {
addrs := getAddrs(t, 100)
pid := peer.ID("testpeer")
ps.AddAddrs(pid, addrs[:10], time.Hour)
ctx, cancel := context.WithCancel(context.Background())
addrch := ps.AddrStream(ctx, pid)
// while that subscription is active, publish ten more addrs
// this tests that it doesn't hang
for i := 10; i < 20; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
}
// now receive them (without hanging)
timeout := time.After(time.Second * 10)
for i := 0; i < 20; i++ {
select {
case <-addrch:
case <-timeout:
t.Fatal("timed out")
}
}
// start a second stream
ctx2, cancel2 := context.WithCancel(context.Background())
addrch2 := ps.AddrStream(ctx2, pid)
done := make(chan struct{})
go func() {
defer close(done)
// now send the rest of the addresses
for _, a := range addrs[20:80] {
ps.AddAddr(pid, a, time.Hour)
}
}()
// receive some concurrently with the goroutine
timeout = time.After(time.Second * 10)
for i := 0; i < 40; i++ {
select {
case <-addrch:
case <-timeout:
}
}
<-done
// receive some more after waiting for that goroutine to complete
timeout = time.After(time.Second * 10)
for i := 0; i < 20; i++ {
select {
case <-addrch:
case <-timeout:
}
}
// now cancel it
cancel()
// now check the *second* subscription. We should see 80 addresses.
for i := 0; i < 80; i++ {
<-addrch2
}
cancel2()
// and add a few more addresses to make sure it doesn't hang afterwards
for _, a := range addrs[80:] {
ps.AddAddr(pid, a, time.Hour)
}
}
func TestAddrStream(t *testing.T) {
runTestWithPeerstores(t, testAddrStream)
}
func testGetStreamBeforePeerAdded(t *testing.T, ps Peerstore) {
addrs := getAddrs(t, 10)
pid := peer.ID("testpeer")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ach := ps.AddrStream(ctx, pid)
for i := 0; i < 10; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
}
received := make(map[string]bool)
var count int
for i := 0; i < 10; i++ {
a, ok := <-ach
if !ok {
t.Fatal("channel shouldnt be closed yet")
}
if a == nil {
t.Fatal("got a nil address, thats weird")
}
count++
if received[a.String()] {
t.Fatal("received duplicate address")
}
received[a.String()] = true
}
select {
case <-ach:
t.Fatal("shouldnt have received any more addresses")
default:
}
if count != 10 {
t.Fatal("should have received exactly ten addresses, got ", count)
}
for _, a := range addrs {
if !received[a.String()] {
t.Log(received)
t.Fatalf("expected to receive address %s but didnt", a)
}
}
}
func TestGetStreamBeforePeerAdded(t *testing.T) {
runTestWithPeerstores(t, testGetStreamBeforePeerAdded)
}
func testAddrStreamDuplicates(t *testing.T, ps Peerstore) {
addrs := getAddrs(t, 10)
pid := peer.ID("testpeer")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ach := ps.AddrStream(ctx, pid)
go func() {
for i := 0; i < 10; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
ps.AddAddr(pid, addrs[rand.Intn(10)], time.Hour)
}
// make sure that all addresses get processed before context is cancelled
time.Sleep(time.Millisecond * 50)
cancel()
}()
received := make(map[string]bool)
var count int
for a := range ach {
if a == nil {
t.Fatal("got a nil address, thats weird")
}
count++
if received[a.String()] {
t.Fatal("received duplicate address")
}
received[a.String()] = true
}
if count != 10 {
t.Fatal("should have received exactly ten addresses")
}
}
func TestAddrStreamDuplicates(t *testing.T) {
runTestWithPeerstores(t, testAddrStreamDuplicates)
}
func testPeerstoreProtoStore(t *testing.T, ps Peerstore) {
p1 := peer.ID("TESTPEER")
protos := []string{"a", "b", "c", "d"}
err := ps.AddProtocols(p1, protos...)
if err != nil {
t.Fatal(err)
}
out, err := ps.GetProtocols(p1)
if err != nil {
t.Fatal(err)
}
if len(out) != len(protos) {
t.Fatal("got wrong number of protocols back")
}
sort.Strings(out)
for i, p := range protos {
if out[i] != p {
t.Fatal("got wrong protocol")
}
}
supported, err := ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
if err != nil {
t.Fatal(err)
}
if len(supported) != 2 {
t.Fatal("only expected 2 supported")
}
if supported[0] != "a" || supported[1] != "b" {
t.Fatal("got wrong supported array: ", supported)
}
err = ps.SetProtocols(p1, "other")
if err != nil {
t.Fatal(err)
}
supported, err = ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
if err != nil {
t.Fatal(err)
}
if len(supported) != 0 {
t.Fatal("none of those protocols should have been supported")
}
}
func TestPeerstoreProtoStore(t *testing.T) {
runTestWithPeerstores(t, testPeerstoreProtoStore)
}
func testBasicPeerstore(t *testing.T, ps Peerstore) {
var pids []peer.ID
addrs := getAddrs(t, 10)
for _, a := range addrs {
priv, _, _ := crypto.GenerateKeyPair(crypto.RSA, 512)
p, _ := peer.IDFromPrivateKey(priv)
pids = append(pids, p)
ps.AddAddr(p, a, PermanentAddrTTL)
}
peers := ps.Peers()
if len(peers) != 10 {
t.Fatal("expected ten peers, got", len(peers))
}
pinfo := ps.PeerInfo(pids[0])
if !pinfo.Addrs[0].Equal(addrs[0]) {
t.Fatal("stored wrong address")
}
}
func TestBasicPeerstore(t *testing.T) {
runTestWithPeerstores(t, testBasicPeerstore)
}
func benchmarkPeerstore(ps Peerstore) func(*testing.B) {
return func(b *testing.B) {
addrs := make(chan *peerpair, 100)
ctx, cancel := context.WithCancel(context.Background())
go addressProducer(ctx, b, addrs)
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := <-addrs
pid := peer.ID(pp.ID)
ps.AddAddr(pid, pp.Addr, PermanentAddrTTL)
}
cancel()
}
}
func BenchmarkPeerstore(b *testing.B) {
ps := NewPeerstore()
b.Run("PeerstoreBasic", benchmarkPeerstore(ps))
dsps, closer := setupDatastorePeerstore(b)
defer closer()
b.Run("PeerstoreDatastore", benchmarkPeerstore(dsps))
}

test/addr_manager_suite.go (new file, 279 lines)

@@ -0,0 +1,279 @@
package test
import (
"testing"
"time"
"github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
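// addressBookSuite maps test names to cases; each case closes over the AddrBook
// under test and returns the concrete test function.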
var addressBookSuite = map[string]func(book pstore.AddrBook) func(*testing.T){
"Addresses": testAddresses,
"Clear": testClearWorks,
"SetNegativeTTLClears": testSetNegativeTTLClears,
"UpdateTTLs": testUpdateTTLs,
"NilAddrsDontBreak": testNilAddrsDontBreak,
"AddressesExpire": testAddressesExpire,
}
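// AddrMgrFactory constructs the AddrBook under test; the returned cleanup
// function may be nil.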
type AddrMgrFactory func() (pstore.AddrBook, func())
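// TestAddrMgr runs every test in the address-book suite against a fresh
// instance produced by factory.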
func TestAddrMgr(t *testing.T, factory AddrMgrFactory) {
for name, test := range addressBookSuite {
// Create a new address book.
ab, closeFunc := factory()
// Run the test.
t.Run(name, test(ab))
// Cleanup.
if closeFunc != nil {
closeFunc()
}
}
}
func testAddresses(m pstore.AddrBook) func(*testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")
ttl := time.Hour
m.AddAddr(id1, ma11, ttl)
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl)
m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl) // idempotency
m.AddAddr(id3, ma31, ttl)
m.AddAddr(id3, ma32, ttl)
m.AddAddr(id3, ma33, ttl)
m.AddAddr(id3, ma33, ttl) // idempotency
m.AddAddr(id3, ma33, ttl)
m.AddAddrs(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // multiple
m.AddAddrs(id5, []ma.Multiaddr{ma21, ma22}, ttl) // clearing
m.AddAddrs(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // clearing
m.ClearAddrs(id5)
m.AddAddrs(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ttl) // clearing
// test the Addresses return value
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
testHas(t, []ma.Multiaddr{ma31, ma32, ma33}, m.Addrs(id3))
testHas(t, []ma.Multiaddr{ma41, ma42, ma43, ma44}, m.Addrs(id4))
testHas(t, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, m.Addrs(id5))
}
}
func testClearWorks(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.ClearAddrs(id1)
m.ClearAddrs(id2)
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
}
}
func testSetNegativeTTLClears(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
m.SetAddr(id1, ma11, time.Hour)
testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
m.SetAddr(id1, ma11, -1)
testHas(t, nil, m.Addrs(id1))
}
}
func testUpdateTTLs(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/1.2.3.1/tcp/1112")
ma21 := MA(t, "/ip4/1.2.3.1/tcp/1121")
ma22 := MA(t, "/ip4/1.2.3.1/tcp/1122")
// Shouldn't panic.
m.UpdateAddrs(id1, time.Hour, time.Minute)
m.SetAddr(id1, ma11, time.Hour)
m.SetAddr(id1, ma12, time.Minute)
// Shouldn't panic.
m.UpdateAddrs(id2, time.Hour, time.Minute)
m.SetAddr(id2, ma21, time.Hour)
m.SetAddr(id2, ma22, time.Minute)
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
m.UpdateAddrs(id1, time.Hour, time.Second)
testHas(t, []ma.Multiaddr{ma11, ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
m.UpdateAddrs(id2, time.Hour, time.Second)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
time.Sleep(1200 * time.Millisecond)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma22}, m.Addrs(id2))
}
}
func testNilAddrsDontBreak(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
m.SetAddr(id1, nil, time.Hour)
m.AddAddr(id1, nil, time.Hour)
}
}
func testAddressesExpire(m pstore.AddrBook) func(t *testing.T) {
return func(t *testing.T) {
id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")
m.AddAddr(id1, ma11, time.Hour)
m.AddAddr(id1, ma12, time.Hour)
m.AddAddr(id1, ma13, time.Hour)
m.AddAddr(id2, ma24, time.Hour)
m.AddAddr(id2, ma25, time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma11, 2*time.Hour)
m.SetAddr(id1, ma12, 2*time.Hour)
m.SetAddr(id1, ma13, 2*time.Hour)
m.SetAddr(id2, ma24, 2*time.Hour)
m.SetAddr(id2, ma25, 2*time.Hour)
testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma11, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12, ma13}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id1, ma13, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))
m.SetAddr(id2, ma24, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, []ma.Multiaddr{ma25}, m.Addrs(id2))
m.SetAddr(id2, ma25, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
m.SetAddr(id1, ma12, time.Millisecond)
<-time.After(time.Millisecond * 2)
testHas(t, nil, m.Addrs(id1))
testHas(t, nil, m.Addrs(id2))
}
}
func IDS(t *testing.T, ids string) peer.ID {
t.Helper()
id, err := peer.IDB58Decode(ids)
if err != nil {
t.Fatalf("id %q is bad: %s", ids, err)
}
return id
}
func MA(t *testing.T, m string) ma.Multiaddr {
t.Helper()
maddr, err := ma.NewMultiaddr(m)
if err != nil {
t.Fatal(err)
}
return maddr
}
func testHas(t *testing.T, exp, act []ma.Multiaddr) {
t.Helper()
if len(exp) != len(act) {
t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act))
}
for _, a := range exp {
found := false
for _, b := range act {
if a.Equal(b) {
found = true
break
}
}
if !found {
t.Fatalf("expected address %s not found", a)
}
}
}

@@ -1,4 +1,4 @@
-package peerstore
+package test
 import (
 	"context"

test/peerstore_suite.go (new file, 316 lines)

@@ -0,0 +1,316 @@
package test
import (
"context"
"fmt"
"math/rand"
"sort"
"testing"
"time"
"github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
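// peerstoreSuite maps test names to cases parameterized by the Peerstore under test.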
var peerstoreSuite = map[string]func(book pstore.Peerstore) func(*testing.T){
"AddrStream": testAddrStream,
"GetStreamBeforePeerAdded": testGetStreamBeforePeerAdded,
"AddStreamDuplicates": testAddrStreamDuplicates,
"PeerstoreProtoStore": testPeerstoreProtoStore,
"BasicPeerstore": testBasicPeerstore,
}
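// PeerstoreFactory constructs the Peerstore under test; the returned cleanup
// function may be nil.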
type PeerstoreFactory func() (pstore.Peerstore, func())
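// TestPeerstore runs every test in the peerstore suite against a fresh
// instance produced by factory.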
func TestPeerstore(t *testing.T, factory PeerstoreFactory) {
for name, test := range peerstoreSuite {
// Create a new peerstore.
ps, closeFunc := factory()
// Run the test.
t.Run(name, test(ps))
// Cleanup.
if closeFunc != nil {
closeFunc()
}
}
}
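// BenchmarkPeerstore runs the peerstore benchmark against the instance
// produced by factory.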
func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory) {
ps, closeFunc := factory()
// Guard against factories that return a nil cleanup function (e.g. the
// in-memory implementation); deferring a nil func would panic.
if closeFunc != nil {
defer closeFunc()
}
b.Run("Peerstore", benchmarkPeerstore(ps))
}
func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
return func(t *testing.T) {
addrs, pid := getAddrs(t, 100), peer.ID("testpeer")
ps.AddAddrs(pid, addrs[:10], time.Hour)
ctx, cancel := context.WithCancel(context.Background())
addrch := ps.AddrStream(ctx, pid)
// while that subscription is active, publish ten more addrs
// this tests that it doesn't hang
for i := 10; i < 20; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
}
// now receive them (without hanging)
timeout := time.After(time.Second * 10)
for i := 0; i < 20; i++ {
select {
case <-addrch:
case <-timeout:
t.Fatal("timed out")
}
}
// start a second stream
ctx2, cancel2 := context.WithCancel(context.Background())
addrch2 := ps.AddrStream(ctx2, pid)
done := make(chan struct{})
go func() {
defer close(done)
// now send the rest of the addresses
for _, a := range addrs[20:80] {
ps.AddAddr(pid, a, time.Hour)
}
}()
// receive some concurrently with the goroutine
timeout = time.After(time.Second * 10)
for i := 0; i < 40; i++ {
select {
case <-addrch:
case <-timeout:
}
}
<-done
// receive some more after waiting for that goroutine to complete
timeout = time.After(time.Second * 10)
for i := 0; i < 20; i++ {
select {
case <-addrch:
case <-timeout:
}
}
// now cancel it
cancel()
// now check the *second* subscription. We should see 80 addresses.
for i := 0; i < 80; i++ {
<-addrch2
}
cancel2()
// and add a few more addresses to make sure it doesn't hang afterwards
for _, a := range addrs[80:] {
ps.AddAddr(pid, a, time.Hour)
}
}
}
func testGetStreamBeforePeerAdded(ps pstore.Peerstore) func(t *testing.T) {
return func(t *testing.T) {
addrs, pid := getAddrs(t, 10), peer.ID("testpeer")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ach := ps.AddrStream(ctx, pid)
for i := 0; i < 10; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
}
received := make(map[string]bool)
var count int
for i := 0; i < 10; i++ {
a, ok := <-ach
if !ok {
t.Fatal("channel shouldnt be closed yet")
}
if a == nil {
t.Fatal("got a nil address, thats weird")
}
count++
if received[a.String()] {
t.Fatal("received duplicate address")
}
received[a.String()] = true
}
select {
case <-ach:
t.Fatal("shouldnt have received any more addresses")
default:
}
if count != 10 {
t.Fatal("should have received exactly ten addresses, got ", count)
}
for _, a := range addrs {
if !received[a.String()] {
t.Log(received)
t.Fatalf("expected to receive address %s but didnt", a)
}
}
}
}
func testAddrStreamDuplicates(ps pstore.Peerstore) func(t *testing.T) {
return func(t *testing.T) {
addrs, pid := getAddrs(t, 10), peer.ID("testpeer")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ach := ps.AddrStream(ctx, pid)
go func() {
for i := 0; i < 10; i++ {
ps.AddAddr(pid, addrs[i], time.Hour)
ps.AddAddr(pid, addrs[rand.Intn(10)], time.Hour)
}
// make sure that all addresses get processed before context is cancelled
time.Sleep(time.Millisecond * 50)
cancel()
}()
received := make(map[string]bool)
var count int
for a := range ach {
if a == nil {
t.Fatal("got a nil address, thats weird")
}
count++
if received[a.String()] {
t.Fatal("received duplicate address")
}
received[a.String()] = true
}
if count != 10 {
t.Fatal("should have received exactly ten addresses")
}
}
}
func testPeerstoreProtoStore(ps pstore.Peerstore) func(t *testing.T) {
return func(t *testing.T) {
p1, protos := peer.ID("TESTPEER"), []string{"a", "b", "c", "d"}
err := ps.AddProtocols(p1, protos...)
if err != nil {
t.Fatal(err)
}
out, err := ps.GetProtocols(p1)
if err != nil {
t.Fatal(err)
}
if len(out) != len(protos) {
t.Fatal("got wrong number of protocols back")
}
sort.Strings(out)
for i, p := range protos {
if out[i] != p {
t.Fatal("got wrong protocol")
}
}
supported, err := ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
if err != nil {
t.Fatal(err)
}
if len(supported) != 2 {
t.Fatal("only expected 2 supported")
}
if supported[0] != "a" || supported[1] != "b" {
t.Fatal("got wrong supported array: ", supported)
}
err = ps.SetProtocols(p1, "other")
if err != nil {
t.Fatal(err)
}
supported, err = ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
if err != nil {
t.Fatal(err)
}
if len(supported) != 0 {
t.Fatal("none of those protocols should have been supported")
}
}
}
func testBasicPeerstore(ps pstore.Peerstore) func(t *testing.T) {
return func(t *testing.T) {
var pids []peer.ID
addrs := getAddrs(t, 10)
for _, a := range addrs {
priv, _, _ := crypto.GenerateKeyPair(crypto.RSA, 512)
p, _ := peer.IDFromPrivateKey(priv)
pids = append(pids, p)
ps.AddAddr(p, a, pstore.PermanentAddrTTL)
}
peers := ps.Peers()
if len(peers) != 10 {
t.Fatal("expected ten peers, got", len(peers))
}
pinfo := ps.PeerInfo(pids[0])
if !pinfo.Addrs[0].Equal(addrs[0]) {
t.Fatal("stored wrong address")
}
}
}
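// benchmarkPeerstore measures AddAddr throughput, feeding addresses from
// addressProducer.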
func benchmarkPeerstore(ps pstore.Peerstore) func(*testing.B) {
return func(b *testing.B) {
addrs := make(chan *peerpair, 100)
ctx, cancel := context.WithCancel(context.Background())
go addressProducer(ctx, b, addrs)
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := <-addrs
pid := peer.ID(pp.ID)
ps.AddAddr(pid, pp.Addr, pstore.PermanentAddrTTL)
}
cancel()
}
}
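// getAddrs generates n distinct loopback TCP multiaddrs.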
func getAddrs(t *testing.T, n int) []ma.Multiaddr {
var addrs []ma.Multiaddr
for i := 0; i < n; i++ {
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, a)
}
return addrs
}

@@ -1,4 +1,4 @@
-package testutil
+package test
 import (
 	"io"