// go-libp2p-peerstore/ds/addr_book.go
package ds

import (
"context"
"sync"
"time"
mh "gx/ipfs/QmPnFwZ2JXKnXgMw8CdBPxn7FWh6LLdjUjxV1fKHuJnkr8/go-multihash"
"gx/ipfs/QmQjMHF8ptRgx4E57UFMiT4YM6kqaJeYxZ1MCDX23aw4rK/golang-lru"
"gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
ds "gx/ipfs/QmSpg1CvpXQQow5ernt1gNBXaXV6yxyNqi7XoeerWfzB5w/go-datastore"
"gx/ipfs/QmSpg1CvpXQQow5ernt1gNBXaXV6yxyNqi7XoeerWfzB5w/go-datastore/query"
ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-libp2p-peerstore/mem"
)
var log = logging.Logger("peerstore/ds")
// Number of times to retry transactional writes
var dsWriteRetries = 5
var _ pstore.AddrBook = (*dsAddrBook)(nil)
// dsAddrBook is an address manager backed by a Datastore with both an
// in-memory TTL manager and an in-memory address stream manager.
type dsAddrBook struct {
cache *lru.ARCCache
ds ds.Batching
ttlManager *ttlmanager
subsManager *mem.AddrSubManager
}
// NewAddrBook initializes a new address manager given a
// Datastore instance, a context for managing the TTL manager,
// and the interval at which the TTL manager should sweep the Datastore.
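//
// A minimal usage sketch (ctx, pid, maddr and dstore are placeholders; dstore
// can be any ds.Batching implementation, e.g. a leveldb- or badger-backed one):
//
//   ab, err := NewAddrBook(ctx, dstore, time.Minute)
//   if err != nil {
//       return err
//   }
//   defer ab.Stop()
//   ab.AddAddr(pid, maddr, time.Hour)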
func NewAddrBook(ctx context.Context, ds ds.Batching, ttlInterval time.Duration) (*dsAddrBook, error) {
cache, err := lru.NewARC(1024)
if err != nil {
return nil, err
}
mgr := &dsAddrBook{
cache: cache,
ds: ds,
ttlManager: newTTLManager(ctx, ds, cache, ttlInterval),
subsManager: mem.NewAddrSubManager(),
}
return mgr, nil
}
// Stop will signal the TTL manager to stop and block until it returns.
func (mgr *dsAddrBook) Stop() {
mgr.ttlManager.cancel()
}
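// peerAddressKey builds the datastore key for a (peer, multiaddr) pair:
// /<base58 peer ID>/<base58 multihash of the multiaddr bytes>.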
func peerAddressKey(p *peer.ID, addr *ma.Multiaddr) (ds.Key, error) {
hash, err := mh.Sum((*addr).Bytes(), mh.MURMUR3, -1)
if err != nil {
return ds.Key{}, err
}
return ds.NewKey(peer.IDB58Encode(*p)).ChildString(hash.B58String()), nil
}
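// peerIDFromKey recovers the peer ID from a key produced by peerAddressKey.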
func peerIDFromKey(key ds.Key) (peer.ID, error) {
idstring := key.Parent().Name()
return peer.IDB58Decode(idstring)
}
// AddAddr will add a new address if it's not already in the AddrBook.
func (mgr *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
mgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}
// AddAddrs will add many new addresses if they're not already in the AddrBook.
func (mgr *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
if ttl <= 0 {
return
}
mgr.setAddrs(p, addrs, ttl, true)
}
// SetAddr will add or update the TTL of an address in the AddrBook.
func (mgr *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
mgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}
// SetAddrs will add or update the TTLs of addresses in the AddrBook.
func (mgr *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
mgr.setAddrs(p, addrs, ttl, false)
}
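// setAddrs performs the datastore writes behind AddAddrs and SetAddrs. When
// add is true, existing TTLs are only extended, never shortened; a
// non-positive TTL deletes the address. Writes are retried up to
// dsWriteRetries times to ride out transaction conflicts.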
func (mgr *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, add bool) {
for i := 0; i < dsWriteRetries; i++ {
// keys to add to the TTL manager
var keys []ds.Key
batch, err := mgr.ds.Batch()
if err != nil {
log.Error(err)
return
}
for _, addr := range addrs {
if addr == nil {
continue
}
key, err := peerAddressKey(&p, &addr)
if err != nil {
log.Error(err)
continue
}
keys = append(keys, key)
if ttl <= 0 {
if err := batch.Delete(key); err != nil {
log.Error(err)
} else {
mgr.cache.Remove(key)
}
continue
}
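// Consult the cache before hitting the datastore; addresses that aren't
// already known (or whose lookup failed) are broadcast to subscribers.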
has := mgr.cache.Contains(key)
if !has {
has, err = mgr.ds.Has(key)
}
if err != nil || !has {
mgr.subsManager.BroadcastAddr(p, addr)
}
// Allows us to support AddAddr and SetAddr in one function
if !has {
if err := batch.Put(key, addr.Bytes()); err != nil {
log.Error(err)
} else {
mgr.cache.Add(key, addr.Bytes())
}
}
}
if err := batch.Commit(); err != nil {
log.Errorf("failed to write addresses for peer %s: %s\n", p.Pretty(), err)
continue
}
mgr.ttlManager.setTTLs(keys, ttl, add)
return
}
log.Errorf("failed to avoid write conflict for peer %s after %d retries\n", p.Pretty(), dsWriteRetries)
}
// UpdateAddrs will update any addresses for a given peer and TTL combination to
// have a new TTL.
func (mgr *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
prefix := ds.NewKey(p.Pretty())
mgr.ttlManager.updateTTLs(prefix, oldTTL, newTTL)
}
// Addrs returns all of the non-expired addresses for a given peer.
func (mgr *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
prefix := ds.NewKey(p.Pretty())
q := query.Query{Prefix: prefix.String(), KeysOnly: true}
results, err := mgr.ds.Query(q)
if err != nil {
log.Error(err)
return nil
}
var addrs []ma.Multiaddr
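// Resolve each key to its multiaddr bytes, preferring the cache over a datastore read.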
for result := range results.Next() {
key := ds.RawKey(result.Key)
var addri interface{}
addri, ok := mgr.cache.Get(key)
if !ok {
addri, err = mgr.ds.Get(key)
if err != nil {
log.Error(err)
continue
}
}
addrbytes := addri.([]byte)
addr, err := ma.NewMultiaddrBytes(addrbytes)
if err != nil {
log.Error(err)
continue
}
addrs = append(addrs, addr)
}
return addrs
}
// AddrsPeers returns all of the peer IDs for which the AddrBook has addresses.
func (mgr *dsAddrBook) AddrsPeers() []peer.ID {
q := query.Query{KeysOnly: true}
results, err := mgr.ds.Query(q)
if err != nil {
log.Error(err)
return []peer.ID{}
}
idset := make(map[peer.ID]struct{})
for result := range results.Next() {
key := ds.RawKey(result.Key)
id, err := peerIDFromKey(key)
if err != nil {
continue
}
idset[id] = struct{}{}
}
ids := make([]peer.ID, 0, len(idset))
for id := range idset {
ids = append(ids, id)
}
return ids
}
// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
func (mgr *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
initial := mgr.Addrs(p)
return mgr.subsManager.AddrStream(ctx, p, initial)
}
// ClearAddrs will delete all known addresses for a peer ID.
func (mgr *dsAddrBook) ClearAddrs(p peer.ID) {
prefix := ds.NewKey(p.Pretty())
for i := 0; i < dsWriteRetries; i++ {
q := query.Query{Prefix: prefix.String(), KeysOnly: true}
results, err := mgr.ds.Query(q)
if err != nil {
log.Error(err)
return
}
batch, err := mgr.ds.Batch()
if err != nil {
log.Error(err)
return
}
for result := range results.Next() {
key := ds.NewKey(result.Key)
err := batch.Delete(key)
if err != nil {
// From inspecting badger, errors here signify a problem with
// the transaction as a whole, so we can log and abort.
log.Error(err)
return
}
mgr.cache.Remove(key)
}
if err = batch.Commit(); err != nil {
log.Errorf("failed to clear addresses for peer %s: %s\n", p.Pretty(), err)
continue
}
mgr.ttlManager.clear(ds.NewKey(p.Pretty()))
return
}
log.Errorf("failed to clear addresses for peer %s after %d attempts\n", p.Pretty(), dsWriteRetries)
}
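// ttlentry records the TTL assigned to a key and the absolute time at which it expires.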
type ttlentry struct {
TTL time.Duration
ExpiresAt time.Time
}
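// ttlmanager tracks the expiry of datastore keys in memory and periodically
// sweeps expired addresses out of the datastore and cache.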
type ttlmanager struct {
sync.RWMutex
entries map[ds.Key]*ttlentry
ctx context.Context
cancel context.CancelFunc
ticker *time.Ticker
ds ds.Batching
cache *lru.ARCCache
}
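// newTTLManager creates a ttlmanager whose sweep loop runs every tick until
// the parent context is cancelled. The datastore must support batching.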
func newTTLManager(parent context.Context, d ds.Datastore, c *lru.ARCCache, tick time.Duration) *ttlmanager {
ctx, cancel := context.WithCancel(parent)
batching, ok := d.(ds.Batching)
if !ok {
panic("must construct ttlmanager with batching datastore")
}
mgr := &ttlmanager{
entries: make(map[ds.Key]*ttlentry),
ctx: ctx,
cancel: cancel,
ticker: time.NewTicker(tick),
ds: batching,
cache: c,
}
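// Sweep expired entries on every tick until the context is cancelled.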
go func() {
for {
select {
case <-mgr.ctx.Done():
mgr.ticker.Stop()
return
case <-mgr.ticker.C:
mgr.tick()
}
}
}()
return mgr
}
// To be called by the TTL manager's goroutine only.
func (mgr *ttlmanager) tick() {
mgr.Lock()
defer mgr.Unlock()
now := time.Now()
batch, err := mgr.ds.Batch()
if err != nil {
log.Error(err)
return
}
for key, entry := range mgr.entries {
if entry.ExpiresAt.Before(now) {
if err := batch.Delete(key); err != nil {
log.Error(err)
} else {
mgr.cache.Remove(key)
}
delete(mgr.entries, key)
}
}
err = batch.Commit()
if err != nil {
log.Error(err)
}
}
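// setTTLs records the expiry for each key. When add is true, an existing
// entry is only updated if the new expiration is not earlier than the current
// one; a non-positive TTL removes the entry.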
func (mgr *ttlmanager) setTTLs(keys []ds.Key, ttl time.Duration, add bool) {
mgr.Lock()
defer mgr.Unlock()
expiration := time.Now().Add(ttl)
for _, key := range keys {
update := true
if add {
if entry, ok := mgr.entries[key]; ok {
if entry.ExpiresAt.After(expiration) {
update = false
}
}
}
if update {
if ttl <= 0 {
delete(mgr.entries, key)
} else {
mgr.entries[key] = &ttlentry{TTL: ttl, ExpiresAt: expiration}
}
}
}
}
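// updateTTLs rewrites the TTL and expiration of every tracked key under
// prefix whose current TTL equals oldTTL.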
func (mgr *ttlmanager) updateTTLs(prefix ds.Key, oldTTL, newTTL time.Duration) {
mgr.Lock()
defer mgr.Unlock()
now := time.Now()
for key, entry := range mgr.entries {
if key.IsDescendantOf(prefix) && entry.TTL == oldTTL {
entry.TTL = newTTL
entry.ExpiresAt = now.Add(newTTL)
}
}
}
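// clear drops all tracked entries under the given prefix.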
func (mgr *ttlmanager) clear(prefix ds.Key) {
mgr.Lock()
defer mgr.Unlock()
for key := range mgr.entries {
if key.IsDescendantOf(prefix) {
delete(mgr.entries, key)
}
}
}