package pstoreds

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	ds "github.com/ipfs/go-datastore"
	query "github.com/ipfs/go-datastore/query"
	logging "github.com/ipfs/go-log"

	pool "github.com/libp2p/go-buffer-pool"
	peer "github.com/libp2p/go-libp2p-peer"
	pstore "github.com/libp2p/go-libp2p-peerstore"
	pb "github.com/libp2p/go-libp2p-peerstore/pb"
	pstoremem "github.com/libp2p/go-libp2p-peerstore/pstoremem"

	lru "github.com/hashicorp/golang-lru"
	ma "github.com/multiformats/go-multiaddr"
	b32 "github.com/whyrusleeping/base32"
)

type ttlWriteMode int

const (
	ttlOverride ttlWriteMode = iota
	ttlExtend
)

var (
	log = logging.Logger("peerstore/ds")

	// Peer addresses are stored under the following db key pattern:
	// /peers/addrs/<b32 peer id no padding>
	addrBookBase = ds.NewKey("/peers/addrs")
)

// addrsRecord decorates the AddrBookRecord with locks and metadata.
type addrsRecord struct {
	sync.RWMutex
	*pb.AddrBookRecord
	dirty bool
}

// FlushInTxn writes the record to the datastore by calling txn.Put, unless the record holds
// no addresses (a ghost record), in which case the key is deleted via txn.Delete.
func (r *addrsRecord) FlushInTxn(txn ds.Txn) (err error) {
	key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(r.Id.ID)))
	if len(r.Addrs) == 0 {
		return txn.Delete(key)
	}

	data := pool.Get(r.Size())
	defer pool.Put(data)

	// i is the number of bytes that were effectively written.
	i, err := r.MarshalTo(data)
	if err != nil {
		return err
	}
	if err := txn.Put(key, data[:i]); err != nil {
		return err
	}
	// write succeeded; record is no longer dirty.
	r.dirty = false
	return nil
}

// Flush creates a ds.Txn, calls FlushInTxn with it, and commits the transaction.
func (r *addrsRecord) Flush(ds ds.TxnDatastore) (err error) {
	txn, err := ds.NewTransaction(false)
	if err != nil {
		return err
	}
	defer txn.Discard()

	if err = r.FlushInTxn(txn); err != nil {
		return err
	}
	return txn.Commit()
}

// Refresh is called on records to perform housekeeping. The return value signals if the record was changed
// as a result of the refresh.
//
// Refresh does the following:
// * sorts the addresses by expiration (soonest expiring first).
// * removes the addresses that have expired.
//
// It short-circuits optimistically when we know there's nothing to do.
//
// Refresh is called from several points:
// * when accessing and loading an entry.
// * when performing periodic GC.
// * after an entry has been modified (e.g. addresses have been added or removed, TTLs updated, etc.)
//
// If the return value is true, the caller can perform a flush immediately, or can schedule an async
// flush, depending on the context.
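//
// For example, with expiries [t-10, t-5, t+20, t+60] at time t, the pivot below lands on the
// second entry, so the two expired addresses are dropped and Refresh returns true.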
func (r *addrsRecord) Refresh() (chgd bool) {
	now := time.Now().Unix()
	if !r.dirty && len(r.Addrs) > 0 && r.Addrs[0].Expiry > now {
		// record is not dirty, and we have no expired entries to purge.
		return false
	}

	if len(r.Addrs) == 0 {
		// this is a ghost record; let's signal it has to be written.
		// Flush() will take care of doing the deletion.
		return true
	}

	if r.dirty && len(r.Addrs) > 1 {
		// the record has been modified, so it may need resorting.
		// we keep addresses sorted by expiration, where 0 is the soonest expiring.
		sort.Slice(r.Addrs, func(i, j int) bool {
			return r.Addrs[i].Expiry < r.Addrs[j].Expiry
		})
	}

	// since addresses are sorted by expiration, we find the first survivor and split the
	// slice on its index.
	pivot := -1
	for i, addr := range r.Addrs {
		if addr.Expiry > now {
			break
		}
		pivot = i
	}

	r.Addrs = r.Addrs[pivot+1:]
	return r.dirty || pivot >= 0
}

// dsAddrBook is an address book backed by a Datastore with a GC-like procedure
// to purge expired entries. It uses an in-memory address stream manager.
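// Records are kept in an in-memory cache (an ARC cache when a cache size is configured, a
// no-op cache otherwise) and are persisted either synchronously or by the background
// goroutine via flushJobCh.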
type dsAddrBook struct {
	ctx                context.Context
	gcInterval         time.Duration
	gcMaxPurgePerCycle int

	cache       cache
	ds          ds.TxnDatastore
	subsManager *pstoremem.AddrSubManager

	flushJobCh chan *addrsRecord
	cancelFn   func()
	closedCh   chan struct{}
}

var _ pstore.AddrBook = (*dsAddrBook)(nil)

// NewAddrBook initializes a new datastore-backed address book, given a Datastore instance,
// a context for managing the background GC goroutine, and the options carrying the GC
// interval and cache size.
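//
// A minimal usage sketch (newSomeTxnDatastore stands in for any ds.TxnDatastore
// implementation, e.g. a Badger-backed datastore; the Options values are illustrative):
//
//	store := newSomeTxnDatastore()
//	ab, err := NewAddrBook(ctx, store, Options{CacheSize: 256, GCInterval: time.Hour})
//	if err != nil {
//		panic(err)
//	}
//	defer ab.Close()
//	ab.AddAddr(p, addr, time.Hour)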
func NewAddrBook(ctx context.Context, store ds.TxnDatastore, opts Options) (ab *dsAddrBook, err error) {
	var cache cache = new(noopCache)
	if opts.CacheSize > 0 {
		if cache, err = lru.NewARC(int(opts.CacheSize)); err != nil {
			return nil, err
		}
	}

	ctx, cancelFn := context.WithCancel(ctx)
	mgr := &dsAddrBook{
		ctx:         ctx,
		cancelFn:    cancelFn,
		gcInterval:  opts.GCInterval,
		cache:       cache,
		ds:          store,
		subsManager: pstoremem.NewAddrSubManager(),
		flushJobCh:  make(chan *addrsRecord, 32),
		closedCh:    make(chan struct{}),
	}

	// kick off periodic GC.
	go mgr.background()

	return mgr, nil
}
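
// Close cancels the background goroutine and waits for it to finish before returning.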
func (ab *dsAddrBook) Close() {
	ab.cancelFn()
	<-ab.closedCh
}
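
// asyncFlush enqueues a record to be flushed by the background goroutine; if the queue is
// full, the flush is dropped and a warning is logged.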
func (ab *dsAddrBook) asyncFlush(pr *addrsRecord) {
	select {
	case ab.flushJobCh <- pr:
	default:
		log.Warningf("flush queue is full; could not flush peer %v", pr.Id.ID.Pretty())
	}
}

// loadRecord is a read-through fetch. It fetches a record from cache, falling back to the
// datastore upon a miss, and returning a newly initialized record if the peer doesn't exist.
//
// loadRecord calls Refresh() on the record before returning it. If the record changes
// as a result and `update=true`, an async flush is scheduled.
//
// If `cache=true`, the record is inserted in the cache when loaded from the datastore.
func (ab *dsAddrBook) loadRecord(id peer.ID, cache bool, update bool) (pr *addrsRecord, err error) {
	if e, ok := ab.cache.Get(id); ok {
		pr = e.(*addrsRecord)
		if pr.Refresh() && update {
			ab.asyncFlush(pr)
		}
		return pr, nil
	}

	txn, err := ab.ds.NewTransaction(true)
	if err != nil {
		return nil, err
	}
	defer txn.Discard()

	key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(id)))
	data, err := txn.Get(key)
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	if err == nil {
		pr = &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
		if err = pr.Unmarshal(data); err != nil {
			return nil, err
		}
		if pr.Refresh() && update {
			ab.asyncFlush(pr)
		}
	} else {
		pr = &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{Id: &pb.ProtoPeerID{ID: id}}}
	}

	if cache {
		ab.cache.Add(id, pr)
	}
	return pr, nil
}

// background runs the housekeeping process that takes care of:
//
// * purging expired addresses from the datastore at regular intervals.
// * persisting asynchronous flushes to the datastore.
func (ab *dsAddrBook) background() {
	timer := time.NewTicker(ab.gcInterval)
	for {
		select {
		case fj := <-ab.flushJobCh:
			if cached, ok := ab.cache.Peek(fj.Id.ID); ok {
				// Only continue flushing if the record we have in memory is the same as for which the flush
				// job was requested. If it's not in memory, it has been evicted and we don't know if we hold
				// the latest state or not. Similarly, if it's cached but the pointer is different, it means
				// it was evicted and has been reloaded, so we're also uncertain if we hold the latest state.
				if pr := cached.(*addrsRecord); pr == fj {
					pr.RLock()
					pr.Flush(ab.ds)
					pr.RUnlock()
				}
			}

		case <-timer.C:
			ab.purgeCycle()

		case <-ab.ctx.Done():
			timer.Stop()
			close(ab.closedCh)
			return
		}
	}
}

var purgeQuery = query.Query{Prefix: addrBookBase.String()}

// purgeCycle runs a single GC cycle, purging expired addresses from the datastore and
// deleting records that are left with no addresses.
func (ab *dsAddrBook) purgeCycle() {
	var id peer.ID
	record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
	txn, err := ab.ds.NewTransaction(false)
	if err != nil {
		log.Warningf("failed while purging entries: %v\n", err)
		return
	}
	defer txn.Discard()

	results, err := txn.Query(purgeQuery)
	if err != nil {
		log.Warningf("failed while purging entries: %v\n", err)
		return
	}
	defer results.Close()

	for result := range results.Next() {
		k, err := b32.RawStdEncoding.DecodeString(ds.RawKey(result.Key).Name())
		if err != nil {
			// TODO: drop the record? this will keep failing forever.
			log.Warningf("failed while purging record: %v, err: %v\n", result.Key, err)
			continue
		}
		id, err = peer.IDFromBytes(k)
		if err != nil {
			// TODO: drop the record? this will keep failing forever.
			log.Warningf("failed to extract peer ID from bytes (hex): %x, err: %v\n", k, err)
			continue
		}
		// if the record is in cache, we refresh it and flush it if necessary.
		if e, ok := ab.cache.Peek(id); ok {
			cached := e.(*addrsRecord)
			cached.Lock()
			if cached.Refresh() {
				cached.FlushInTxn(txn)
			}
			cached.Unlock()
			continue
		}

		if err := record.Unmarshal(result.Value); err != nil {
			// TODO: drop the record? this will keep failing forever.
			log.Warningf("failed while deserializing entry with key: %v, err: %v\n", result.Key, err)
			continue
		}
		if record.Refresh() {
			record.FlushInTxn(txn)
		}
		record.Reset()
	}

	if err = txn.Commit(); err != nil {
		log.Warningf("failed to commit GC transaction: %v\n", err)
	}
}

// AddAddr will add a new address if it's not already in the AddrBook.
func (ab *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	ab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs will add many new addresses if they're not already in the AddrBook.
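// Existing addresses only have their TTL extended, never shortened (see ttlExtend).
//
// For example (a sketch; pstore.TempAddrTTL is one of the TTL constants exported by the
// peerstore package):
//
//	ab.AddAddrs(p, []ma.Multiaddr{a1, a2}, pstore.TempAddrTTL)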
func (ab *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	if ttl <= 0 {
		return
	}
	addrs = cleanAddrs(addrs)
	ab.setAddrs(p, addrs, ttl, ttlExtend)
}

// SetAddr will add or update the TTL of an address in the AddrBook.
func (ab *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	ab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs will add or update the TTLs of addresses in the AddrBook.
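// Passing a TTL of zero or less removes the given addresses instead of setting them.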
func (ab *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	addrs = cleanAddrs(addrs)
	if ttl <= 0 {
		ab.deleteAddrs(p, addrs)
		return
	}
	ab.setAddrs(p, addrs, ttl, ttlOverride)
}

// UpdateAddrs will update any addresses for a given peer and TTL combination to
// have a new TTL.
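//
// For example (a sketch; the TTL values are the constants exported by the peerstore package):
//
//	// upgrade a peer's temporary addresses to connected status
//	ab.UpdateAddrs(p, pstore.TempAddrTTL, pstore.ConnectedAddrTTL)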
func (ab *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
	pr, err := ab.loadRecord(p, true, false)
	if err != nil {
		log.Errorf("failed to update ttls for peer %s: %s\n", p.Pretty(), err)
		return
	}

	pr.Lock()
	defer pr.Unlock()

	newExp := time.Now().Add(newTTL).Unix()
	for _, entry := range pr.Addrs {
		if entry.Ttl != int64(oldTTL) {
			continue
		}
		entry.Ttl, entry.Expiry = int64(newTTL), newExp
		pr.dirty = true
	}

	if pr.Refresh() {
		pr.Flush(ab.ds)
	}
}

// Addrs returns all of the non-expired addresses for a given peer.
func (ab *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
	pr, err := ab.loadRecord(p, true, true)
	if err != nil {
		log.Warningf("failed to load peerstore entry for peer %v while querying addrs, err: %v", p, err)
		return nil
	}

	pr.RLock()
	defer pr.RUnlock()

	addrs := make([]ma.Multiaddr, 0, len(pr.Addrs))
	for _, a := range pr.Addrs {
		addrs = append(addrs, a.Addr)
	}
	return addrs
}

// PeersWithAddrs returns all of the peer IDs for which the AddrBook has addresses.
func (ab *dsAddrBook) PeersWithAddrs() peer.IDSlice {
	ids, err := uniquePeerIds(ab.ds, addrBookBase, func(result query.Result) string {
		return ds.RawKey(result.Key).Name()
	})
	if err != nil {
		log.Errorf("error while retrieving peers with addresses: %v", err)
	}
	return ids
}

// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
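//
// A consumption sketch (cancel ctx to end the subscription):
//
//	for a := range ab.AddrStream(ctx, p) {
//		fmt.Println("new addr:", a)
//	}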
func (ab *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
	initial := ab.Addrs(p)
	return ab.subsManager.AddrStream(ctx, p, initial)
}

// ClearAddrs will delete all known addresses for a peer ID.
func (ab *dsAddrBook) ClearAddrs(p peer.ID) {
	ab.cache.Remove(p)

	key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(p)))
	txn, err := ab.ds.NewTransaction(false)
	if err != nil {
		log.Errorf("failed to clear addresses for peer %s: %v\n", p.Pretty(), err)
		return
	}
	defer txn.Discard()

	if err := txn.Delete(key); err != nil {
		log.Errorf("failed to clear addresses for peer %s: %v\n", p.Pretty(), err)
	}

	if err = txn.Commit(); err != nil {
		log.Errorf("failed to commit transaction when deleting keys, cause: %v", err)
	}
}

func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode) (err error) {
	pr, err := ab.loadRecord(p, true, false)
	if err != nil {
		return fmt.Errorf("failed to load peerstore entry for peer %v while setting addrs, err: %v", p, err)
	}

	pr.Lock()
	defer pr.Unlock()

	newExp := time.Now().Add(ttl).Unix()
	existed := make([]bool, len(addrs)) // keeps track of which addrs we found
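
	// First pass: for addresses we already hold, update their expiry according to the write
	// mode; addresses we don't hold yet are collected and appended in the second pass below.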
Outer:
	for i, incoming := range addrs {
		for _, have := range pr.Addrs {
			if incoming.Equal(have.Addr) {
				existed[i] = true
				if mode == ttlExtend && have.Expiry > newExp {
					// if we're only extending TTLs but the addr already has a longer one, we skip it.
					continue Outer
				}
				// we found the address; update its expiry and keep its TTL in sync, and since
				// addresses cannot be duplicate, let's move on to the next.
				have.Ttl, have.Expiry = int64(ttl), newExp
				continue Outer
			}
		}
	}

	// add addresses we didn't hold.
	var added []*pb.AddrBookRecord_AddrEntry
	for i, e := range existed {
		if e {
			continue
		}
		addr := addrs[i]
		entry := &pb.AddrBookRecord_AddrEntry{
			Addr:   &pb.ProtoAddr{Multiaddr: addr},
			Ttl:    int64(ttl),
			Expiry: newExp,
		}
		added = append(added, entry)
		// TODO: should we only broadcast if we updated the store successfully?
		// we have no way of rolling back the state of the in-memory record, although we
		// could at the expense of allocs. But is it worthwhile?
		ab.subsManager.BroadcastAddr(p, addr)
	}

	pr.Addrs = append(pr.Addrs, added...)
	pr.dirty = true
	pr.Refresh()
	return pr.Flush(ab.ds)
}

func (ab *dsAddrBook) deleteAddrs(p peer.ID, addrs []ma.Multiaddr) (err error) {
	pr, err := ab.loadRecord(p, false, false)
	if err != nil {
		return fmt.Errorf("failed to load peerstore entry for peer %v while deleting addrs, err: %v", p, err)
	}

	if pr.Addrs == nil {
		return nil
	}

	pr.Lock()
	defer pr.Unlock()

	// deletes addresses in place, avoiding copies until we encounter the first deletion.
	survived := 0
	for i, addr := range pr.Addrs {
		deleted := false
		for _, del := range addrs {
			if addr.Addr.Equal(del) {
				deleted = true
				break
			}
		}
		if deleted {
			continue
		}
		if i != survived {
			pr.Addrs[survived] = pr.Addrs[i]
		}
		survived++
	}
	pr.Addrs = pr.Addrs[:survived]

	pr.dirty = true
	pr.Refresh()
	return pr.Flush(ab.ds)
}
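
// cleanAddrs filters out nil multiaddrs from the input slice.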
func cleanAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
	clean := make([]ma.Multiaddr, 0, len(addrs))
	for _, addr := range addrs {
		if addr == nil {
			continue
		}
		clean = append(clean, addr)
	}
	return clean
}