package ds

import (
	"context"
	"sync"
	"time"

	mh "gx/ipfs/QmPnFwZ2JXKnXgMw8CdBPxn7FWh6LLdjUjxV1fKHuJnkr8/go-multihash"
	"gx/ipfs/QmQjMHF8ptRgx4E57UFMiT4YM6kqaJeYxZ1MCDX23aw4rK/golang-lru"
	"gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
	logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
	ds "gx/ipfs/QmSpg1CvpXQQow5ernt1gNBXaXV6yxyNqi7XoeerWfzB5w/go-datastore"
	"gx/ipfs/QmSpg1CvpXQQow5ernt1gNBXaXV6yxyNqi7XoeerWfzB5w/go-datastore/query"
	ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"

	pstore "github.com/libp2p/go-libp2p-peerstore"
	"github.com/libp2p/go-libp2p-peerstore/mem"
)

var log = logging.Logger("peerstore/ds")

// Number of times to retry transactional writes
var dsWriteRetries = 5

var _ pstore.AddrBook = (*dsAddrBook)(nil)

// dsAddrBook is an address manager backed by a Datastore with both an
// in-memory TTL manager and an in-memory address stream manager.
type dsAddrBook struct {
	cache       *lru.ARCCache
	ds          ds.Batching
	ttlManager  *ttlmanager
	subsManager *mem.AddrSubManager
}

// NewAddrBook initializes a new address manager given a
// Datastore instance, a context for managing the TTL manager,
// and the interval at which the TTL manager should sweep the Datastore.
func NewAddrBook(ctx context.Context, ds ds.Batching, ttlInterval time.Duration) (*dsAddrBook, error) {
	cache, err := lru.NewARC(1024)
	if err != nil {
		return nil, err
	}

	mgr := &dsAddrBook{
		cache:       cache,
		ds:          ds,
		ttlManager:  newTTLManager(ctx, ds, cache, ttlInterval),
		subsManager: mem.NewAddrSubManager(),
	}
	return mgr, nil
}

// Stop signals the TTL manager to stop. The sweep goroutine exits
// asynchronously; the call itself does not block.
func (mgr *dsAddrBook) Stop() {
	mgr.ttlManager.cancel()
}
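
// What follows is an illustrative sketch, not part of the original file: a
// minimal example of how a caller might exercise the AddrBook. It assumes
// the caller supplies a Batching-capable datastore (e.g. a badger-backed
// one); the function name and parameters are hypothetical.
func exampleUseAddrBook(ctx context.Context, store ds.Batching, p peer.ID, a ma.Multiaddr) {
	mgr, err := NewAddrBook(ctx, store, time.Minute) // sweep expired entries every minute
	if err != nil {
		log.Error(err)
		return
	}
	defer mgr.Stop()

	mgr.AddAddr(p, a, time.Hour) // remember this address for one hour
	for _, known := range mgr.Addrs(p) {
		log.Debugf("known address for %s: %s", p.Pretty(), known)
	}
}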

// peerAddressKey composes the datastore key for a (peer, address) pair:
// /<b58-encoded peer ID>/<b58-encoded multihash of the address bytes>.
func peerAddressKey(p *peer.ID, addr *ma.Multiaddr) (ds.Key, error) {
	hash, err := mh.Sum((*addr).Bytes(), mh.MURMUR3, -1)
	if err != nil {
		return ds.Key{}, err
	}
	return ds.NewKey(peer.IDB58Encode(*p)).ChildString(hash.B58String()), nil
}

// peerIDFromKey recovers the peer ID from the parent component of a key
// produced by peerAddressKey.
func peerIDFromKey(key ds.Key) (peer.ID, error) {
	idstring := key.Parent().Name()
	return peer.IDB58Decode(idstring)
}
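
// Another illustrative sketch (hypothetical, not part of the original file):
// peerIDFromKey inverts the peer component of a key built by peerAddressKey,
// which the round trip below demonstrates.
func exampleKeyRoundTrip(p peer.ID, a ma.Multiaddr) {
	key, err := peerAddressKey(&p, &a)
	if err != nil {
		log.Error(err)
		return
	}
	if id, err := peerIDFromKey(key); err == nil && id == p {
		log.Debugf("key %s round-trips to peer %s", key, p.Pretty())
	}
}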

// AddAddr will add a new address if it's not already in the AddrBook.
func (mgr *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs will add many new addresses if they're not already in the AddrBook.
func (mgr *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	if ttl <= 0 {
		return
	}

	mgr.setAddrs(p, addrs, ttl, true)
}

// SetAddr will add or update the TTL of an address in the AddrBook.
func (mgr *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs will add or update the TTLs of addresses in the AddrBook.
func (mgr *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mgr.setAddrs(p, addrs, ttl, false)
}

func (mgr *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, add bool) {
	for i := 0; i < dsWriteRetries; i++ {
		// keys to add to the TTL manager
		var keys []ds.Key
		batch, err := mgr.ds.Batch()
		if err != nil {
			log.Error(err)
			return
		}

		for _, addr := range addrs {
			if addr == nil {
				continue
			}

			key, err := peerAddressKey(&p, &addr)
			if err != nil {
				log.Error(err)
				continue
			}
			keys = append(keys, key)

			if ttl <= 0 {
				if err := batch.Delete(key); err != nil {
					log.Error(err)
				} else {
					mgr.cache.Remove(key)
				}
				continue
			}

			has := mgr.cache.Contains(key)
			if !has {
				has, err = mgr.ds.Has(key)
			}
			// Only newly discovered addresses are broadcast to subscribers.
			if err != nil || !has {
				mgr.subsManager.BroadcastAddr(p, addr)
			}

			// Allows us to support AddAddr and SetAddr in one function
			if !has {
				if err := batch.Put(key, addr.Bytes()); err != nil {
					log.Error(err)
				} else {
					mgr.cache.Add(key, addr.Bytes())
				}
			}
		}
		if err := batch.Commit(); err != nil {
			log.Errorf("failed to write addresses for peer %s: %s\n", p.Pretty(), err)
			continue
		}
		mgr.ttlManager.setTTLs(keys, ttl, add)
		return
	}
	log.Errorf("failed to avoid write conflict for peer %s after %d retries\n", p.Pretty(), dsWriteRetries)
}

// UpdateAddrs will update any addresses for a given peer and TTL combination to
// have a new TTL.
func (mgr *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
	prefix := ds.NewKey(p.Pretty())
	mgr.ttlManager.updateTTLs(prefix, oldTTL, newTTL)
}

// Addrs returns all of the non-expired addresses for a given peer.
func (mgr *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
	prefix := ds.NewKey(p.Pretty())
	q := query.Query{Prefix: prefix.String(), KeysOnly: true}
	results, err := mgr.ds.Query(q)
	if err != nil {
		log.Error(err)
		return nil
	}

	var addrs []ma.Multiaddr
	for result := range results.Next() {
		key := ds.RawKey(result.Key)
		var addri interface{}
		addri, ok := mgr.cache.Get(key)
		if !ok {
			addri, err = mgr.ds.Get(key)
			if err != nil {
				log.Error(err)
				continue
			}
		}
		addrbytes := addri.([]byte)
		addr, err := ma.NewMultiaddrBytes(addrbytes)
		if err != nil {
			log.Error(err)
			continue
		}
		addrs = append(addrs, addr)
	}

	return addrs
}

// Peers returns all of the peer IDs for which the AddrBook has addresses.
func (mgr *dsAddrBook) Peers() []peer.ID {
	q := query.Query{KeysOnly: true}
	results, err := mgr.ds.Query(q)
	if err != nil {
		log.Error(err)
		return []peer.ID{}
	}

	idset := make(map[peer.ID]struct{})
	for result := range results.Next() {
		key := ds.RawKey(result.Key)
		id, err := peerIDFromKey(key)
		if err != nil {
			continue
		}
		idset[id] = struct{}{}
	}

	ids := make([]peer.ID, 0, len(idset))
	for id := range idset {
		ids = append(ids, id)
	}
	return ids
}

// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
func (mgr *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
	initial := mgr.Addrs(p)
	return mgr.subsManager.AddrStream(ctx, p, initial)
}

// ClearAddrs will delete all known addresses for a peer ID.
func (mgr *dsAddrBook) ClearAddrs(p peer.ID) {
	prefix := ds.NewKey(p.Pretty())
	for i := 0; i < dsWriteRetries; i++ {
		q := query.Query{Prefix: prefix.String(), KeysOnly: true}
		results, err := mgr.ds.Query(q)
		if err != nil {
			log.Error(err)
			return
		}
		batch, err := mgr.ds.Batch()
		if err != nil {
			log.Error(err)
			return
		}

		for result := range results.Next() {
			key := ds.NewKey(result.Key)
			err := batch.Delete(key)
			if err != nil {
				// From inspecting badger, errors here signify a problem with
				// the transaction as a whole, so we can log and abort.
				log.Error(err)
				return
			}
			mgr.cache.Remove(key)
		}
		if err = batch.Commit(); err != nil {
			log.Errorf("failed to clear addresses for peer %s: %s\n", p.Pretty(), err)
			continue
		}
		mgr.ttlManager.clear(ds.NewKey(p.Pretty()))
		return
	}
	log.Errorf("failed to clear addresses for peer %s after %d attempts\n", p.Pretty(), dsWriteRetries)
}

// ttlentry records the TTL assigned to a datastore key and the absolute
// deadline at which it expires.
type ttlentry struct {
	TTL       time.Duration
	ExpiresAt time.Time
}

// ttlmanager tracks expiration deadlines for datastore keys and periodically
// sweeps expired entries from both the datastore and the cache.
type ttlmanager struct {
	sync.RWMutex
	entries map[ds.Key]*ttlentry

	ctx    context.Context
	cancel context.CancelFunc
	ticker *time.Ticker
	ds     ds.Batching
	cache  *lru.ARCCache
}

// newTTLManager constructs a ttlmanager that sweeps the given datastore every
// tick interval until the parent context is cancelled.
func newTTLManager(parent context.Context, d ds.Datastore, c *lru.ARCCache, tick time.Duration) *ttlmanager {
	ctx, cancel := context.WithCancel(parent)
	batching, ok := d.(ds.Batching)
	if !ok {
		panic("must construct ttlmanager with batching datastore")
	}
	mgr := &ttlmanager{
		entries: make(map[ds.Key]*ttlentry),
		ctx:     ctx,
		cancel:  cancel,
		ticker:  time.NewTicker(tick),
		ds:      batching,
		cache:   c,
	}

	go func() {
		for {
			select {
			case <-mgr.ctx.Done():
				mgr.ticker.Stop()
				return
			case <-mgr.ticker.C:
				mgr.tick()
			}
		}
	}()

	return mgr
}

// tick is to be called by the TTL manager's own goroutine only. It deletes
// every expired entry from the datastore and the cache in a single batch.
func (mgr *ttlmanager) tick() {
	mgr.Lock()
	defer mgr.Unlock()

	now := time.Now()
	batch, err := mgr.ds.Batch()
	if err != nil {
		log.Error(err)
		return
	}
	for key, entry := range mgr.entries {
		if entry.ExpiresAt.Before(now) {
			if err := batch.Delete(key); err != nil {
				log.Error(err)
			} else {
				mgr.cache.Remove(key)
			}
			delete(mgr.entries, key)
		}
	}
	err = batch.Commit()
	if err != nil {
		log.Error(err)
	}
}

// setTTLs records a TTL for each key. When add is true (AddAddr semantics),
// an existing entry with a later deadline is left untouched; otherwise
// (SetAddr semantics) the entry is overwritten. A non-positive TTL removes
// the entry.
func (mgr *ttlmanager) setTTLs(keys []ds.Key, ttl time.Duration, add bool) {
	mgr.Lock()
	defer mgr.Unlock()

	expiration := time.Now().Add(ttl)
	for _, key := range keys {
		update := true
		if add {
			if entry, ok := mgr.entries[key]; ok {
				if entry.ExpiresAt.After(expiration) {
					update = false
				}
			}
		}
		if update {
			if ttl <= 0 {
				delete(mgr.entries, key)
			} else {
				mgr.entries[key] = &ttlentry{TTL: ttl, ExpiresAt: expiration}
			}
		}
	}
}

// updateTTLs rewrites the TTL and deadline of every entry under prefix whose
// current TTL equals oldTTL.
func (mgr *ttlmanager) updateTTLs(prefix ds.Key, oldTTL, newTTL time.Duration) {
	mgr.Lock()
	defer mgr.Unlock()

	now := time.Now()
	for key, entry := range mgr.entries {
		if key.IsDescendantOf(prefix) && entry.TTL == oldTTL {
			entry.TTL = newTTL
			entry.ExpiresAt = now.Add(newTTL)
		}
	}
}

// clear drops every tracked entry under prefix.
func (mgr *ttlmanager) clear(prefix ds.Key) {
	mgr.Lock()
	defer mgr.Unlock()

	for key := range mgr.entries {
		if key.IsDescendantOf(prefix) {
			delete(mgr.entries, key)
		}
	}
}
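
// A final illustrative sketch (hypothetical, not part of the original file)
// of the TTL semantics: addresses written with a positive TTL disappear from
// Addrs once the TTL manager's sweep runs past their deadline. It assumes a
// Batching-capable datastore supplied by the caller.
func exampleTTLExpiry(ctx context.Context, store ds.Batching, p peer.ID, a ma.Multiaddr) {
	// Sweep every 100ms so the short TTL below expires promptly.
	mgr, err := NewAddrBook(ctx, store, 100*time.Millisecond)
	if err != nil {
		log.Error(err)
		return
	}
	defer mgr.Stop()

	mgr.SetAddr(p, a, 200*time.Millisecond)
	time.Sleep(500 * time.Millisecond) // allow at least one sweep past the deadline
	if len(mgr.Addrs(p)) == 0 {
		log.Debug("address expired as expected")
	}
}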