2018-09-02 19:03:02 +08:00
|
|
|
package pstoreds
|
2018-06-12 05:58:10 +08:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2018-09-02 19:10:55 +08:00
|
|
|
"github.com/hashicorp/golang-lru"
|
|
|
|
ds "github.com/ipfs/go-datastore"
|
|
|
|
"github.com/ipfs/go-datastore/query"
|
|
|
|
logging "github.com/ipfs/go-log"
|
|
|
|
"github.com/libp2p/go-libp2p-peer"
|
|
|
|
ma "github.com/multiformats/go-multiaddr"
|
|
|
|
mh "github.com/multiformats/go-multihash"
|
2018-08-30 23:24:09 +08:00
|
|
|
|
|
|
|
pstore "github.com/libp2p/go-libp2p-peerstore"
|
2018-09-05 01:07:44 +08:00
|
|
|
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
|
2018-06-12 05:58:10 +08:00
|
|
|
)
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
var (
	// log is the package-scoped logger for the datastore-backed peerstore.
	log = logging.Logger("peerstore/ds")
)
|
2018-06-16 05:48:39 +08:00
|
|
|
|
2018-08-30 23:43:40 +08:00
|
|
|
// Compile-time assertion: *dsAddrBook must implement pstore.AddrBook.
var _ pstore.AddrBook = (*dsAddrBook)(nil)
|
2018-08-29 22:12:41 +08:00
|
|
|
|
2018-09-05 22:14:40 +08:00
|
|
|
// dsAddrBook is an address book backed by a Datastore with both an
// in-memory TTL manager and an in-memory address stream manager.
type dsAddrBook struct {
	// cache fronts the datastore; it may be a no-op cache or an ARC cache,
	// depending on the configured cache size.
	cache cache
	// ds is the transactional datastore that persists address records.
	ds ds.TxnDatastore
	// ttlManager expires address entries once their TTL elapses.
	ttlManager *ttlManager
	// subsManager fans newly discovered addresses out to AddrStream subscribers.
	subsManager *pstoremem.AddrSubManager
	// writeRetries is how many times transactional writes are retried on conflict.
	writeRetries int
}
|
|
|
|
|
2018-09-05 22:14:40 +08:00
|
|
|
// NewAddrBook initializes a new address book given a
|
2018-08-29 22:12:41 +08:00
|
|
|
// Datastore instance, a context for managing the TTL manager,
|
|
|
|
// and the interval at which the TTL manager should sweep the Datastore.
|
2018-09-04 18:34:55 +08:00
|
|
|
func NewAddrBook(ctx context.Context, ds ds.TxnDatastore, opts PeerstoreOpts) (*dsAddrBook, error) {
|
|
|
|
var (
|
|
|
|
cache cache = &noopCache{}
|
|
|
|
err error
|
|
|
|
)
|
|
|
|
|
|
|
|
if opts.CacheSize > 0 {
|
|
|
|
if cache, err = lru.NewARC(int(opts.CacheSize)); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-06-27 07:06:05 +08:00
|
|
|
}
|
|
|
|
|
2018-08-30 23:43:40 +08:00
|
|
|
mgr := &dsAddrBook{
|
2018-09-04 18:34:55 +08:00
|
|
|
cache: cache,
|
|
|
|
ds: ds,
|
|
|
|
ttlManager: newTTLManager(ctx, ds, &cache, opts.TTLInterval),
|
|
|
|
subsManager: pstoremem.NewAddrSubManager(),
|
|
|
|
writeRetries: int(opts.WriteRetries),
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-06-27 07:06:05 +08:00
|
|
|
return mgr, nil
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// Stop will signal the TTL manager to stop and block until it returns.
func (mgr *dsAddrBook) Stop() {
	// Cancelling the TTL manager's context terminates its sweep goroutine.
	mgr.ttlManager.cancel()
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func keysAndAddrs(p peer.ID, addrs []ma.Multiaddr) ([]ds.Key, []ma.Multiaddr, error) {
|
|
|
|
var (
|
|
|
|
keys = make([]ds.Key, len(addrs))
|
|
|
|
clean = make([]ma.Multiaddr, len(addrs))
|
|
|
|
parentKey = ds.NewKey(peer.IDB58Encode(p))
|
|
|
|
i = 0
|
|
|
|
)
|
|
|
|
|
|
|
|
for _, addr := range addrs {
|
|
|
|
if addr == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
hash, err := mh.Sum((addr).Bytes(), mh.MURMUR3, -1)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
keys[i] = parentKey.ChildString(hash.B58String())
|
|
|
|
clean[i] = addr
|
|
|
|
i++
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
|
|
|
|
return keys[:i], clean[:i], nil
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// AddAddr will add a new address if it's not already in the AddrBook.
func (mgr *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	// Delegate to the batch variant with a single-element slice.
	mgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// AddAddrs will add many new addresses if they're not already in the AddrBook.
|
2018-08-30 23:43:40 +08:00
|
|
|
func (mgr *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
2018-06-12 05:58:10 +08:00
|
|
|
if ttl <= 0 {
|
|
|
|
return
|
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
mgr.setAddrs(p, addrs, ttl, false)
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// SetAddr will add or update the TTL of an address in the AddrBook.
|
2018-08-30 23:43:40 +08:00
|
|
|
func (mgr *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
|
2018-09-01 01:35:23 +08:00
|
|
|
addrs := []ma.Multiaddr{addr}
|
|
|
|
mgr.SetAddrs(p, addrs, ttl)
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// SetAddrs will add or update the TTLs of addresses in the AddrBook.
|
2018-08-30 23:43:40 +08:00
|
|
|
func (mgr *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
2018-09-01 01:35:23 +08:00
|
|
|
if ttl <= 0 {
|
|
|
|
mgr.deleteAddrs(p, addrs)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
mgr.setAddrs(p, addrs, ttl, true)
|
2018-06-16 01:46:35 +08:00
|
|
|
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *dsAddrBook) deleteAddrs(p peer.ID, addrs []ma.Multiaddr) error {
|
|
|
|
// Keys and cleaned up addresses.
|
|
|
|
keys, addrs, err := keysAndAddrs(p, addrs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Evict all keys from cache.
|
|
|
|
for _, key := range keys {
|
|
|
|
mgr.cache.Remove(key)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt transactional KV deletion.
|
2018-09-04 18:34:55 +08:00
|
|
|
for i := 0; i < mgr.writeRetries; i++ {
|
2018-09-01 01:35:23 +08:00
|
|
|
if err = mgr.dbDelete(keys); err == nil {
|
|
|
|
break
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
log.Errorf("failed to delete addresses for peer %s: %s\n", p.Pretty(), err)
|
|
|
|
}
|
2018-06-12 05:58:10 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
if err != nil {
|
2018-09-04 18:34:55 +08:00
|
|
|
log.Errorf("failed to avoid write conflict for peer %s after %d retries: %v\n", p.Pretty(), mgr.writeRetries, err)
|
2018-09-01 01:35:23 +08:00
|
|
|
return err
|
|
|
|
}
|
2018-06-16 01:46:35 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
mgr.ttlManager.deleteTTLs(keys)
|
|
|
|
return nil
|
|
|
|
}
|
2018-06-16 05:48:39 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, ttlReset bool) error {
|
|
|
|
// Keys and cleaned up addresses.
|
|
|
|
keys, addrs, err := keysAndAddrs(p, addrs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-06-27 07:06:05 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
// Evict all keys from cache before the update.
|
|
|
|
for _, key := range keys {
|
|
|
|
mgr.cache.Remove(key)
|
|
|
|
}
|
2018-06-16 05:48:39 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
// Attempt transactional KV insertion.
|
|
|
|
var existed []bool
|
2018-09-04 18:34:55 +08:00
|
|
|
for i := 0; i < mgr.writeRetries; i++ {
|
2018-09-01 01:35:23 +08:00
|
|
|
if existed, err = mgr.dbInsert(keys, addrs); err == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
log.Errorf("failed to write addresses for peer %s: %s\n", p.Pretty(), err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2018-09-04 18:34:55 +08:00
|
|
|
log.Errorf("failed to avoid write conflict for peer %s after %d retries: %v\n", p.Pretty(), mgr.writeRetries, err)
|
2018-09-01 01:35:23 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Successful. Update cache and broadcast event.
|
|
|
|
for i, key := range keys {
|
|
|
|
addr := addrs[i]
|
|
|
|
mgr.cache.Add(key, addr.Bytes())
|
|
|
|
|
|
|
|
if !existed[i] {
|
|
|
|
mgr.subsManager.BroadcastAddr(p, addr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Force update TTLs only if TTL reset was requested; otherwise
|
|
|
|
// insert the appropriate TTL entries if they don't already exist.
|
|
|
|
if ttlReset {
|
|
|
|
mgr.ttlManager.setTTLs(keys, ttl)
|
|
|
|
} else {
|
|
|
|
mgr.ttlManager.insertTTLs(keys, ttl)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// dbDelete performs a transactional delete of the provided keys.
|
|
|
|
func (mgr *dsAddrBook) dbDelete(keys []ds.Key) error {
|
|
|
|
var err error
|
|
|
|
|
|
|
|
txn := mgr.ds.NewTransaction(false)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
// Attempt to delete all keys.
|
|
|
|
for _, key := range keys {
|
|
|
|
if err = txn.Delete(key); err != nil {
|
|
|
|
log.Errorf("transaction failed and aborted while deleting key: %s, cause: %v", key.String(), err)
|
|
|
|
return err
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if err = txn.Commit(); err != nil {
|
|
|
|
log.Errorf("failed to commit transaction when deleting keys, cause: %v", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// dbInsert performs a transactional insert of the provided keys and values.
|
|
|
|
func (mgr *dsAddrBook) dbInsert(keys []ds.Key, addrs []ma.Multiaddr) ([]bool, error) {
|
|
|
|
var (
|
|
|
|
err error
|
|
|
|
existed = make([]bool, len(keys))
|
|
|
|
)
|
|
|
|
|
|
|
|
txn := mgr.ds.NewTransaction(false)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
for i, key := range keys {
|
|
|
|
// Check if the key existed previously.
|
|
|
|
if existed[i], err = txn.Has(key); err != nil {
|
|
|
|
log.Errorf("transaction failed and aborted while checking key existence: %s, cause: %v", key.String(), err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The key embeds a hash of the value, so if it existed, we can safely skip the insert.
|
|
|
|
if existed[i] {
|
2018-06-16 05:48:39 +08:00
|
|
|
continue
|
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
|
|
|
|
// Attempt to add the key.
|
|
|
|
if err = txn.Put(key, addrs[i].Bytes()); err != nil {
|
|
|
|
log.Errorf("transaction failed and aborted while setting key: %s, cause: %v", key.String(), err)
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
|
|
|
|
if err = txn.Commit(); err != nil {
|
|
|
|
log.Errorf("failed to commit transaction when setting keys, cause: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return existed, nil
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// UpdateAddrs will update any addresses for a given peer and TTL combination to
// have a new TTL.
func (mgr *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
	// TTL state lives solely in the in-memory TTL manager, keyed by the
	// peer's key prefix; the datastore itself is not touched here.
	prefix := ds.NewKey(p.Pretty())
	mgr.ttlManager.adjustTTLs(prefix, oldTTL, newTTL)
}
|
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
// Addrs returns all of the non-expired addresses for a given peer.
|
2018-08-30 23:43:40 +08:00
|
|
|
func (mgr *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
|
2018-09-08 01:37:01 +08:00
|
|
|
var (
|
|
|
|
prefix = ds.NewKey(p.Pretty())
|
|
|
|
q = query.Query{Prefix: prefix.String(), KeysOnly: true}
|
|
|
|
results query.Results
|
|
|
|
err error
|
|
|
|
)
|
2018-09-01 01:35:23 +08:00
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
txn := mgr.ds.NewTransaction(true)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
if results, err = txn.Query(q); err != nil {
|
2018-06-12 05:58:10 +08:00
|
|
|
log.Error(err)
|
2018-06-27 06:00:36 +08:00
|
|
|
return nil
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
defer results.Close()
|
|
|
|
|
2018-06-12 05:58:10 +08:00
|
|
|
var addrs []ma.Multiaddr
|
|
|
|
for result := range results.Next() {
|
2018-06-27 07:06:05 +08:00
|
|
|
key := ds.RawKey(result.Key)
|
|
|
|
var addri interface{}
|
|
|
|
addri, ok := mgr.cache.Get(key)
|
2018-09-08 01:37:01 +08:00
|
|
|
|
2018-06-27 07:06:05 +08:00
|
|
|
if !ok {
|
2018-09-08 01:37:01 +08:00
|
|
|
if addri, err = txn.Get(key); err != nil {
|
2018-06-27 07:06:05 +08:00
|
|
|
log.Error(err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
2018-09-08 01:37:01 +08:00
|
|
|
|
|
|
|
if addr, err := ma.NewMultiaddrBytes(addri.([]byte)); err != nil {
|
|
|
|
addrs = append(addrs, addr)
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return addrs
|
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// Peers returns all of the peer IDs for which the AddrBook has addresses.
|
2018-08-31 19:59:46 +08:00
|
|
|
func (mgr *dsAddrBook) PeersWithAddrs() peer.IDSlice {
|
2018-09-08 01:37:01 +08:00
|
|
|
var (
|
|
|
|
q = query.Query{KeysOnly: true}
|
|
|
|
results query.Results
|
|
|
|
err error
|
|
|
|
)
|
2018-09-01 01:35:23 +08:00
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
txn := mgr.ds.NewTransaction(true)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
if results, err = txn.Query(q); err != nil {
|
2018-06-14 07:27:14 +08:00
|
|
|
log.Error(err)
|
2018-08-31 19:59:46 +08:00
|
|
|
return peer.IDSlice{}
|
2018-06-14 07:27:14 +08:00
|
|
|
}
|
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
defer results.Close()
|
|
|
|
|
|
|
|
idset := make(map[string]struct{})
|
2018-06-14 07:27:14 +08:00
|
|
|
for result := range results.Next() {
|
|
|
|
key := ds.RawKey(result.Key)
|
2018-09-08 01:37:01 +08:00
|
|
|
idset[key.Parent().Name()] = struct{}{}
|
2018-06-14 07:27:14 +08:00
|
|
|
}
|
|
|
|
|
2018-09-08 01:37:01 +08:00
|
|
|
if len(idset) == 0 {
|
|
|
|
return peer.IDSlice{}
|
|
|
|
}
|
|
|
|
|
|
|
|
ids := make(peer.IDSlice, 1, len(idset))
|
2018-06-14 07:27:14 +08:00
|
|
|
for id := range idset {
|
2018-09-08 01:37:01 +08:00
|
|
|
i, _ := peer.IDB58Decode(id)
|
|
|
|
ids = append(ids, i)
|
2018-06-14 07:27:14 +08:00
|
|
|
}
|
|
|
|
return ids
|
|
|
|
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
func (mgr *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
	// Seed the stream with the currently known addresses for the peer.
	initial := mgr.Addrs(p)
	return mgr.subsManager.AddrStream(ctx, p, initial)
}
|
|
|
|
|
2018-06-16 01:46:35 +08:00
|
|
|
// ClearAddrs will delete all known addresses for a peer ID.
|
2018-08-30 23:43:40 +08:00
|
|
|
func (mgr *dsAddrBook) ClearAddrs(p peer.ID) {
|
2018-09-01 01:35:23 +08:00
|
|
|
var (
|
|
|
|
err error
|
|
|
|
keys []ds.Key
|
|
|
|
prefix = ds.NewKey(p.Pretty())
|
|
|
|
)
|
|
|
|
|
|
|
|
// Attempt transactional KV deletion.
|
2018-09-04 18:34:55 +08:00
|
|
|
for i := 0; i < mgr.writeRetries; i++ {
|
2018-09-01 01:35:23 +08:00
|
|
|
if keys, err = mgr.dbClear(prefix); err == nil {
|
|
|
|
break
|
2018-06-16 05:48:39 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
log.Errorf("failed to clear addresses for peer %s: %s\n", p.Pretty(), err)
|
|
|
|
}
|
2018-06-12 05:58:10 +08:00
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
if err != nil {
|
2018-09-04 18:34:55 +08:00
|
|
|
log.Errorf("failed to clear addresses for peer %s after %d attempts\n", p.Pretty(), mgr.writeRetries)
|
2018-09-01 01:35:23 +08:00
|
|
|
// TODO: return error
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform housekeeping.
|
|
|
|
mgr.ttlManager.clear(prefix)
|
|
|
|
for _, key := range keys {
|
|
|
|
mgr.cache.Remove(key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (mgr *dsAddrBook) dbClear(prefix ds.Key) ([]ds.Key, error) {
|
|
|
|
q := query.Query{Prefix: prefix.String(), KeysOnly: true}
|
|
|
|
|
|
|
|
txn := mgr.ds.NewTransaction(false)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
results, err := txn.Query(q)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("failed to fetch all keys prefixed with: %s, cause: %v", prefix.String(), err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var keys []ds.Key
|
|
|
|
for result := range results.Next() {
|
|
|
|
key := ds.RawKey(result.Key)
|
|
|
|
keys = append(keys, key)
|
|
|
|
|
|
|
|
if err = txn.Delete(key); err != nil {
|
|
|
|
log.Errorf("failed to delete key: %s, cause: %v", key.String(), err)
|
|
|
|
return nil, err
|
2018-06-16 05:48:39 +08:00
|
|
|
}
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
|
|
|
|
if err := results.Close(); err != nil {
|
|
|
|
log.Errorf("failed to close cursor: %s, cause: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = txn.Commit(); err != nil {
|
|
|
|
log.Errorf("failed to commit transaction when deleting keys, cause: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return keys, nil
|
|
|
|
}
|
|
|
|
|
2018-09-04 18:34:55 +08:00
|
|
|
// ttlEntry tracks the remaining lifetime of a single address key.
type ttlEntry struct {
	// TTL is the duration originally granted to the entry.
	TTL time.Duration
	// ExpiresAt is the absolute deadline after which the entry is purged.
	ExpiresAt time.Time
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
// ttlManager tracks TTLs of address keys in memory and, on every tick,
// sweeps expired entries out of the datastore and the cache.
type ttlManager struct {
	sync.RWMutex
	// entries maps each address key to its TTL bookkeeping record.
	entries map[ds.Key]*ttlEntry

	// ctx and cancel bound the lifetime of the background sweep goroutine.
	ctx    context.Context
	cancel context.CancelFunc
	// ticker drives the periodic sweep.
	ticker *time.Ticker
	// ds is the transactional datastore expired keys are deleted from.
	ds ds.TxnDatastore
	// cache mirrors the address book's cache; expired keys are evicted from it.
	cache cache
}
|
|
|
|
|
2018-09-04 18:34:55 +08:00
|
|
|
func newTTLManager(parent context.Context, d ds.Datastore, c *cache, tick time.Duration) *ttlManager {
|
2018-06-12 05:58:10 +08:00
|
|
|
ctx, cancel := context.WithCancel(parent)
|
2018-09-04 21:30:52 +08:00
|
|
|
txnDs, ok := d.(ds.TxnDatastore)
|
2018-06-27 05:58:38 +08:00
|
|
|
if !ok {
|
2018-09-04 21:30:52 +08:00
|
|
|
panic("must construct ttlManager with transactional datastore")
|
2018-06-27 05:58:38 +08:00
|
|
|
}
|
2018-09-01 01:35:23 +08:00
|
|
|
mgr := &ttlManager{
|
2018-09-04 18:34:55 +08:00
|
|
|
entries: make(map[ds.Key]*ttlEntry),
|
2018-06-12 05:58:10 +08:00
|
|
|
ctx: ctx,
|
|
|
|
cancel: cancel,
|
|
|
|
ticker: time.NewTicker(tick),
|
2018-09-04 21:30:52 +08:00
|
|
|
ds: txnDs,
|
2018-09-04 18:34:55 +08:00
|
|
|
cache: *c,
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-mgr.ctx.Done():
|
2018-06-12 07:54:17 +08:00
|
|
|
mgr.ticker.Stop()
|
|
|
|
return
|
2018-06-12 05:58:10 +08:00
|
|
|
case <-mgr.ticker.C:
|
|
|
|
mgr.tick()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return mgr
|
|
|
|
}
|
|
|
|
|
2018-06-16 01:56:35 +08:00
|
|
|
// To be called by TTL manager's coroutine only.
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *ttlManager) tick() {
|
2018-06-27 06:10:10 +08:00
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
2018-06-12 05:58:10 +08:00
|
|
|
|
|
|
|
now := time.Now()
|
2018-09-04 21:52:26 +08:00
|
|
|
var toDel []ds.Key
|
2018-06-12 05:58:10 +08:00
|
|
|
for key, entry := range mgr.entries {
|
2018-09-04 21:30:52 +08:00
|
|
|
if entry.ExpiresAt.After(now) {
|
|
|
|
continue
|
|
|
|
}
|
2018-09-04 21:52:26 +08:00
|
|
|
toDel = append(toDel, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(toDel) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
txn := mgr.ds.NewTransaction(false)
|
|
|
|
defer txn.Discard()
|
|
|
|
|
|
|
|
for _, key := range toDel {
|
2018-09-04 21:30:52 +08:00
|
|
|
if err := txn.Delete(key); err != nil {
|
|
|
|
log.Error("failed to delete TTL key: %v, cause: %v", key.String(), err)
|
|
|
|
break
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-04 21:30:52 +08:00
|
|
|
mgr.cache.Remove(key)
|
|
|
|
delete(mgr.entries, key)
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
2018-09-04 21:30:52 +08:00
|
|
|
|
|
|
|
if err := txn.Commit(); err != nil {
|
|
|
|
log.Error("failed to commit TTL deletion, cause: %v", err)
|
2018-06-27 05:22:04 +08:00
|
|
|
}
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *ttlManager) deleteTTLs(keys []ds.Key) {
|
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
|
|
|
|
|
|
|
for _, key := range keys {
|
|
|
|
delete(mgr.entries, key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (mgr *ttlManager) insertTTLs(keys []ds.Key, ttl time.Duration) {
|
2018-06-12 05:58:10 +08:00
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
|
|
|
|
|
|
|
expiration := time.Now().Add(ttl)
|
|
|
|
for _, key := range keys {
|
2018-09-01 01:35:23 +08:00
|
|
|
if entry, ok := mgr.entries[key]; !ok || (ok && entry.ExpiresAt.Before(expiration)) {
|
2018-09-04 18:34:55 +08:00
|
|
|
mgr.entries[key] = &ttlEntry{TTL: ttl, ExpiresAt: expiration}
|
2018-06-12 05:58:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *ttlManager) setTTLs(keys []ds.Key, ttl time.Duration) {
|
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
|
|
|
|
|
|
|
expiration := time.Now().Add(ttl)
|
|
|
|
for _, key := range keys {
|
2018-09-04 18:34:55 +08:00
|
|
|
mgr.entries[key] = &ttlEntry{TTL: ttl, ExpiresAt: expiration}
|
2018-09-01 01:35:23 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (mgr *ttlManager) adjustTTLs(prefix ds.Key, oldTTL, newTTL time.Duration) {
|
2018-06-12 05:58:10 +08:00
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
|
|
|
|
|
|
|
now := time.Now()
|
|
|
|
var keys []ds.Key
|
|
|
|
for key, entry := range mgr.entries {
|
|
|
|
if key.IsDescendantOf(prefix) && entry.TTL == oldTTL {
|
|
|
|
keys = append(keys, key)
|
|
|
|
entry.TTL = newTTL
|
|
|
|
entry.ExpiresAt = now.Add(newTTL)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-01 01:35:23 +08:00
|
|
|
func (mgr *ttlManager) clear(prefix ds.Key) {
|
2018-06-12 05:58:10 +08:00
|
|
|
mgr.Lock()
|
|
|
|
defer mgr.Unlock()
|
|
|
|
|
|
|
|
for key := range mgr.entries {
|
|
|
|
if key.IsDescendantOf(prefix) {
|
|
|
|
delete(mgr.entries, key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|