Don't subtract 128MiB from scale factor

I think this is easier to understand since you can explain the behavior
as:

"For every GiB of memory allowed we increase the limit by LimitIncrease"

Contrast this with the previous behavior of:

"For every GiB of memory allowed we increase the limit by LimitIncrease,
except that we first subtract 128MiB."
This commit is contained in:
Marco Munizaga 2022-08-12 15:23:06 -07:00
parent 3393a9fba7
commit 46fa3e1bcc
3 changed files with 67 additions and 35 deletions

View File

@ -269,7 +269,8 @@ For Example, calling `Scale` with 4 GB of memory will result in a limit of 384 f
The `FDFraction` defines how many of the file descriptors are allocated to this The `FDFraction` defines how many of the file descriptors are allocated to this
scope. In the example above, when called with a file descriptor value of 1000, scope. In the example above, when called with a file descriptor value of 1000,
this would result in a limit of 1000 (1000 * 1) file descriptors for the system scope. this would result in a limit of 1000 (1000 * 1) file descriptors for the system
scope. See `TestReadmeExample` in `limit_test.go`.
Note that we only showed the configuration for the system scope here, equivalent Note that we only showed the configuration for the system scope here, equivalent
configuration options apply to all other scopes as well. configuration options apply to all other scopes as well.

View File

@ -254,56 +254,52 @@ func (cfg *LimitConfig) Apply(c LimitConfig) {
} }
// Scale scales up a limit configuration. // Scale scales up a limit configuration.
// memory is the amount of memory in bytes that the stack is allowed to consume, // memory is the amount of memory that the stack is allowed to consume,
// for a full it's recommended to use 1/8 of the installed system memory. // for a dedicated node it's recommended to use 1/8 of the installed system memory.
// If memory is smaller than 128 MB, the base configuration will be used. // If memory is smaller than 128 MB, the base configuration will be used.
// //
func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) LimitConfig { func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) LimitConfig {
var scaleFactor int
if memory > 128<<20 {
scaleFactor = int((memory - 128<<20) >> 20)
}
lc := LimitConfig{ lc := LimitConfig{
System: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, scaleFactor, numFD), System: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD),
Transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, scaleFactor, numFD), Transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD),
AllowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, scaleFactor, numFD), AllowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD),
AllowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, scaleFactor, numFD), AllowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD),
ServiceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, scaleFactor, numFD), ServiceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD),
ServicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, scaleFactor, numFD), ServicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD),
ProtocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, scaleFactor, numFD), ProtocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD),
ProtocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, scaleFactor, numFD), ProtocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD),
PeerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, scaleFactor, numFD), PeerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD),
Conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, scaleFactor, numFD), Conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
Stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, scaleFactor, numFD), Stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
} }
if cfg.ServiceLimits != nil { if cfg.ServiceLimits != nil {
lc.Service = make(map[string]BaseLimit) lc.Service = make(map[string]BaseLimit)
for svc, l := range cfg.ServiceLimits { for svc, l := range cfg.ServiceLimits {
lc.Service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, scaleFactor, numFD) lc.Service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
} }
} }
if cfg.ProtocolLimits != nil { if cfg.ProtocolLimits != nil {
lc.Protocol = make(map[protocol.ID]BaseLimit) lc.Protocol = make(map[protocol.ID]BaseLimit)
for proto, l := range cfg.ProtocolLimits { for proto, l := range cfg.ProtocolLimits {
lc.Protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, scaleFactor, numFD) lc.Protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
} }
} }
if cfg.PeerLimits != nil { if cfg.PeerLimits != nil {
lc.Peer = make(map[peer.ID]BaseLimit) lc.Peer = make(map[peer.ID]BaseLimit)
for p, l := range cfg.PeerLimits { for p, l := range cfg.PeerLimits {
lc.Peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, scaleFactor, numFD) lc.Peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
} }
} }
if cfg.ServicePeerLimits != nil { if cfg.ServicePeerLimits != nil {
lc.ServicePeer = make(map[string]BaseLimit) lc.ServicePeer = make(map[string]BaseLimit)
for svc, l := range cfg.ServicePeerLimits { for svc, l := range cfg.ServicePeerLimits {
lc.ServicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, scaleFactor, numFD) lc.ServicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
} }
} }
if cfg.ProtocolPeerLimits != nil { if cfg.ProtocolPeerLimits != nil {
lc.ProtocolPeer = make(map[protocol.ID]BaseLimit) lc.ProtocolPeer = make(map[protocol.ID]BaseLimit)
for p, l := range cfg.ProtocolPeerLimits { for p, l := range cfg.ProtocolPeerLimits {
lc.ProtocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, scaleFactor, numFD) lc.ProtocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
} }
} }
return lc return lc
@ -316,20 +312,30 @@ func (cfg *ScalingLimitConfig) AutoScale() LimitConfig {
) )
} }
// factor is the number of MBs above the minimum (128 MB) func scale(base BaseLimit, inc BaseLimitIncrease, memory int64, numFD int) BaseLimit {
func scale(base BaseLimit, inc BaseLimitIncrease, factor int, numFD int) BaseLimit { // mebibytesAvailable represents how many MiBs we're allowed to use. Used to
// scale the limits. If this is below 128MiB we set it to 0 to just use the
// base amounts.
var mebibytesAvailable int
if memory > 128<<20 {
mebibytesAvailable = int((memory) >> 20)
}
l := BaseLimit{ l := BaseLimit{
StreamsInbound: base.StreamsInbound + (inc.StreamsInbound*factor)>>10, StreamsInbound: base.StreamsInbound + (inc.StreamsInbound*mebibytesAvailable)>>10,
StreamsOutbound: base.StreamsOutbound + (inc.StreamsOutbound*factor)>>10, StreamsOutbound: base.StreamsOutbound + (inc.StreamsOutbound*mebibytesAvailable)>>10,
Streams: base.Streams + (inc.Streams*factor)>>10, Streams: base.Streams + (inc.Streams*mebibytesAvailable)>>10,
ConnsInbound: base.ConnsInbound + (inc.ConnsInbound*factor)>>10, ConnsInbound: base.ConnsInbound + (inc.ConnsInbound*mebibytesAvailable)>>10,
ConnsOutbound: base.ConnsOutbound + (inc.ConnsOutbound*factor)>>10, ConnsOutbound: base.ConnsOutbound + (inc.ConnsOutbound*mebibytesAvailable)>>10,
Conns: base.Conns + (inc.Conns*factor)>>10, Conns: base.Conns + (inc.Conns*mebibytesAvailable)>>10,
Memory: base.Memory + (inc.Memory*int64(factor))>>10, Memory: base.Memory + (inc.Memory*int64(mebibytesAvailable))>>10,
FD: base.FD, FD: base.FD,
} }
if inc.FDFraction > 0 && numFD > 0 { if inc.FDFraction > 0 && numFD > 0 {
l.FD = int(inc.FDFraction * float64(numFD)) l.FD = int(inc.FDFraction * float64(numFD))
if l.FD < base.FD {
// Use at least the base amount
l.FD = base.FD
}
} }
return l return l
} }

View File

@ -59,6 +59,31 @@ func TestScaling(t *testing.T) {
require.Equal(t, base.Memory+4*7, scaled.Transient.Memory) require.Equal(t, base.Memory+4*7, scaled.Transient.Memory)
}) })
t.Run("scaling and using the base amounts", func(t *testing.T) {
cfg := ScalingLimitConfig{
TransientBaseLimit: base,
TransientLimitIncrease: BaseLimitIncrease{
Streams: 1,
StreamsInbound: 2,
StreamsOutbound: 3,
Conns: 4,
ConnsInbound: 5,
ConnsOutbound: 6,
Memory: 7,
FDFraction: 0.01,
},
}
scaled := cfg.Scale(1, 10)
require.Equal(t, 1, scaled.Transient.FD)
require.Equal(t, base.Streams, scaled.Transient.Streams)
require.Equal(t, base.StreamsInbound, scaled.Transient.StreamsInbound)
require.Equal(t, base.StreamsOutbound, scaled.Transient.StreamsOutbound)
require.Equal(t, base.Conns, scaled.Transient.Conns)
require.Equal(t, base.ConnsInbound, scaled.Transient.ConnsInbound)
require.Equal(t, base.ConnsOutbound, scaled.Transient.ConnsOutbound)
require.Equal(t, base.Memory, scaled.Transient.Memory)
})
t.Run("scaling limits in maps", func(t *testing.T) { t.Run("scaling limits in maps", func(t *testing.T) {
cfg := ScalingLimitConfig{ cfg := ScalingLimitConfig{
ServiceLimits: map[string]baseLimitConfig{ ServiceLimits: map[string]baseLimitConfig{
@ -113,6 +138,6 @@ func TestReadmeExample(t *testing.T) {
limitConf := scalingLimits.Scale(4<<30, 1000) limitConf := scalingLimits.Scale(4<<30, 1000)
require.Equal(t, limitConf.System.Conns, 376) require.Equal(t, 384, limitConf.System.Conns)
require.Equal(t, limitConf.System.FD, 1000) require.Equal(t, 1000, limitConf.System.FD)
} }