package leakybucket

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/crowdsecurity/go-cs-lib/trace"

	"github.com/crowdsecurity/crowdsec/pkg/time/rate"
	"github.com/crowdsecurity/crowdsec/pkg/types"

	"github.com/davecgh/go-spew/spew"
	"github.com/mohae/deepcopy"
	"github.com/prometheus/client_golang/prometheus"
	log "github.com/sirupsen/logrus"
	"gopkg.in/tomb.v2"
)

// those constants are now defined in types/constants
// const (
// 	LIVE = iota
// 	TIMEMACHINE
// )

// Leaky represents one instance of a bucket
type Leaky struct {
	Name string
	Mode int //LIVE or TIMEMACHINE
	//the limiter is what holds the proper "leaky aspect"; it determines when/if we can pour objects
	Limiter         rate.RateLimiter `json:"-"`
	SerializedState rate.Lstate
	//Queue holds the cache of objects in the bucket; it is used to know 'how many' objects are buffered
	Queue *types.Queue
	//Leaky buckets receive events through a chan
	In chan *types.Event `json:"-"`
	//Leaky buckets push their overflows through a chan
	Out chan *types.Queue `json:"-"`
	// shared for all buckets (the idea is to kill this afterward)
	AllOut chan types.Event `json:"-"`
	//max capacity (for burst)
	Capacity int
	//CacheSize is the number of elements that should be kept in memory (compared to capacity)
	CacheSize int
	//the unique identifier of the bucket (a hash)
	Mapkey string
	// chan for signaling: a value is sent when the routine starts, and the chan is closed when the bucket dies
	Signal       chan bool `json:"-"`
	Suicide      chan bool `json:"-"`
	Reprocess    bool
	Simulated    bool
	Uuid         string
	First_ts     time.Time
	Last_ts      time.Time
	Ovflw_ts     time.Time
	Total_count  int
	Leakspeed    time.Duration
	BucketConfig *BucketFactory
	Duration     time.Duration
	Pour         func(*Leaky, types.Event) `json:"-"`
	//Profiling when set to true enables profiling of bucket
	Profiling           bool
	timedOverflow       bool
	conditionalOverflow bool
	logger              *log.Entry
	scopeType           types.ScopeType
	hash                string
	scenarioVersion     string
	tomb                *tomb.Tomb
	wgPour              *sync.WaitGroup
	wgDumpState         *sync.WaitGroup
	mutex               *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
	orderEvent          bool
}
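
// Prometheus metrics exposed by the bucket machinery: pours, overflows,
// cancellations, underflows, instantiations and the current bucket count.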
var BucketsPour = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_poured_total",
		Help: "Total events poured in buckets.",
	},
	[]string{"source", "type", "name"},
)

var BucketsOverflow = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_overflowed_total",
		Help: "Total buckets overflowed.",
	},
	[]string{"name"},
)

var BucketsCanceled = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_canceled_total",
		Help: "Total buckets canceled.",
	},
	[]string{"name"},
)

var BucketsUnderflow = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_underflowed_total",
		Help: "Total buckets underflowed.",
	},
	[]string{"name"},
)

var BucketsInstantiation = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_created_total",
		Help: "Total buckets instantiated.",
	},
	[]string{"name"},
)

var BucketsCurrentCount = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "cs_buckets",
		Help: "Number of buckets that currently exist.",
	},
	[]string{"name"},
)
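
// LeakyRoutineCount is the number of currently running LeakRoutine
// goroutines; it is updated atomically.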
var LeakyRoutineCount int64

// NewLeaky creates a new leaky bucket from a BucketFactory
// Events created by the bucket (overflow, bucket empty) are sent to a chan defined by the BucketFactory
// The leaky bucket implementation is based on a rate limiter (see https://godoc.org/golang.org/x/time/rate)
// There's a trick to emit an event when the bucket gets empty, to allow its destruction
func NewLeaky(bucketFactory BucketFactory) *Leaky {
	bucketFactory.logger.Tracef("Instantiating live bucket %s", bucketFactory.Name)
	return FromFactory(bucketFactory)
}
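
// FromFactory builds a Leaky from its factory: it picks the rate limiter
// implementation, sizes the internal queue, and computes the bucket lifetime.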
func FromFactory(bucketFactory BucketFactory) *Leaky {
	var limiter rate.RateLimiter
	//golang rate limiter. It's mainly intended for HTTP rate limiting
	Qsize := bucketFactory.Capacity
	if bucketFactory.CacheSize > 0 {
		//cache is smaller than actual capacity
		if bucketFactory.CacheSize <= bucketFactory.Capacity {
			Qsize = bucketFactory.CacheSize
			//bucket might be a counter (infinite size), allow cache limitation
		} else if bucketFactory.Capacity == -1 {
			Qsize = bucketFactory.CacheSize
		}
	}
	if bucketFactory.Capacity == -1 {
		//In this case we allow all events to pass.
		//maybe in the future we could avoid using a limiter
		limiter = &rate.AlwaysFull{}
	} else {
		limiter = rate.NewLimiter(rate.Every(bucketFactory.leakspeed), bucketFactory.Capacity)
	}
	BucketsInstantiation.With(prometheus.Labels{"name": bucketFactory.Name}).Inc()

	//create the leaky bucket per se
	l := &Leaky{
		Name:            bucketFactory.Name,
		Limiter:         limiter,
		Uuid:            seed.Generate(),
		Queue:           types.NewQueue(Qsize),
		CacheSize:       bucketFactory.CacheSize,
		Out:             make(chan *types.Queue, 1),
		Suicide:         make(chan bool, 1),
		AllOut:          bucketFactory.ret,
		Capacity:        bucketFactory.Capacity,
		Leakspeed:       bucketFactory.leakspeed,
		BucketConfig:    &bucketFactory,
		Pour:            Pour,
		Reprocess:       bucketFactory.Reprocess,
		Profiling:       bucketFactory.Profiling,
		Mode:            types.LIVE,
		scopeType:       bucketFactory.ScopeType,
		scenarioVersion: bucketFactory.ScenarioVersion,
		hash:            bucketFactory.hash,
		Simulated:       bucketFactory.Simulated,
		tomb:            bucketFactory.tomb,
		wgPour:          bucketFactory.wgPour,
		wgDumpState:     bucketFactory.wgDumpState,
		mutex:           &sync.Mutex{},
		orderEvent:      bucketFactory.orderEvent,
	}
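
	// the bucket lifetime: a full bucket needs capacity+1 leak intervals to
	// drain empty, unless an explicit duration turns it into a timed-overflow
	// bucket that overflows when the timer expires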
	if l.BucketConfig.Capacity > 0 && l.BucketConfig.leakspeed != time.Duration(0) {
		l.Duration = time.Duration(l.BucketConfig.Capacity+1) * l.BucketConfig.leakspeed
	}
	if l.BucketConfig.duration != time.Duration(0) {
		l.Duration = l.BucketConfig.duration
		l.timedOverflow = true
	}
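
	// conditional and bayesian buckets are not capacity-driven: their timer is
	// armed with the leakspeed, and overflow is decided by the bucket's own
	// logic rather than by the limiter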
	if l.BucketConfig.Type == "conditional" {
		l.conditionalOverflow = true
		l.Duration = l.BucketConfig.leakspeed
	}

	if l.BucketConfig.Type == "bayesian" {
		l.Duration = l.BucketConfig.leakspeed
	}
	return l
}

/* for now mimic a leak routine */
//LeakRoutine is the life of a bucket. It dies when the bucket underflows or overflows
func LeakRoutine(leaky *Leaky) error {
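	// durationTickerChan starts out as a placeholder channel that never fires;
	// it is swapped for the real ticker's channel once the first event arrives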
	var (
		durationTickerChan = make(<-chan time.Time)
		durationTicker     *time.Ticker
		firstEvent         = true
	)

	defer trace.CatchPanic(fmt.Sprintf("crowdsec/LeakRoutine/%s", leaky.Name))

	BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Inc()
	defer BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Dec()

	/*todo : we create a logger at runtime while we want leakroutine to be up asap, might not be a good idea*/
	leaky.logger = leaky.BucketConfig.logger.WithFields(log.Fields{"partition": leaky.Mapkey, "bucket_id": leaky.Uuid})

	//We copy the processors, as they come from the BucketFactory and are thus shared between buckets.
	//If we don't copy, processors using a local cache (such as Uniq) are subject to race conditions.
	//This can lead to buckets silently discarding their first events, preventing the underflow ticker from being initialized
	//and preventing them from being destroyed.
	processors := deepcopy.Copy(leaky.BucketConfig.processors).([]Processor)

	leaky.Signal <- true
	atomic.AddInt64(&LeakyRoutineCount, 1)
	defer atomic.AddInt64(&LeakyRoutineCount, -1)

	for _, f := range processors {
		err := f.OnBucketInit(leaky.BucketConfig)
		if err != nil {
			leaky.logger.Errorf("Problem at bucket initialization. Bail out %T : %v", f, err)
			close(leaky.Signal)
			return fmt.Errorf("problem at bucket initialization, bail out %T : %v", f, err)
		}
	}

	leaky.logger.Debugf("Leaky routine starting, lifetime : %s", leaky.Duration)
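
	// event loop: pour incoming events and arm the duration ticker, emit
	// overflows, and handle suicide, underflow and external shutdown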
	for {
		select {
		/*receiving an event*/
		case msg := <-leaky.In:
			/*the reuse of the msg var is confusing, as it is reassigned with a different type :/*/
			for _, processor := range processors {
				msg = processor.OnBucketPour(leaky.BucketConfig)(*msg, leaky)
				// if msg is nil, the event was discarded: stop processing
				if msg == nil {
					if leaky.orderEvent {
						orderEvent[leaky.Mapkey].Done()
					}
					goto End
				}
			}
			if leaky.logger.Level >= log.TraceLevel {
				leaky.logger.Tracef("Pour event: %s", spew.Sdump(msg))
			}
			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()

			leaky.Pour(leaky, *msg) // glue for now

			for _, processor := range processors {
				msg = processor.AfterBucketPour(leaky.BucketConfig)(*msg, leaky)
				if msg == nil {
					if leaky.orderEvent {
						orderEvent[leaky.Mapkey].Done()
					}
					goto End
				}
			}

			//Clear cache on behalf of pour

			// if durationTicker isn't initialized, then we're pouring our first event

			// reinitialize the durationTicker when it's not a counter bucket
			if !leaky.timedOverflow || firstEvent {
				if firstEvent {
					durationTicker = time.NewTicker(leaky.Duration)
					durationTickerChan = durationTicker.C
					defer durationTicker.Stop()
				} else {
					durationTicker.Reset(leaky.Duration)
				}
			}
			firstEvent = false
			if leaky.orderEvent {
				orderEvent[leaky.Mapkey].Done()
			}
		/*we overflowed*/
		case ofw := <-leaky.Out:
			leaky.overflow(ofw)
			return nil
		/*suiciiiide*/
		case <-leaky.Suicide:
			close(leaky.Signal)
			BucketsCanceled.With(prometheus.Labels{"name": leaky.Name}).Inc()
			leaky.logger.Debugf("Suicide triggered")
			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
			leaky.logger.Tracef("Returning from leaky routine.")
			return nil
		/*we underflow or reach bucket deadline (timers)*/
		case <-durationTickerChan:
			var (
				alert types.RuntimeAlert
				err   error
			)
			leaky.Ovflw_ts = time.Now().UTC()
			close(leaky.Signal)
			ofw := leaky.Queue
			alert = types.RuntimeAlert{Mapkey: leaky.Mapkey}

			if leaky.timedOverflow {
				BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc()

				alert, err = NewAlert(leaky, ofw)
				if err != nil {
					log.Errorf("%s", err)
				}
				for _, f := range leaky.BucketConfig.processors {
					alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw)
					if ofw == nil {
						leaky.logger.Debugf("Overflow has been discarded (%T)", f)
						break
					}
				}
				leaky.logger.Infof("Timed Overflow")
			} else {
				leaky.logger.Debugf("bucket underflow, destroy")
				BucketsUnderflow.With(prometheus.Labels{"name": leaky.Name}).Inc()
			}
			if leaky.logger.Level >= log.TraceLevel {
				/*don't sdump if it's not going to be printed, it's expensive*/
				leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert}))
			}

			leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW}
			leaky.logger.Tracef("Returning from leaky routine.")
			return nil
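		/*the bucket is being killed externally: drain pending overflows before returning*/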
		case <-leaky.tomb.Dying():
			leaky.logger.Debugf("Bucket externally killed, return")
			for len(leaky.Out) > 0 {
				ofw := <-leaky.Out
				leaky.overflow(ofw)
			}
			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
			return nil
		}
	End:
	}
}
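
// Pour adds an event to the bucket. While the rate limiter still has capacity
// the event is simply queued; otherwise the bucket is full, the event is
// queued anyway and the whole queue is pushed to Out, triggering the overflow.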
func Pour(leaky *Leaky, msg types.Event) {
	leaky.wgDumpState.Wait()
	leaky.wgPour.Add(1)
	defer leaky.wgPour.Done()

	leaky.Total_count += 1
	if leaky.First_ts.IsZero() {
		leaky.First_ts = time.Now().UTC()
	}
	leaky.Last_ts = time.Now().UTC()
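
	// Allow() consumes one token from the limiter and fails when the bucket is
	// at capacity; conditional buckets bypass it, as their overflow is decided
	// by a condition expression rather than by the limiter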
	if leaky.Limiter.Allow() || leaky.conditionalOverflow {
		leaky.Queue.Add(msg)
	} else {
		leaky.Ovflw_ts = time.Now().UTC()
		leaky.logger.Debugf("Last event to be poured, bucket overflow.")
		leaky.Queue.Add(msg)
		leaky.Out <- leaky.Queue
	}
}
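
// overflow closes the bucket, turns its queue into an alert, runs the
// OnBucketOverflow hooks (which may discard the overflow) and pushes the
// resulting event to the shared AllOut chan.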
func (leaky *Leaky) overflow(ofw *types.Queue) {
	close(leaky.Signal)
	alert, err := NewAlert(leaky, ofw)
	if err != nil {
		log.Errorf("%s", err)
	}
	leaky.logger.Tracef("Overflow hooks time : %v", leaky.BucketConfig.processors)
	for _, f := range leaky.BucketConfig.processors {
		alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw)
		if ofw == nil {
			leaky.logger.Debugf("Overflow has been discarded (%T)", f)
			break
		}
	}
	if leaky.logger.Level >= log.TraceLevel {
		leaky.logger.Tracef("Overflow event: %s", spew.Sdump(alert))
	}
	mt, _ := leaky.Ovflw_ts.MarshalText()
	leaky.logger.Tracef("overflow time : %s", mt)

	BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc()

	leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW, MarshaledTime: string(mt)}
}