package leakybucket

import (
	"encoding/json"
	"fmt"
	"math"
	"os"
	"time"

	"github.com/antonmedv/expr"
	"github.com/mohae/deepcopy"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/types"
)

var serialized map[string]Leaky

// BucketPourCache keeps a copy of each event poured into each bucket while
// BucketPourTrack is enabled (a debugging aid).
var BucketPourCache map[string][]types.Event

// BucketPourTrack toggles the per-bucket event tracking above.
var BucketPourTrack bool

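// A minimal tracking sketch (illustrative usage, not code from this package):
//
//	BucketPourTrack = true
//	// ... run events through PourItemToHolders ...
//	for name, evts := range BucketPourCache {
//		log.Infof("bucket %s received %d events", name, len(evts))
//	}
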
/*
The lifecycle of the leaky routines is based on "real" time.
But when we run in time-machine mode, the reference time comes from the logs, not from "real" time.
Thus we need to garbage collect the buckets along the way to avoid skyrocketing memory usage.
*/
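// A minimal usage sketch (assumed names: `buckets` is a populated *Buckets,
// `lastEventTime` is the timestamp of the most recent parsed log line):
//
//	if err := GarbageCollectBuckets(lastEventTime, buckets); err != nil {
//		log.Errorf("bucket garbage collection failed: %v", err)
//	}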
func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error {
	//synchronize with the pour routines before touching the bucket map
	buckets.wgPour.Wait()
	buckets.wgDumpState.Add(1)
	defer buckets.wgDumpState.Done()

	total := 0
	discard := 0
	toflush := []string{}
	buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool {
		key := rkey.(string)
		val := rvalue.(*Leaky)
		total += 1
		//the bucket already overflowed, we can kill it
		if !val.Ovflw_ts.IsZero() {
			discard += 1
			val.logger.Debugf("overflowed at %s.", val.Ovflw_ts)
			toflush = append(toflush, key)
			val.tomb.Kill(nil)
			return true
		}
		/*FIXME : GetTokensCountAt sometimes has rounding issues when we match it
		against the bucket capacity, even if the bucket underflowed long ago. Round both to 2 decimals.*/
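		// Hypothetical illustration: with Capacity=5, GetTokensCountAt can
		// return 4.999999994 for a long-idle bucket; rounding both sides to
		// two decimals (4.999999994 -> 5.00) keeps the >= test reliable.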
		tokat := val.Limiter.GetTokensCountAt(deadline)
		tokcapa := float64(val.Capacity)
		tokat = math.Round(tokat*100) / 100
		tokcapa = math.Round(tokcapa*100) / 100
		//the bucket actually underflowed based on log time, but not in real time
		if tokat >= tokcapa {
			BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc()
			val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capacity:%f", val.First_ts, tokat, tokcapa)
			toflush = append(toflush, key)
			val.tomb.Kill(nil)
			return true
		}

		val.logger.Tracef("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)
		if _, ok := serialized[key]; ok {
			log.Errorf("entry %s already exists", key)
			return false
		}
		log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)

		return true
	})
	log.Infof("Cleaned %d buckets", len(toflush))
	for _, flushkey := range toflush {
		buckets.Bucket_map.Delete(flushkey)
	}
	return nil
}

func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) (string, error) {
	//synchronize with PourItemToHolders
	buckets.wgPour.Wait()
	buckets.wgDumpState.Add(1)
	defer buckets.wgDumpState.Done()

	if outputdir == "" {
		return "", fmt.Errorf("empty output dir for dump bucket state")
	}
	tmpFd, err := os.CreateTemp(os.TempDir(), "crowdsec-buckets-dump-")
	if err != nil {
		return "", fmt.Errorf("failed to create temp file : %s", err)
	}
	defer tmpFd.Close()
	tmpFileName := tmpFd.Name()
	serialized = make(map[string]Leaky)
	log.Printf("Dumping buckets state at %s", deadline)
	total := 0
	discard := 0
	buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool {
		key := rkey.(string)
		val := rvalue.(*Leaky)
		total += 1
		if !val.Ovflw_ts.IsZero() {
			discard += 1
			val.logger.Debugf("overflowed at %s.", val.Ovflw_ts)
			return true
		}
		/*FIXME : GetTokensCountAt sometimes has rounding issues when we match it
		against the bucket capacity, even if the bucket underflowed long ago. Round both to 2 decimals.*/
		tokat := val.Limiter.GetTokensCountAt(deadline)
		tokcapa := float64(val.Capacity)
		tokat = math.Round(tokat*100) / 100
		tokcapa = math.Round(tokcapa*100) / 100

		if tokat >= tokcapa {
			BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc()
			val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capacity:%f", val.First_ts, tokat, tokcapa)
			discard += 1
			return true
		}
		val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)

		if _, ok := serialized[key]; ok {
			log.Errorf("entry %s already exists", key)
			return false
		}
		log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)
		val.SerializedState = val.Limiter.Dump()
		serialized[key] = *val
		return true
	})
	bbuckets, err := json.MarshalIndent(serialized, "", " ")
	if err != nil {
		return "", fmt.Errorf("failed to marshal buckets : %s", err)
	}
	size, err := tmpFd.Write(bbuckets)
	if err != nil {
		return "", fmt.Errorf("failed to write temp file : %s", err)
	}
	log.Infof("Serialized %d live buckets (+%d expired) in %d bytes to %s", len(serialized), discard, size, tmpFd.Name())
	serialized = nil
	return tmpFileName, nil
}
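
// A minimal usage sketch (illustrative; `buckets` is a populated *Buckets):
//
//	path, err := DumpBucketsStateAt(time.Now().UTC(), "/tmp", buckets)
//	if err != nil {
//		log.Fatalf("dump failed: %v", err)
//	}
//	log.Infof("bucket state dumped to %s", path)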

func ShutdownAllBuckets(buckets *Buckets) error {
	buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool {
		key := rkey.(string)
		val := rvalue.(*Leaky)
		val.tomb.Kill(nil)
		log.Infof("killed %s", key)
		return true
	})
	return nil
}

func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, parsed *types.Event) (bool, error) {
	var sent bool
	var buckey = bucket.Mapkey
	var err error

	sigclosed := 0
	failed_sent := 0
	attempts := 0
	start := time.Now().UTC()

	for !sent {
		attempts += 1
		/* Warn the user if we spent more than 100ms trying to pour a single event : it hints at serious lock contention */
		if attempts%100000 == 0 && start.Add(100*time.Millisecond).Before(time.Now().UTC()) {
			holder.logger.Warningf("stuck for %s sending event to %s (sigclosed:%d failed_sent:%d attempts:%d)", time.Since(start),
				buckey, sigclosed, failed_sent, attempts)
		}

		/* check if the leak routine is still up */
		select {
		case _, ok := <-bucket.Signal:
			if !ok {
				//the bucket was found but its routine is dead : remove it and get a fresh one
				bucket.logger.Tracef("Bucket %s found dead, cleanup the body", buckey)
				buckets.Bucket_map.Delete(buckey)
				sigclosed += 1
				bucket, err = LoadOrStoreBucketFromHolder(buckey, buckets, holder, parsed.ExpectMode)
				if err != nil {
					return false, err
				}
				continue
			}
			//holder.logger.Tracef("Signal exists, try to pour :)")
		default:
			/*nothing to read, but not closed, try to pour */
			//holder.logger.Tracef("Signal exists but empty, try to pour :)")
		}

		/*let's see if this time-machine bucket should have expired */
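		// Illustrative example (values are hypothetical): for a bucket with
		// Duration=4h whose last event was stamped 10:00, an incoming event
		// stamped after 14:00 means the bucket would have fully leaked by
		// then, so it is deleted and a fresh one is created.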
		if bucket.Mode == types.TIMEMACHINE {
			bucket.mutex.Lock()
			firstTs := bucket.First_ts
			lastTs := bucket.Last_ts
			bucket.mutex.Unlock()

			if !firstTs.IsZero() {
				var d time.Time
				err = d.UnmarshalText([]byte(parsed.MarshaledTime))
				if err != nil {
					holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err)
				}
				if d.After(lastTs.Add(bucket.Duration)) {
					bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, lastTs.Add(bucket.Duration))
					buckets.Bucket_map.Delete(buckey)
					//not sure about this, should we create a new one ?
					sigclosed += 1
					bucket, err = LoadOrStoreBucketFromHolder(buckey, buckets, holder, parsed.ExpectMode)
					if err != nil {
						return false, err
					}
					continue
				}
			}
		}

		/*the bucket seems to be up & running*/
		select {
		case bucket.In <- parsed:
			//holder.logger.Tracef("Successfully sent !")
			if BucketPourTrack {
				if _, ok := BucketPourCache[bucket.Name]; !ok {
					BucketPourCache[bucket.Name] = make([]types.Event, 0)
				}
				evt := deepcopy.Copy(*parsed)
				BucketPourCache[bucket.Name] = append(BucketPourCache[bucket.Name], evt.(types.Event))
			}
			sent = true
			continue
		default:
			failed_sent += 1
			//holder.logger.Tracef("Failed to send, try again")
			continue
		}
	}

	holder.logger.Debugf("bucket '%s' is poured", holder.Name)
	return sent, nil
}
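
// Note on the Signal contract assumed by the liveness check above (inferred
// from usage here rather than restated from LeakRoutine itself): the routine
// signals once when ready to process events and closes the channel when it
// dies, so a read from a closed channel means the bucket must be dropped
// from the map and recreated.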

func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder BucketFactory, expectMode int) (*Leaky, error) {

	biface, ok := buckets.Bucket_map.Load(partitionKey)

	/* the bucket doesn't exist, create it ! */
	if !ok {
		var fresh_bucket *Leaky

		switch expectMode {
		case types.TIMEMACHINE:
			fresh_bucket = NewTimeMachine(holder)
			holder.logger.Debugf("Creating TimeMachine bucket")
		case types.LIVE:
			fresh_bucket = NewLeaky(holder)
			holder.logger.Debugf("Creating Live bucket")
		default:
			return nil, fmt.Errorf("input event has no expected mode : %+v", expectMode)
		}
		fresh_bucket.In = make(chan *types.Event)
		fresh_bucket.Mapkey = partitionKey
		fresh_bucket.Signal = make(chan bool, 1)
		actual, stored := buckets.Bucket_map.LoadOrStore(partitionKey, fresh_bucket)
		if !stored {
			holder.tomb.Go(func() error {
				return LeakRoutine(fresh_bucket)
			})
			biface = fresh_bucket
			//once the created goroutine is ready to process events, we can return the bucket
			<-fresh_bucket.Signal
		} else {
			holder.logger.Debugf("Unexpectedly found existing bucket for %s", partitionKey)
			biface = actual
		}
		holder.logger.Debugf("Created new bucket %s", partitionKey)
	}
	return biface.(*Leaky), nil
}

func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) {
	var (
		ok, condition, poured bool
	)

	if BucketPourTrack {
		if BucketPourCache == nil {
			BucketPourCache = make(map[string][]types.Event)
		}
		if _, ok = BucketPourCache["OK"]; !ok {
			BucketPourCache["OK"] = make([]types.Event, 0)
		}
		evt := deepcopy.Copy(parsed)
		BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event))
	}

	//find the relevant holders (scenarios)
	for idx := 0; idx < len(holders); idx++ {
		//evaluate the bucket's filter condition
		if holders[idx].RunTimeFilter != nil {
			holders[idx].logger.Tracef("event against holder %d/%d", idx, len(holders))
			output, err := expr.Run(holders[idx].RunTimeFilter, map[string]interface{}{"evt": &parsed})
			if err != nil {
				holders[idx].logger.Errorf("failed parsing : %v", err)
				return false, fmt.Errorf("leaky failed : %s", err)
			}
			//the filter is expected to return a bool : type check it
			if condition, ok = output.(bool); !ok {
				holders[idx].logger.Errorf("unexpected non-bool return : %T", output)
				holders[idx].logger.Fatalf("Filter issue")
			}

			if holders[idx].Debug {
				holders[idx].ExprDebugger.Run(holders[idx].logger, condition, map[string]interface{}{"evt": &parsed})
			}
			if !condition {
				holders[idx].logger.Debugf("Event leaving node : ko (filter mismatch)")
				continue
			}
		}
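
		// Illustrative only (the actual expressions live in scenario
		// definitions, not in this file): RunTimeFilter is typically compiled
		// from something like `evt.Meta.log_type == 'ssh_failed-auth'`, and
		// RunTimeGroupBy below from something like `evt.Meta.source_ip`.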

		//groupby determines the partition key for this specific bucket
		var groupby string
		if holders[idx].RunTimeGroupBy != nil {
			tmpGroupBy, err := expr.Run(holders[idx].RunTimeGroupBy, map[string]interface{}{"evt": &parsed})
			if err != nil {
				holders[idx].logger.Errorf("failed groupby : %v", err)
				return false, errors.New("leaky failed :/")
			}

			if groupby, ok = tmpGroupBy.(string); !ok {
				holders[idx].logger.Fatalf("failed groupby type : %T", tmpGroupBy)
				return false, errors.New("groupby wrong type")
			}
		}
		buckey := GetKey(holders[idx], groupby)

		//we need to either find the existing bucket, or create a new one (if it's the first event for this partition key)
		bucket, err := LoadOrStoreBucketFromHolder(buckey, buckets, holders[idx], parsed.ExpectMode)
		if err != nil {
			return false, errors.Wrap(err, "failed to load or store bucket")
		}
		//finally, pour the event into the bucket
		ok, err := PourItemToBucket(bucket, holders[idx], buckets, &parsed)
		if err != nil {
			return false, errors.Wrap(err, "failed to pour bucket")
		}
		if ok {
			poured = true
		}
	}
	return poured, nil
}
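
// A minimal end-to-end sketch (assumed names: `holders` built by the bucket
// loading code, `buckets` a fresh *Buckets, `evt` a parsed types.Event; none
// of these are defined in this file):
//
//	poured, err := PourItemToHolders(evt, holders, buckets)
//	if err != nil {
//		log.Errorf("pour failed: %v", err)
//	}
//	if poured {
//		log.Debugf("event matched at least one scenario bucket")
//	}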