package leakybucket

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"html/template"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	log "github.com/sirupsen/logrus"
	"gopkg.in/tomb.v2"
	yaml "gopkg.in/yaml.v2"

	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
	"github.com/crowdsecurity/crowdsec/pkg/parser"
	"github.com/crowdsecurity/crowdsec/pkg/types"
)

type TestFile struct {
	Lines   []types.Event `yaml:"lines,omitempty"`
	Results []types.Event `yaml:"results,omitempty"`
}
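
// A test.json consumed by testFile below might look roughly like the sketch
// that follows. Illustrative only: the keys match the types.Event JSON
// encoding, and the "..." elisions stand for whatever fields the scenario
// under test requires.
//
//	{
//	  "lines":   [{"MarshaledTime": "2020-01-01T10:00:00Z", ...}],
//	  "results": [{"Overflow": {"Alert": {...}}}]
//	}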

func TestBucket(t *testing.T) {
	var (
		envSetting = os.Getenv("TEST_ONLY")
		tomb       = &tomb.Tomb{}
	)

	testdata := "./tests"

	hubCfg := &csconfig.LocalHubCfg{
		HubDir:         filepath.Join(testdata, "hub"),
		HubIndexFile:   filepath.Join(testdata, "hub", "index.json"),
		InstallDataDir: testdata,
	}

	hub, err := cwhub.NewHub(hubCfg, nil, false, nil)
	if err != nil {
		t.Fatalf("failed to init hub: %s", err)
	}

	err = exprhelpers.Init(nil)
	if err != nil {
		t.Fatalf("exprhelpers init failed: %s", err)
	}

	if envSetting != "" {
		if err := testOneBucket(t, hub, envSetting, tomb); err != nil {
			t.Fatalf("Test '%s' failed: %s", envSetting, err)
		}
	} else {
		wg := new(sync.WaitGroup)

		fds, err := os.ReadDir(testdata)
		if err != nil {
			t.Fatalf("Unable to read test directory: %s", err)
		}

		for _, fd := range fds {
			if fd.Name() == "hub" {
				continue
			}

			fname := filepath.Join(testdata, fd.Name())
			log.Infof("Running test on %s", fname)

			// register with the WaitGroup before spawning the goroutine,
			// so wg.Wait() cannot return before the goroutine has started
			wg.Add(1)

			tomb.Go(func() error {
				defer wg.Done()

				if err := testOneBucket(t, hub, fname, tomb); err != nil {
					t.Fatalf("Test '%s' failed: %s", fname, err)
				}

				return nil
			})
		}

		wg.Wait()
	}
}
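
// Usage note (illustrative invocation; the directory name is a placeholder):
// a single test directory can be exercised in isolation via the TEST_ONLY
// environment variable read above, e.g.
//
//	TEST_ONLY=./tests/<test-dir> go test ./pkg/leakybucket/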

// During tests we are likely to have only one scenario, and thus only one
// holder. watchTomb keeps a goroutine registered in the tomb so that the tomb
// does not die just because all existing buckets have been destroyed.
func watchTomb(tomb *tomb.Tomb) {
	for {
		if !tomb.Alive() {
			log.Warning("Tomb is dead")
			break
		}

		time.Sleep(100 * time.Millisecond)
	}
}

func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) error {
	var (
		holders []BucketFactory

		stagefiles []byte
		stagecfg   string
		stages     []parser.Stagefile
		err        error
		buckets    *Buckets
	)

	buckets = NewBuckets()

	// load the scenarios
	stagecfg = filepath.Join(dir, "scenarios.yaml")
	if stagefiles, err = os.ReadFile(stagecfg); err != nil {
		t.Fatalf("Failed to load stage file %s: %s", stagecfg, err)
	}

	tmpl, err := template.New("test").Parse(string(stagefiles))
	if err != nil {
		return fmt.Errorf("failed to parse template %s: %w", stagecfg, err)
	}

	var out bytes.Buffer

	err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir})
	if err != nil {
		return fmt.Errorf("failed to execute template %s: %w", stagecfg, err)
	}

	if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil {
		t.Fatalf("failed to unmarshal %s: %s", stagecfg, err)
	}

	files := []string{}
	for _, x := range stages {
		files = append(files, x.Filename)
	}

	cscfg := &csconfig.CrowdsecServiceCfg{}

	holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false)
	if err != nil {
		t.Fatalf("failed loading bucket: %s", err)
	}

	tomb.Go(func() error {
		watchTomb(tomb)
		return nil
	})

	if !testFile(t, filepath.Join(dir, "test.json"), filepath.Join(dir, "in-buckets_state.json"), holders, response, buckets) {
		return fmt.Errorf("tests from %s failed", dir)
	}

	return nil
}
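
// For reference, the scenarios.yaml read above is itself a Go template over
// {"TestDirectory": dir}; a minimal sketch (the scenario filename here is
// hypothetical) might be:
//
//	- filename: {{.TestDirectory}}/scenario.yaml
//
// Each entry unmarshals into a parser.Stagefile, whose Filename is handed to
// LoadBuckets.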

func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool {
	var results []types.Event
	var dump bool

	// should we restore a bucket state before pouring?
	if _, err := os.Stat(bs); err == nil {
		dump = true

		if err := LoadBucketsState(bs, buckets, holders); err != nil {
			t.Fatalf("Failed to load bucket state: %s", err)
		}
	}

	// now we can load the test file (decoded as strict JSON)
	testFd, err := os.Open(file)
	if err != nil {
		t.Errorf("failed to open %s: %v", file, err)
	}
	defer testFd.Close()

	dec := json.NewDecoder(testFd)
	dec.DisallowUnknownFields()

	tf := TestFile{}

	err = dec.Decode(&tf)
	if err != nil {
		if !errors.Is(err, io.EOF) {
			t.Errorf("Failed to load testfile '%s': %v", file, err)
			return false
		}

		log.Warning("end of test file")
	}

	var latestTS time.Time

	for _, in := range tf.Lines {
		// just to avoid any race during ingestion of funny scenarios
		time.Sleep(50 * time.Millisecond)

		var ts time.Time

		if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil {
			t.Fatalf("Failed to unmarshal time from input event: %s", err)
		}

		if latestTS.IsZero() || ts.After(latestTS) {
			latestTS = ts
		}

		in.ExpectMode = types.TIMEMACHINE
		log.Infof("Buckets input: %s", spew.Sdump(in))

		ok, err := PourItemToHolders(in, holders, buckets)
		if err != nil {
			t.Fatalf("Failed to pour: %s", err)
		}

		if !ok {
			log.Warning("Event wasn't poured")
		}
	}

	log.Warning("Done pouring!")

	time.Sleep(1 * time.Second)

	// read results from the response channel; stop once two consecutive
	// one-second polls come back empty
POLL_AGAIN:
	fails := 0

	for fails < 2 {
		select {
		case ret := <-response:
			log.Warning("got one result")
			results = append(results, ret)

			if ret.Overflow.Reprocess {
				log.Errorf("Overflow being reprocessed.")

				ok, err := PourItemToHolders(ret, holders, buckets)
				if err != nil {
					t.Fatalf("Failed to pour: %s", err)
				}

				if !ok {
					log.Warning("Event wasn't poured")
				}

				goto POLL_AGAIN
			}

			fails = 0
		default:
			log.Warning("no more results")
			time.Sleep(1 * time.Second)
			fails++
		}
	}

	log.Warningf("Got %d overflows from run", len(results))

	/*
		check the results we got against the expected ones:
		only the keys of the expected part are checked against the result
	*/
	var tmpFile string

	for {
		if len(tf.Results) == 0 && len(results) == 0 {
			log.Warning("Test is successful")

			if dump {
				if tmpFile, err = DumpBucketsStateAt(latestTS, ".", buckets); err != nil {
					t.Fatalf("Failed to dump bucket state: %s", err)
				}

				log.Infof("dumped bucket to %s", tmpFile)
			}

			return true
		}

		log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))

		if len(tf.Results) != len(results) {
			if dump {
				if tmpFile, err = DumpBucketsStateAt(latestTS, ".", buckets); err != nil {
					t.Fatalf("Failed to dump bucket state: %s", err)
				}

				log.Infof("dumped bucket to %s", tmpFile)
			}

			log.Errorf("results / expected count doesn't match: results = %d / expected = %d", len(results), len(tf.Results))

			return false
		}

	checkresultsloop:
		for eidx, out := range results {
			for ridx, expected := range tf.Results {
				log.Tracef("Checking next expected result.")

				// two empty overflows are considered a match
				if out.Overflow.Alert == nil && expected.Overflow.Alert == nil {
					// nothing more to compare
				} else {
					if out.Overflow.Alert == nil || expected.Overflow.Alert == nil {
						log.Printf("only one of the overflows is empty, not a match")
						continue
					}

					// Scenario
					if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario {
						log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
						continue
					}

					log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)

					// EventsCount
					if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount {
						log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
						continue
					}

					log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)

					// Sources
					if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) {
						log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
						continue
					}

					log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
				}

				// Events
				// if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) {
				// 	log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))
				// 	valid = false
				// 	continue
				// }
				// log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))

				log.Warningf("The test is valid, remove entry %d from expects, and %d from tf.Results", eidx, ridx)

				// don't do this at home: delete the current element from each
				// list (swap with the last entry, truncate) and restart the scan
				results[eidx] = results[len(results)-1]
				results = results[:len(results)-1]
				tf.Results[ridx] = tf.Results[len(tf.Results)-1]
				tf.Results = tf.Results[:len(tf.Results)-1]

				goto checkresultsloop
			}
		}

		if len(results) != 0 && len(tf.Results) != 0 {
			log.Errorf("mismatching entries left")
			log.Errorf("we got: %s", spew.Sdump(results))
			log.Errorf("we expected: %s", spew.Sdump(tf.Results))

			return false
		}

		log.Warning("entry valid at end of loop")
	}
}