Merge branch 'crowdsecurity:master' into master

Commit 9c133ee3d0 by Manuel Sabban, 2021-12-13 17:38:57 +01:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
101 changed files with 2399 additions and 637 deletions


@ -32,6 +32,9 @@ jobs:
echo ::set-output name=version::${VERSION}
echo ::set-output name=tags::${TAGS}
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- name: Set up QEMU
  uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v1
@ -49,7 +52,7 @@ jobs:
file: ./Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}

.gitignore vendored (4 changes)

@ -18,3 +18,7 @@
# crowdsec binaries
cmd/crowdsec-cli/cscli
cmd/crowdsec/crowdsec
plugins/notifications/http/notification-http
plugins/notifications/slack/notification-slack
plugins/notifications/splunk/notification-splunk


@ -4,7 +4,8 @@ FROM golang:${GOVERSION}-alpine AS build
WORKDIR /go/src/crowdsec
RUN apk update && apk add --no-cache git jq gcc libc-dev make bash gettext binutils-gold
# wizard.sh requires GNU coreutils
RUN apk update && apk add --no-cache git jq gcc libc-dev make bash gettext binutils-gold coreutils
COPY . .


@ -42,7 +42,7 @@ BUILD_VERSION?="$(shell git describe --tags `git rev-list --tags --max-count=1`)
BUILD_GOVERSION="$(shell go version | cut -d " " -f3 | sed -E 's/[go]+//g')"
BUILD_CODENAME=$(shell cat RELEASE.json | jq -r .CodeName)
BUILD_TIMESTAMP=$(shell date +%F"_"%T)
BUILD_TAG="$(shell git rev-parse HEAD)"
BUILD_TAG?="$(shell git rev-parse HEAD)"
export LD_OPTS=-ldflags "-s -w -X github.com/crowdsecurity/crowdsec/pkg/cwversion.Version=$(BUILD_VERSION) \
-X github.com/crowdsecurity/crowdsec/pkg/cwversion.System=$(SYSTEM) \
@ -88,6 +88,9 @@ clean:
@rm -f $(CSCLI_BIN)
@rm -f *.log
@rm -f crowdsec-release.tgz
@rm -f $(HTTP_PLUGIN_FOLDER)/$(HTTP_PLUGIN_BIN)
@rm -f $(SLACK_PLUGIN_FOLDER)/$(SLACK_PLUGIN_BIN)
@rm -f $(SPLUNK_PLUGIN_FOLDER)/$(SPLUNK_PLUGIN_BIN)
cscli: goversion
@GOARCH=$(GOARCH) GOOS=$(GOOS) $(MAKE) -C $(CSCLI_FOLDER) build --no-print-directory


@ -27,7 +27,7 @@
## <TL;DR>
CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), uses Grok patterns to parse logs and YAML scenario to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read bellow for more.
CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), uses Grok patterns to parse logs and YAML scenario to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read below for more.
## 2 mins install


@ -13,6 +13,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/go-openapi/strfmt"
"github.com/olekukonko/tablewriter"
@ -469,5 +470,40 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
cmdAlerts.AddCommand(cmdAlertsInspect)
var maxItems int
var maxAge string
var cmdAlertsFlush = &cobra.Command{
Use: `flush`,
Short: `Flush alerts
/!\ This command can be used only on the same machine as the local API`,
Example: `cscli alerts flush --max-items 1000 --max-age 7d`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if err := csConfig.LoadDBConfig(); err != nil {
log.Fatalf(err.Error())
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Fatalf("unable to create new database client: %s", err)
}
log.Info("Flushing alerts. !! This may take a long time !!")
err = dbClient.FlushAlerts(maxAge, maxItems)
if err != nil {
log.Fatalf("unable to flush alerts: %s", err)
}
log.Info("Alerts flushed")
},
}
cmdAlertsFlush.Flags().SortFlags = false
cmdAlertsFlush.Flags().IntVar(&maxItems, "max-items", 5000, "Maximum number of alert items to keep in the database")
cmdAlertsFlush.Flags().StringVar(&maxAge, "max-age", "7d", "Maximum age of alert items to keep in the database")
cmdAlerts.AddCommand(cmdAlertsFlush)
return cmdAlerts
}


@ -17,6 +17,7 @@ import (
var keyName string
var keyIP string
var keyLength int
var key string
func NewBouncersCmd() *cobra.Command {
/* ---- DECISIONS COMMAND */
@ -99,25 +100,30 @@ Note: This command requires database direct access, so is intended to be run on
Use: "add MyBouncerName [--length 16]",
Short: "add bouncer",
Long: `add bouncer`,
Example: `cscli bouncers add MyBouncerName
cscli bouncers add MyBouncerName -l 24`,
Example: fmt.Sprintf(`cscli bouncers add MyBouncerName
cscli bouncers add MyBouncerName -l 24
cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, arg []string) {
keyName := arg[0]
var apiKey string
var err error
if keyName == "" {
log.Errorf("Please provide a name for the api key")
return
}
apiKey, err := middlewares.GenerateAPIKey(keyLength)
apiKey = key
if key == "" {
apiKey, err = middlewares.GenerateAPIKey(keyLength)
}
if err != nil {
log.Errorf("unable to generate api key: %s", err)
return
}
err = dbClient.CreateBouncer(keyName, keyIP, middlewares.HashSHA512(apiKey))
if err != nil {
log.Errorf("unable to create bouncer: %s", err)
return
log.Fatalf("unable to create bouncer: %s", err)
}
if csConfig.Cscli.Output == "human" {
@ -136,6 +142,7 @@ cscli bouncers add MyBouncerName -l 24`,
},
}
cmdBouncersAdd.Flags().IntVarP(&keyLength, "length", "l", 16, "length of the api key")
cmdBouncersAdd.Flags().StringVarP(&key, "key", "k", "", "api key for the bouncer")
cmdBouncers.AddCommand(cmdBouncersAdd)
var cmdBouncersDelete = &cobra.Command{
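
Note: the new -k example above is generated with generatePassword(32), a helper defined elsewhere in cscli and not shown in this diff. As a hedged sketch only (assumed shape, not the repo's actual implementation), such a helper can be built on crypto/rand:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"
    )

    // generatePassword returns a random alphanumeric string of length n.
    // Hypothetical sketch; cscli's real helper may differ.
    func generatePassword(n int) string {
        const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        out := make([]byte, n)
        for i := range out {
            idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset))))
            if err != nil {
                panic(err) // crypto/rand failure is not recoverable here
            }
            out[i] = charset[idx.Int64()]
        }
        return string(out)
    }

    func main() {
        fmt.Println(generatePassword(32))
    }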


@ -7,6 +7,8 @@ import (
"os"
"path/filepath"
"github.com/antonmedv/expr"
"github.com/pkg/errors"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
@ -279,6 +281,10 @@ func NewConfigCmd() *cobra.Command {
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
}
var key string
type Env struct {
Config *csconfig.Config
}
var cmdConfigShow = &cobra.Command{
Use: "show",
Short: "Displays current config",
@ -286,6 +292,36 @@ func NewConfigCmd() *cobra.Command {
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if key != "" {
program, err := expr.Compile(key, expr.Env(Env{}))
if err != nil {
log.Fatal(err)
}
output, err := expr.Run(program, Env{Config: csConfig})
if err != nil {
log.Fatal(err)
}
switch csConfig.Cscli.Output {
case "human", "raw":
switch output.(type) {
case string:
fmt.Printf("%s\n", output)
case int:
fmt.Printf("%d\n", output)
default:
fmt.Printf("%v\n", output)
}
case "json":
data, err := json.MarshalIndent(output, "", " ")
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
}
return
}
switch csConfig.Cscli.Output {
case "human":
fmt.Printf("Global:\n")
@ -374,6 +410,7 @@ func NewConfigCmd() *cobra.Command {
}
},
}
cmdConfigShow.Flags().StringVar(&key, "key", "", "Display only this value (config.API.Server.ListenURI)")
cmdConfig.AddCommand(cmdConfigShow)
var cmdConfigBackup = &cobra.Command{
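
Note: the new --key option above compiles the user-supplied path with github.com/antonmedv/expr and evaluates it against the loaded configuration. A minimal runnable sketch of that pattern, with hypothetical config types standing in for crowdsec's:

    package main

    import (
        "fmt"

        "github.com/antonmedv/expr"
    )

    type Server struct{ ListenURI string }
    type API struct{ Server *Server }
    type Config struct{ API *API }
    type Env struct{ Config *Config }

    func main() {
        // Compile once against the typed environment, then run on live data.
        program, err := expr.Compile("Config.API.Server.ListenURI", expr.Env(Env{}))
        if err != nil {
            panic(err)
        }
        cfg := &Config{API: &API{Server: &Server{ListenURI: "127.0.0.1:8080"}}}
        out, err := expr.Run(program, Env{Config: cfg})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // 127.0.0.1:8080
    }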


@ -31,6 +31,9 @@ func NewConsoleCmd() *cobra.Command {
},
}
name := ""
tags := []string{}
cmdEnroll := &cobra.Command{
Use: "enroll [enroll-key]",
Short: "Enroll this instance to https://app.crowdsec.net [requires local API]",
@ -39,7 +42,10 @@ Enroll this instance to https://app.crowdsec.net
You can get your enrollment key by creating an account on https://app.crowdsec.net.
After running this command you will need to validate the enrollment in the webapp.`,
Example: "cscli console enroll YOUR-ENROLL-KEY",
Example: `cscli console enroll YOUR-ENROLL-KEY
cscli console enroll --name [instance_name] YOUR-ENROLL-KEY
cscli console enroll --name [instance_name] --tags [tag_1] --tags [tag_2] YOUR-ENROLL-KEY
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
@ -87,14 +93,15 @@ After running this command your will need to validate the enrollment in the weba
URL: apiURL,
VersionPrefix: "v2",
})
_, err = c.Auth.EnrollWatcher(context.Background(), args[0])
_, err = c.Auth.EnrollWatcher(context.Background(), args[0], name, tags)
if err != nil {
log.Fatalf("Could not enroll instance: %s", err)
}
log.Infof("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.")
},
}
cmdEnroll.Flags().StringVarP(&name, "name", "n", "", "Name to display in the console")
cmdEnroll.Flags().StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console")
cmdConsole.AddCommand(cmdEnroll)
return cmdConsole
}


@ -143,6 +143,7 @@ func NewDecisionsCmd() *cobra.Command {
Until: new(string),
TypeEquals: new(string),
IncludeCAPI: new(bool),
Limit: new(int),
}
NoSimu := new(bool)
contained := new(bool)
@ -196,6 +197,9 @@ cscli decisions list -t ban
*filter.Since = fmt.Sprintf("%d%s", days*24, "h")
}
}
if *filter.IncludeCAPI {
*filter.Limit = 0
}
if *filter.TypeEquals == "" {
filter.TypeEquals = nil
}
@ -240,6 +244,7 @@ cscli decisions list -t ban
cmdDecisionsList.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)")
cmdDecisionsList.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
cmdDecisionsList.Flags().StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value <RANGE>)")
cmdDecisionsList.Flags().IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)")
cmdDecisionsList.Flags().BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode")
cmdDecisionsList.Flags().BoolVar(contained, "contained", false, "query decisions contained by range")


@ -17,6 +17,7 @@ func NewExplainCmd() *cobra.Command {
var dsn string
var logLine string
var logType string
var opts cstest.DumpOpts
var cmdExplain = &cobra.Command{
Use: "explain",
@ -27,7 +28,7 @@ Explain log pipeline
Example: `
cscli explain --file ./myfile.log --type nginx
cscli explain --log "Sep 19 18:33:22 scw-d95986 sshd[24347]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=1.2.3.4" --type syslog
cscli explain -dsn "file://myfile.log" --type nginx
cscli explain --dsn "file://myfile.log" --type nginx
`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
@ -95,15 +96,15 @@ cscli explain -dsn "file://myfile.log" --type nginx
log.Fatalf("unable to load bucket dump result: %s", err)
}
if err := cstest.DumpTree(*parserDump, *bucketStateDump); err != nil {
log.Fatalf(err.Error())
}
cstest.DumpTree(*parserDump, *bucketStateDump, opts)
},
}
cmdExplain.PersistentFlags().StringVarP(&logFile, "file", "f", "", "Log file to test")
cmdExplain.PersistentFlags().StringVarP(&dsn, "dsn", "d", "", "DSN to test")
cmdExplain.PersistentFlags().StringVarP(&logLine, "log", "l", "", "Log line to test")
cmdExplain.PersistentFlags().StringVarP(&logType, "type", "t", "", "Type of the acquisition to test")
cmdExplain.PersistentFlags().BoolVarP(&opts.Details, "verbose", "v", false, "Display individual changes")
cmdExplain.PersistentFlags().BoolVar(&opts.SkipOk, "failures", false, "Only show failed lines")
return cmdExplain
}


@ -52,6 +52,7 @@ func NewHubTestCmd() *cobra.Command {
postoverflows := []string{}
scenarios := []string{}
var ignoreParsers bool
var labels map[string]string
var cmdHubTestCreate = &cobra.Command{
Use: "create",
@ -119,6 +120,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
LogFile: logFileName,
LogType: logType,
IgnoreParsers: ignoreParsers,
Labels: labels,
}
configFilePath := filepath.Join(testPath, "config.yaml")
@ -574,8 +576,8 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
log.Fatalf("unable to load scenario result after run: %s", err)
}
}
cstest.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData)
opts := cstest.DumpOpts{}
cstest.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts)
}
},
}


@ -181,7 +181,7 @@ cscli machines add MyTestMachine --password MyPassword
var dumpFile string
var err error
// create machineID if doesn't specified by user
// create machineID if not specified by user
if len(args) == 0 {
if !autoAdd {
err = cmd.Help()
@ -312,7 +312,7 @@ cscli machines add MyTestMachine --password MyPassword
if err := dbClient.ValidateMachine(machineID); err != nil {
log.Fatalf("unable to validate machine '%s': %s", machineID, err)
}
log.Infof("machine '%s' validated successfuly", machineID)
log.Infof("machine '%s' validated successfully", machineID)
},
}
cmdMachines.AddCommand(cmdMachinesValidate)


@ -160,7 +160,7 @@ func InstallItem(name string, obtype string, force bool) {
}
item, err = cwhub.EnableItem(csConfig.Hub, item)
if err != nil {
log.Fatalf("error while enabled %s : %v.", item.Name, err)
log.Fatalf("error while enabling %s : %v.", item.Name, err)
}
cwhub.AddItem(obtype, item)
log.Infof("Enabled %s", item.Name)
@ -528,7 +528,7 @@ func silenceInstallItem(name string, obtype string) (string, error) {
}
it, err = cwhub.EnableItem(csConfig.Hub, it)
if err != nil {
return "", fmt.Errorf("error while enabled %s : %v", it.Name, err)
return "", fmt.Errorf("error while enabling %s : %v", it.Name, err)
}
if err := cwhub.AddItem(obtype, it); err != nil {
return "", err


@ -19,7 +19,7 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) {
if hasPlugins(cConfig.API.Server.Profiles) {
log.Info("initiating plugin broker")
if cConfig.PluginConfig == nil {
return nil, fmt.Errorf("plugins are enabled, but no plugin_config section is missing in the configuration")
return nil, fmt.Errorf("plugins are enabled, but the plugin_config section is missing in the configuration")
}
if cConfig.ConfigPaths.NotificationDir == "" {
return nil, fmt.Errorf("plugins are enabled, but config_paths.notification_dir is not defined")


@ -50,7 +50,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers) error {
inputLineChan := make(chan types.Event)
inputEventChan := make(chan types.Event)
//start go-routines for parsing, buckets pour and ouputs.
//start go-routines for parsing, buckets pour and outputs.
parserWg := &sync.WaitGroup{}
parsersTomb.Go(func() error {
parserWg.Add(1)
@ -72,7 +72,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers) error {
bucketWg := &sync.WaitGroup{}
bucketsTomb.Go(func() error {
bucketWg.Add(1)
/*restore as well previous state if present*/
/*restore previous state as well if present*/
if cConfig.Crowdsec.BucketStateFile != "" {
log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile)
if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil {


@ -5,6 +5,7 @@ import (
"fmt"
"os"
"sort"
"strings"
_ "net/http/pprof"
"time"
@ -56,12 +57,15 @@ type Flags struct {
InfoLevel bool
PrintVersion bool
SingleFileType string
Labels map[string]string
OneShotDSN string
TestMode bool
DisableAgent bool
DisableAPI bool
}
type labelsMap map[string]string
type parsers struct {
ctx *parser.UnixParserCtx
povfwctx *parser.UnixParserCtx
@ -146,8 +150,10 @@ func LoadAcquisition(cConfig *csconfig.Config) error {
if flags.OneShotDSN == "" || flags.SingleFileType == "" {
return fmt.Errorf("-type requires a -dsn argument")
}
flags.Labels = labels
flags.Labels["type"] = flags.SingleFileType
dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.SingleFileType)
dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.Labels)
if err != nil {
return errors.Wrapf(err, "failed to configure datasource for %s", flags.OneShotDSN)
}
@ -163,6 +169,20 @@ func LoadAcquisition(cConfig *csconfig.Config) error {
var dumpFolder string
var dumpStates bool
var labels = make(labelsMap)
func (l *labelsMap) String() string {
return "labels"
}
func (l labelsMap) Set(label string) error {
split := strings.Split(label, ":")
if len(split) != 2 {
return errors.Wrapf(errors.New("Bad Format"), "for Label '%s'", label)
}
l[split[0]] = split[1]
return nil
}
func (f *Flags) Parse() {
@ -173,6 +193,7 @@ func (f *Flags) Parse() {
flag.BoolVar(&f.PrintVersion, "version", false, "display version")
flag.StringVar(&f.OneShotDSN, "dsn", "", "Process a single data source in time-machine")
flag.StringVar(&f.SingleFileType, "type", "", "Labels.type for file in time-machine")
flag.Var(&labels, "label", "Additional Labels for file in time-machine")
flag.BoolVar(&f.TestMode, "t", false, "only test configs")
flag.BoolVar(&f.DisableAgent, "no-cs", false, "disable crowdsec agent")
flag.BoolVar(&f.DisableAPI, "no-api", false, "disable local API")
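
Note: labelsMap works because it satisfies the standard library's flag.Value interface, so flag.Var lets -label be passed repeatedly. A self-contained sketch of the same pattern (hypothetical standalone program, not the diff's exact code):

    package main

    import (
        "flag"
        "fmt"
        "strings"
    )

    type labelsMap map[string]string

    func (l labelsMap) String() string { return fmt.Sprint(map[string]string(l)) }

    // Set is invoked once per -label occurrence on the command line.
    func (l labelsMap) Set(value string) error {
        parts := strings.SplitN(value, ":", 2)
        if len(parts) != 2 {
            return fmt.Errorf("bad format for label '%s', expected key:value", value)
        }
        l[parts[0]] = parts[1]
        return nil
    }

    func main() {
        labels := make(labelsMap)
        flag.Var(labels, "label", "additional key:value label (repeatable)")
        flag.Parse() // e.g. -label type:syslog -label env:prod
        fmt.Println(labels)
    }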


@ -104,12 +104,14 @@ func ShutdownCrowdsecRoutines() error {
var reterr error
log.Debugf("Shutting down crowdsec sub-routines")
if len(dataSources) > 0 {
acquisTomb.Kill(nil)
log.Debugf("waiting for acquisition to finish")
if err := acquisTomb.Wait(); err != nil {
log.Warningf("Acquisition returned error : %s", err)
reterr = err
}
}
log.Debugf("acquisition is finished, wait for parser/bucket/ouputs.")
parsersTomb.Kill(nil)
if err := parsersTomb.Wait(); err != nil {
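
Note: the shutdown sequence above relies on gopkg.in/tomb.v2: Kill(nil) asks a tracked goroutine to stop, Dying() signals it inside the goroutine, and Wait() blocks until it returns. A minimal sketch of that lifecycle with a hypothetical worker:

    package main

    import (
        "fmt"
        "time"

        "gopkg.in/tomb.v2"
    )

    func main() {
        var t tomb.Tomb
        t.Go(func() error {
            for {
                select {
                case <-t.Dying(): // Kill() was called, exit cleanly
                    return nil
                case <-time.After(100 * time.Millisecond):
                    // ... acquire / parse / pour ...
                }
            }
        })
        time.Sleep(250 * time.Millisecond)
        t.Kill(nil)           // request shutdown with no error
        fmt.Println(t.Wait()) // blocks until the goroutine returns; prints <nil>
    }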


@ -1,6 +0,0 @@
# /etc/cron.d/crowdsec_pull: crontab to pull crowdsec API
# bad IP in ban DB.
# Run everyday at 08:00 A.M
0 8 * * * root /usr/local/bin/cscli api pull >> /var/log/cscli.log 2>&1

debian/.gitignore vendored (new file, 7 changes)

@ -0,0 +1,7 @@
# Generated during the build
/crowdsec
/files
/*.substvars
/*.log
/*.debhelper
/*-stamp


@ -1 +0,0 @@
crowdsec

debian/postinst vendored (6 changes)

@ -89,13 +89,13 @@ if [ "$1" = configure ]; then
systemctl --quiet is-enabled crowdsec || systemctl unmask crowdsec && systemctl enable crowdsec
if [ -z "$(ss -nlt 'sport = 8080' | grep -v ^State)" ]; then
if [ -z "$(ss -nlt 'sport = 8080' | grep -v ^State)" ] || [ "$LAPI" = false ]; then
systemctl start crowdsec
else
echo "Not attempting to start crowdsec, port 8080 is already used"
echo "Not attempting to start crowdsec, port 8080 is already used or lapi was disabled"
echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml"
fi
fi
echo "You always can run the configuration again interactively using '/usr/share/crowdsec/wizard.sh -c"
echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c"

debian/preinst vendored (2 changes)

@ -40,4 +40,4 @@ if [ "$1" = upgrade ]; then
fi
fi
echo "You always can run the configuration again interactively using '/usr/share/crowdsec/wizard.sh -c"
echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c"

docker/docker_start.sh (mode change: Normal file → Executable file, 4 changes)

@ -6,7 +6,7 @@ if [ "$CONFIG_FILE" != "" ]; then
CS_CONFIG_FILE="$CONFIG_FILE"
fi
# regenerate lcaol agent credentials (ignore if agent is disabled)
# regenerate local agent credentials (ignore if agent is disabled)
if [ "$DISABLE_AGENT" == "" ] ; then
echo "Regenerate local agent credentials"
cscli -c "$CS_CONFIG_FILE" machines delete localhost
@ -20,7 +20,7 @@ if [ "$DISABLE_AGENT" == "" ] ; then
fi
fi
# Check if lapi need to register automatically an agent
# Check if lapi needs to automatically register an agent
echo Check if lapi need to register automatically an agent
if [ "$DISABLE_LOCAL_API" == "" ] && [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
cscli -c "$CS_CONFIG_FILE" machines add $AGENT_USERNAME --password $AGENT_PASSWORD

go.mod (15 changes)

@ -3,12 +3,13 @@ module github.com/crowdsecurity/crowdsec
go 1.13
require (
entgo.io/ent v0.7.0
entgo.io/ent v0.9.1
github.com/AlecAivazis/survey/v2 v2.2.7
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible
github.com/Microsoft/go-winio v0.4.16 // indirect
github.com/ahmetb/dlog v0.0.0-20170105205344-4fb5f8204f26
github.com/alexliesenfeld/health v0.5.1
github.com/antonmedv/expr v1.8.9
github.com/appleboy/gin-jwt/v2 v2.6.4
@ -24,9 +25,10 @@ require (
github.com/docker/docker v20.10.2+incompatible
github.com/docker/go-connections v0.4.0
github.com/enescakir/emoji v1.0.0
github.com/fatih/color v1.13.0
github.com/fsnotify/fsnotify v1.4.9
github.com/gin-gonic/gin v1.6.3
github.com/go-co-op/gocron v0.5.1
github.com/go-co-op/gocron v1.9.0
github.com/go-openapi/errors v0.19.9
github.com/go-openapi/strfmt v0.19.11
github.com/go-openapi/swag v0.19.12
@ -43,10 +45,9 @@ require (
github.com/imdario/mergo v0.3.12 // indirect
github.com/influxdata/go-syslog/v3 v3.0.0
github.com/leodido/go-urn v1.2.1 // indirect
github.com/lib/pq v1.10.0
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/lib/pq v1.10.2
github.com/mattn/go-runewidth v0.0.10 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/mattn/go-sqlite3 v1.14.8
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
@ -62,6 +63,7 @@ require (
github.com/prometheus/client_golang v1.10.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/prom2json v1.3.0
github.com/r3labs/diff/v2 v2.14.1
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.8.1
@ -70,8 +72,7 @@ require (
github.com/ugorji/go v1.2.3 // indirect
github.com/vjeantet/grok v1.0.1 // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/mod v0.4.1
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
golang.org/x/mod v0.4.2
golang.org/x/sys v0.0.0-20210921065528-437939a70204
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
golang.org/x/text v0.3.5 // indirect

go.sum (62 changes)

@ -11,8 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
entgo.io/ent v0.7.0 h1:E3EjO0cUL61DvUg5ZEZdxa4yTL+4SuZv0LqBExo8CQA=
entgo.io/ent v0.7.0/go.mod h1:HZZJxglL8ro4OVDmM06lijj4bOTGcaDdrZttDZ8fVJs=
entgo.io/ent v0.9.1 h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=
entgo.io/ent v0.9.1/go.mod h1:6NUeTfUN5mp5YN+5tgoH1SlakSvYPTBOYotSOvaI4ak=
github.com/AlecAivazis/survey/v2 v2.2.7 h1:5NbxkF4RSKmpywYdcRgUmos1o+roJY8duCLZXbVjoig=
github.com/AlecAivazis/survey/v2 v2.2.7/go.mod h1:9DYvHgXtiXm6nCn+jXnOXLKbH+Yo9u8fAS/SduGdoPk=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
@ -45,6 +45,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ahmetb/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:3YVZUqkoev4mL+aCwVOSWV4M7pN+NURHL38Z2zq5JKA=
github.com/ahmetb/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:ymXt5bw5uSNu4jveerFxE0vNYxF8ncqbptntMaFMg3k=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@ -154,6 +156,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -169,8 +173,8 @@ github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwv
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c=
github.com/go-co-op/gocron v0.5.1 h1:Cni1V7mt184+HnYTDYe6MH7siofCvf94PrGyIDI1v1U=
github.com/go-co-op/gocron v0.5.1/go.mod h1:6Btk4lVj3bnFAgbVfr76W8impTyhYrEi1pV5Pt4Tp/M=
github.com/go-co-op/gocron v1.9.0 h1:+V+DDenw3ryB7B+tK1bAIC5p0ruw4oX9IqAsdRnGIf0=
github.com/go-co-op/gocron v1.9.0/go.mod h1:DbJm9kdgr1sEvWpHCA7dFFs/PGHPMil9/97EXCRPr4k=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -350,8 +354,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40=
@ -412,7 +416,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I=
github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@ -460,8 +464,8 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
@ -482,21 +486,24 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU=
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
@ -628,11 +635,15 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/r3labs/diff/v2 v2.14.1 h1:wRZ3jB44Ny50DSXsoIcFQ27l2x+n5P31K/Pk+b9B0Ic=
github.com/r3labs/diff/v2 v2.14.1/go.mod h1:I8noH9Fc2fjSaMxqF3G2lhDdC0b+JXCfyx85tWFM9kc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -705,11 +716,14 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4=
github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
@ -770,8 +784,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -801,8 +815,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -815,6 +829,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -857,11 +873,12 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210921065528-437939a70204 h1:JJhkWtBuTQKyz2bd5WG9H8iUsJRU3En/KRfN8B2RnDs=
golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -914,7 +931,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -931,6 +948,8 @@ google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1003,8 +1022,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=


@ -8,10 +8,10 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch"
dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker"
fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file"
journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl"
syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/pkg/errors"
@ -27,7 +27,7 @@ type DataSource interface {
GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module
GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality)
Configure([]byte, *log.Entry) error // Configure the datasource
ConfigureByDSN(string, string, *log.Entry) error // Configure the datasource
ConfigureByDSN(string, map[string]string, *log.Entry) error // Configure the datasource
GetMode() string // Get the mode (TAIL, CAT or SERVER)
GetName() string // Get the name of the module
OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file)
@ -56,6 +56,10 @@ var AcquisitionSources = []struct {
name: "syslog",
iface: func() DataSource { return &syslogacquisition.SyslogSource{} },
},
{
name: "docker",
iface: func() DataSource { return &dockeracquisition.DockerSource{} },
},
}
func GetDataSourceIface(dataSourceType string) DataSource {
@ -120,7 +124,7 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string {
return ""
}
func LoadAcquisitionFromDSN(dsn string, label string) ([]DataSource, error) {
func LoadAcquisitionFromDSN(dsn string, labels map[string]string) ([]DataSource, error) {
var sources []DataSource
frags := strings.Split(dsn, ":")
@ -139,7 +143,7 @@ func LoadAcquisitionFromDSN(dsn string, label string) ([]DataSource, error) {
subLogger := clog.WithFields(log.Fields{
"type": dsn,
})
err := dataSrc.ConfigureByDSN(dsn, label, subLogger)
err := dataSrc.ConfigureByDSN(dsn, labels, subLogger)
if err != nil {
return nil, errors.Wrapf(err, "while configuration datasource for %s", dsn)
}
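
Note: new sources such as docker plug in through the AcquisitionSources table of name/constructor pairs shown above; GetDataSourceIface returns a fresh instance per lookup. A stripped-down sketch of that registry pattern (hypothetical types, not the real DataSource interface):

    package main

    import "fmt"

    type DataSource interface{ GetName() string }

    type fileSource struct{}

    func (f *fileSource) GetName() string { return "file" }

    type dockerSource struct{}

    func (d *dockerSource) GetName() string { return "docker" }

    var sources = []struct {
        name  string
        iface func() DataSource
    }{
        {"file", func() DataSource { return &fileSource{} }},
        {"docker", func() DataSource { return &dockerSource{} }},
    }

    func getDataSource(name string) DataSource {
        for _, s := range sources {
            if s.name == name {
                return s.iface() // fresh instance per lookup
            }
        }
        return nil
    }

    func main() {
        fmt.Println(getDataSource("docker").GetName()) // docker
    }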


@ -47,7 +47,7 @@ func (f *MockSource) GetMetrics() []prometheus.Collector {
func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil }
func (f *MockSource) Dump() interface{} { return f }
func (f *MockSource) GetName() string { return "mock" }
func (f *MockSource) ConfigureByDSN(string, string, *log.Entry) error {
func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry) error {
return fmt.Errorf("not supported")
}
@ -342,7 +342,7 @@ func (f *MockCat) CanRun() error { return nil
func (f *MockCat) GetMetrics() []prometheus.Collector { return nil }
func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil }
func (f *MockCat) Dump() interface{} { return f }
func (f *MockCat) ConfigureByDSN(string, string, *log.Entry) error { return fmt.Errorf("not supported") }
func (f *MockCat) ConfigureByDSN(string, map[string]string, *log.Entry) error { return fmt.Errorf("not supported") }
//----
@ -381,7 +381,7 @@ func (f *MockTail) CanRun() error { return nil }
func (f *MockTail) GetMetrics() []prometheus.Collector { return nil }
func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil }
func (f *MockTail) Dump() interface{} { return f }
func (f *MockTail) ConfigureByDSN(string, string, *log.Entry) error {
func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry) error {
return fmt.Errorf("not supported")
}
@ -511,7 +511,7 @@ func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector
func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil }
func (f *MockSourceByDSN) Dump() interface{} { return f }
func (f *MockSourceByDSN) GetName() string { return "mockdsn" }
func (f *MockSourceByDSN) ConfigureByDSN(dsn string, logType string, logger *log.Entry) error {
func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
dsn = strings.TrimPrefix(dsn, "mockdsn://")
if dsn != "test_expect" {
return fmt.Errorf("unexpected value")
@ -555,7 +555,7 @@ func TestConfigureByDSN(t *testing.T) {
}
for _, test := range tests {
srcs, err := LoadAcquisitionFromDSN(test.dsn, "test_label")
srcs, err := LoadAcquisitionFromDSN(test.dsn, map[string]string{"type": "test_label"})
if err != nil && test.ExpectedError != "" {
if !strings.Contains(err.Error(), test.ExpectedError) {
t.Fatalf("expected '%s', got '%s'", test.ExpectedError, err.Error())


@ -7,6 +7,7 @@ import (
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
@ -32,6 +33,8 @@ var openedStreams = prometheus.NewGaugeVec(
[]string{"group"},
)
var streamIndexMutex = sync.Mutex{}
var linesRead = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_cloudwatch_stream_hits_total",
@ -68,6 +71,7 @@ type CloudwatchSourceConfiguration struct {
AwsProfile *string `yaml:"aws_profile,omitempty"`
PrependCloudwatchTimestamp *bool `yaml:"prepend_cloudwatch_timestamp,omitempty"`
AwsConfigDir *string `yaml:"aws_config_dir,omitempty"`
AwsRegion *string `yaml:"aws_region,omitempty"`
}
//LogStreamTailConfig is the configuration for one given stream within one group
@ -94,7 +98,7 @@ var (
def_StreamReadTimeout = 10 * time.Minute
def_PollDeadStreamInterval = 10 * time.Second
def_GetLogEventsPagesLimit = int64(1000)
def_AwsConfigDir = "/root/.aws/"
def_AwsConfigDir = ""
)
func (cw *CloudwatchSource) Configure(cfg []byte, logger *log.Entry) error {
@ -148,20 +152,28 @@ func (cw *CloudwatchSource) Configure(cfg []byte, logger *log.Entry) error {
cw.Config.AwsConfigDir = &def_AwsConfigDir
}
logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir)
if *cw.Config.AwsConfigDir != "" {
_, err := os.Stat(*cw.Config.AwsConfigDir)
if os.IsNotExist(err) {
logger.Errorf("aws_config_dir '%s' : directory does not exists", *cw.Config.AwsConfigDir)
return fmt.Errorf("aws_config_dir %s does not exist", *cw.Config.AwsConfigDir)
if err != nil {
logger.Errorf("can't read aws_config_dir '%s' got err %s", *cw.Config.AwsConfigDir, err)
return fmt.Errorf("can't read aws_config_dir %s got err %s ", *cw.Config.AwsConfigDir, err)
}
os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
//as aws sdk relies on $HOME, let's allow the user to override it :)
os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir))
os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir))
} else {
if cw.Config.AwsRegion == nil {
logger.Errorf("aws_region is not specified, specify it or aws_config_dir")
return fmt.Errorf("aws_region is not specified, specify it or aws_config_dir")
}
os.Setenv("AWS_REGION", *cw.Config.AwsRegion)
}
if err := cw.newClient(); err != nil {
return err
}
cw.streamIndexes = make(map[string]string)
if cw.Config.StreamRegexp != nil {
if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil {
return errors.Wrapf(err, "error while compiling regexp '%s'", *cw.Config.StreamRegexp)
@ -366,14 +378,16 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha
cw.monitoredStreams = append(cw.monitoredStreams, &newStream)
}
case <-pollDeadStreamInterval.C:
newMonitoredStreams := cw.monitoredStreams[:0]
for idx, stream := range cw.monitoredStreams {
if !cw.monitoredStreams[idx].t.Alive() {
cw.logger.Debugf("remove dead stream %s", stream.StreamName)
openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec()
cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
break
} else {
newMonitoredStreams = append(newMonitoredStreams, stream)
}
}
cw.monitoredStreams = newMonitoredStreams
case <-cw.t.Dying():
cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams))
for idx, stream := range cw.monitoredStreams {
@ -383,10 +397,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha
if err := cw.monitoredStreams[idx].t.Wait(); err != nil {
cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err)
}
} else {
cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
}
}
cw.monitoredStreams = nil
cw.logger.Debugf("routine cleanup done, return")
return nil
}
@ -396,14 +409,14 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha
func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error {
var startFrom *string
var lastReadMessage time.Time = time.Now()
startup := true
ticker := time.NewTicker(cfg.PollStreamInterval)
//resume at existing index if we already had
if v, ok := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName]; ok && v != "" {
streamIndexMutex.Lock()
v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName]
streamIndexMutex.Unlock()
if v != "" {
cfg.logger.Debugf("restarting on index %s", v)
startFrom = &v
startup = false
}
/*during first run, we want to avoid reading any message, but just get a token.
if we don't, we might end up sending the same item several times. hence the 'startup' hack */
@ -414,27 +427,23 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan
hasMorePages := true
for hasMorePages {
/*for the first call, we only consume the last item*/
limit := cfg.GetLogEventsPagesLimit
if startup {
limit = 1
}
cfg.logger.Tracef("calling GetLogEventsPagesWithContext")
ctx := context.Background()
err := cw.cwClient.GetLogEventsPagesWithContext(ctx,
&cloudwatchlogs.GetLogEventsInput{
Limit: aws.Int64(limit),
Limit: aws.Int64(cfg.GetLogEventsPagesLimit),
LogGroupName: aws.String(cfg.GroupName),
LogStreamName: aws.String(cfg.StreamName),
NextToken: startFrom,
StartFromHead: aws.Bool(true),
},
func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {
cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage)
startFrom = page.NextForwardToken
if page.NextForwardToken != nil {
streamIndexMutex.Lock()
cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken
}
if startup { //we grab the NextForwardToken and we return on first iteration
return false
streamIndexMutex.Unlock()
}
if lastPage { /*wait another ticker to check on new log availability*/
cfg.logger.Tracef("last page")
@ -451,7 +460,6 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan
cfg.logger.Debugf("pushing message : %s", evt.Line.Raw)
linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc()
outChan <- evt
}
}
return true
@ -462,11 +470,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan
cfg.logger.Warningf("err : %s", newerr)
return newerr
}
if startup {
startup = false
}
cfg.logger.Tracef("done reading GetLogEventsPagesWithContext")
if time.Since(lastReadMessage) > cfg.StreamReadTimeout {
cfg.logger.Infof("%s/%s reached timeout (%s) (last message was %s)", cfg.GroupName, cfg.StreamName, time.Since(lastReadMessage),
lastReadMessage)
@ -480,7 +484,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan
}
}
func (cw *CloudwatchSource) ConfigureByDSN(dsn string, logtype string, logger *log.Entry) error {
func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
cw.logger = logger
dsn = strings.TrimPrefix(dsn, cw.GetName()+"://")
@ -494,9 +498,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, logtype string, logger *l
}
cw.Config.GroupName = frags[0]
cw.Config.StreamName = &frags[1]
cw.Config.Labels = make(map[string]string)
cw.Config.Labels["type"] = logtype
cw.Config.Labels = labels
u, err := url.ParseQuery(args[1])
if err != nil {
return errors.Wrapf(err, "while parsing %s", dsn)
@ -665,7 +667,6 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig)
eventTimestamp := time.Unix(0, *log.Timestamp*int64(time.Millisecond))
msg = eventTimestamp.String() + " " + msg
}
l.Raw = msg
l.Labels = cfg.Labels
l.Time = time.Now()
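
Note: the dead-stream cleanup above replaces delete-while-iterating (which needed a break and could skip entries) with Go's filter-in-place idiom. A small sketch of the idiom with a hypothetical stream type:

    package main

    import "fmt"

    type stream struct {
        name  string
        alive bool
    }

    func main() {
        streams := []*stream{{"a", true}, {"b", false}, {"c", true}}
        // kept shares streams' backing array: one pass, no allocation,
        // and no index bookkeeping while dropping dead entries.
        kept := streams[:0]
        for _, s := range streams {
            if s.alive {
                kept = append(kept, s)
            }
        }
        streams = kept
        for _, s := range streams {
            fmt.Println(s.name) // prints a, then c
        }
    }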


@ -67,6 +67,7 @@ func TestWatchLogGroupForStreams(t *testing.T) {
name: "group_does_not_exists",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: b
@ -92,6 +93,7 @@ stream_name: test_stream`),
name: "group_exists_bad_stream_name",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_group1
@ -136,6 +138,7 @@ stream_name: test_stream_bad`),
name: "group_exists_bad_stream_regexp",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_group1
@ -182,6 +185,7 @@ stream_regexp: test_bad[0-9]+`),
name: "group_exists_stream_exists_has_events",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_log_group1
@ -253,14 +257,15 @@ stream_name: test_stream`),
}
},
expectedResLen: 2,
expectedResMessages: []string{"test_message_4", "test_message_5"},
expectedResLen: 3,
expectedResMessages: []string{"test_message_1", "test_message_4", "test_message_5"},
},
//have a stream generate events, reach time-out and gets polled again
{
name: "group_exists_stream_exists_has_events+timeout",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_log_group1
@ -345,14 +350,15 @@ stream_name: test_stream`),
}
},
expectedResLen: 2,
expectedResMessages: []string{"test_message_41", "test_message_51"},
expectedResLen: 3,
expectedResMessages: []string{"test_message_1", "test_message_41", "test_message_51"},
},
//have a stream generate events, reach time-out and dead body collection
{
name: "group_exists_stream_exists_has_events+timeout+GC",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_log_group1
@ -406,7 +412,7 @@ stream_name: test_stream`),
}
},
expectedResLen: 0,
expectedResLen: 1,
},
}
@ -527,6 +533,7 @@ func TestConfiguration(t *testing.T) {
name: "group_does_not_exists",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
group_name: test_group
@ -546,6 +553,7 @@ stream_name: test_stream`),
name: "missing_group_name",
config: []byte(`
source: cloudwatch
aws_region: us-east-1
labels:
type: test_source
stream_name: test_stream`),
@ -601,7 +609,8 @@ func TestConfigureByDSN(t *testing.T) {
var err error
log.SetLevel(log.DebugLevel)
tests := []struct {
dsn, logtype string
dsn string
labels map[string]string
expectedCfgErr string
name string
}{
@ -632,7 +641,7 @@ func TestConfigureByDSN(t *testing.T) {
dbgLogger.Logger.SetLevel(log.DebugLevel)
log.Printf("%d/%d", idx, len(tests))
cw := CloudwatchSource{}
err = cw.ConfigureByDSN(test.dsn, test.logtype, dbgLogger)
err = cw.ConfigureByDSN(test.dsn, test.labels, dbgLogger)
if err != nil && test.expectedCfgErr != "" {
if !strings.Contains(err.Error(), test.expectedCfgErr) {
t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())
@ -761,7 +770,7 @@ func TestOneShotAcquisition(t *testing.T) {
dbgLogger.Logger.SetLevel(log.DebugLevel)
dbgLogger.Infof("starting test")
cw := CloudwatchSource{}
err = cw.ConfigureByDSN(test.dsn, "test", dbgLogger)
err = cw.ConfigureByDSN(test.dsn, map[string]string{"type": "test"}, dbgLogger)
if err != nil && test.expectedCfgErr != "" {
if !strings.Contains(err.Error(), test.expectedCfgErr) {
t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())

View file

@ -0,0 +1,518 @@
package dockeracquisition
import (
"bufio"
"context"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/ahmetb/dlog"
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
"github.com/crowdsecurity/crowdsec/pkg/types"
dockerTypes "github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
"gopkg.in/yaml.v2"
)
var linesRead = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_dockersource_hits_total",
Help: "Total lines that were read.",
},
[]string{"source"})
type DockerConfiguration struct {
CheckInterval string `yaml:"check_interval"`
FollowStdout bool `yaml:"follow_stdout"`
FollowStdErr bool `yaml:"follow_stderr"`
Until string `yaml:"until"`
Since string `yaml:"since"`
DockerHost string `yaml:"docker_host"`
ContainerName []string `yaml:"container_name"`
ContainerID []string `yaml:"container_id"`
ContainerNameRegexp []string `yaml:"container_name_regexp"`
ContainerIDRegexp []string `yaml:"container_id_regexp"`
ForceInotify bool `yaml:"force_inotify"`
configuration.DataSourceCommonCfg `yaml:",inline"`
}
type DockerSource struct {
Config DockerConfiguration
runningContainerState map[string]*ContainerConfig
compiledContainerName []*regexp.Regexp
compiledContainerID []*regexp.Regexp
CheckIntervalDuration time.Duration
logger *log.Entry
Client client.CommonAPIClient
t *tomb.Tomb
containerLogsOptions *dockerTypes.ContainerLogsOptions
}
type ContainerConfig struct {
Name string
ID string
t *tomb.Tomb
logger *log.Entry
Labels map[string]string
}
func (d *DockerSource) Configure(Config []byte, logger *log.Entry) error {
var err error
d.Config = DockerConfiguration{
FollowStdout: true, // default
FollowStdErr: true, // default
CheckInterval: "1s", // default
}
d.logger = logger
d.runningContainerState = make(map[string]*ContainerConfig)
err = yaml.UnmarshalStrict(Config, &d.Config)
if err != nil {
return errors.Wrap(err, "Cannot parse DockerAcquisition configuration")
}
d.logger.Tracef("DockerAcquisition configuration: %+v", d.Config)
if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 {
return fmt.Errorf("no containers names or containers ID configuration provided")
}
d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval)
if err != nil {
return fmt.Errorf("parsing 'check_interval' parameters: %s", d.CheckIntervalDuration)
}
if d.Config.Mode == "" {
d.Config.Mode = configuration.TAIL_MODE
}
if d.Config.Mode != configuration.CAT_MODE && d.Config.Mode != configuration.TAIL_MODE {
return fmt.Errorf("unsupported mode %s for docker datasource", d.Config.Mode)
}
d.logger.Tracef("Actual DockerAcquisition configuration %+v", d.Config)
for _, cont := range d.Config.ContainerNameRegexp {
d.compiledContainerName = append(d.compiledContainerName, regexp.MustCompile(cont))
}
for _, cont := range d.Config.ContainerIDRegexp {
d.compiledContainerID = append(d.compiledContainerID, regexp.MustCompile(cont))
}
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return err
}
if d.Config.Since == "" {
d.Config.Since = time.Now().Format(time.RFC3339)
}
d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{
ShowStdout: d.Config.FollowStdout,
ShowStderr: d.Config.FollowStdErr,
Follow: true,
Since: d.Config.Since,
}
if d.Config.Until != "" {
d.containerLogsOptions.Until = d.Config.Until
}
if d.Config.DockerHost != "" {
if err := client.WithHost(d.Config.DockerHost)(dockerClient); err != nil {
return err
}
}
d.Client = dockerClient
return nil
}
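
The Configure flow above follows a defaults-then-strict-unmarshal pattern: set defaults in the struct literal, then let yaml.UnmarshalStrict overwrite them and reject unknown keys. A stripped-down sketch using a subset of the fields above:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type dockerCfg struct {
	CheckInterval string   `yaml:"check_interval"`
	FollowStdout  bool     `yaml:"follow_stdout"`
	ContainerName []string `yaml:"container_name"`
}

func main() {
	cfg := dockerCfg{FollowStdout: true, CheckInterval: "1s"} // defaults first
	raw := []byte("check_interval: 2s\ncontainer_name:\n  - nginx\n")
	// UnmarshalStrict overwrites provided keys and errors on unknown ones,
	// which is how typos like 'foobar: asd' are caught in the tests below.
	if err := yaml.UnmarshalStrict(raw, &cfg); err != nil {
		fmt.Println("config error:", err)
		return
	}
	fmt.Printf("%+v\n", cfg) // FollowStdout keeps its default, CheckInterval is overridden
}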
func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
var err error
if !strings.HasPrefix(dsn, d.GetName()+"://") {
return fmt.Errorf("invalid DSN %s for docker source, must start with %s://", dsn, d.GetName())
}
d.Config = DockerConfiguration{
FollowStdout: true,
FollowStdErr: true,
CheckInterval: "1s",
}
d.Config.ContainerName = make([]string, 0)
d.Config.ContainerID = make([]string, 0)
d.runningContainerState = make(map[string]*ContainerConfig)
d.Config.Mode = configuration.CAT_MODE
d.logger = logger
d.Config.Labels = labels
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return err
}
d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{
ShowStdout: d.Config.FollowStdout,
ShowStderr: d.Config.FollowStdErr,
Follow: false,
}
dsn = strings.TrimPrefix(dsn, d.GetName()+"://")
args := strings.Split(dsn, "?")
if len(args) == 0 {
return fmt.Errorf("invalid dsn: %s", dsn)
}
if len(args) == 1 && args[0] == "" {
return fmt.Errorf("empty %s DSN", d.GetName()+"://")
}
d.Config.ContainerName = append(d.Config.ContainerName, args[0])
// we also add it as an ID so the user can provide either a container name or a container ID
d.Config.ContainerID = append(d.Config.ContainerID, args[0])
// no parameters
if len(args) == 1 {
d.Client = dockerClient
return nil
}
parameters, err := url.ParseQuery(args[1])
if err != nil {
return errors.Wrapf(err, "while parsing parameters %s: %s", dsn, err)
}
for k, v := range parameters {
switch k {
case "log_level":
if len(v) != 1 {
return fmt.Errorf("only one 'log_level' parameters is required, not many")
}
lvl, err := log.ParseLevel(v[0])
if err != nil {
return errors.Wrapf(err, "unknown level %s", v[0])
}
d.logger.Logger.SetLevel(lvl)
case "until":
if len(v) != 1 {
return fmt.Errorf("only one 'until' parameters is required, not many")
}
d.containerLogsOptions.Until = v[0]
case "since":
if len(v) != 1 {
return fmt.Errorf("only one 'since' parameters is required, not many")
}
d.containerLogsOptions.Since = v[0]
case "follow_stdout":
if len(v) != 1 {
return fmt.Errorf("only one 'follow_stdout' parameters is required, not many")
}
followStdout, err := strconv.ParseBool(v[0])
if err != nil {
return fmt.Errorf("parsing 'follow_stdout' parameters: %s", err)
}
d.Config.FollowStdout = followStdout
d.containerLogsOptions.ShowStdout = followStdout
case "follow_stderr":
if len(v) != 1 {
return fmt.Errorf("only one 'follow_stderr' parameters is required, not many")
}
followStdErr, err := strconv.ParseBool(v[0])
if err != nil {
return fmt.Errorf("parsing 'follow_stderr' parameters: %s", err)
}
d.Config.FollowStdErr = followStdErr
d.containerLogsOptions.ShowStderr = followStdErr
case "docker_host":
if len(v) != 1 {
return fmt.Errorf("only one 'docker_host' parameters is required, not many")
}
if err := client.WithHost(v[0])(dockerClient); err != nil {
return err
}
}
}
d.Client = dockerClient
return nil
}
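
To make the DSN handling above concrete, here is a simplified, standalone re-implementation of the docker:// split (the real parser also applies the parameters to containerLogsOptions):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	dsn := "docker://test_docker?since=42min&docker_host=unix:///var/run/podman/podman.sock"
	dsn = strings.TrimPrefix(dsn, "docker://")
	parts := strings.SplitN(dsn, "?", 2)
	fmt.Println("container name or ID:", parts[0])
	if len(parts) == 2 {
		params, err := url.ParseQuery(parts[1])
		if err != nil {
			panic(err)
		}
		fmt.Println("since:", params.Get("since"))
		fmt.Println("docker_host:", params.Get("docker_host"))
	}
}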
func (d *DockerSource) GetMode() string {
return d.Config.Mode
}
//SupportedModes returns the supported modes by the acquisition module
func (d *DockerSource) SupportedModes() []string {
return []string{configuration.TAIL_MODE, configuration.CAT_MODE}
}
//OneShotAcquisition reads a set of file and returns when done
func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
d.logger.Debug("In oneshot")
runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{})
if err != nil {
return err
}
foundOne := false
for _, container := range runningContainer {
if _, ok := d.runningContainerState[container.ID]; ok {
d.logger.Debugf("container with id %s is already being read from", container.ID)
continue
}
if containerConfig, ok := d.EvalContainer(container); ok {
d.logger.Infof("reading logs from container %s", containerConfig.Name)
d.logger.Debugf("logs options: %+v", *d.containerLogsOptions)
dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions)
if err != nil {
d.logger.Errorf("unable to read logs from container: %+v", err)
return err
}
// we use this library to normalize docker API logs (cf. https://ahmet.im/blog/docker-logs-api-binary-format-explained/)
reader := dlog.NewReader(dockerReader)
foundOne = true
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
if line == "" {
continue
}
l := types.Line{}
l.Raw = line
l.Labels = d.Config.Labels
l.Time = time.Now()
l.Src = containerConfig.Name
l.Process = true
l.Module = d.GetName()
linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc()
evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
out <- evt
d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw)
}
d.runningContainerState[container.ID] = containerConfig
}
}
t.Kill(nil)
if !foundOne {
return fmt.Errorf("no docker found, can't run one shot acquisition")
}
return nil
}
func (d *DockerSource) GetMetrics() []prometheus.Collector {
return []prometheus.Collector{linesRead}
}
func (d *DockerSource) GetAggregMetrics() []prometheus.Collector {
return []prometheus.Collector{linesRead}
}
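
GetMetrics and GetAggregMetrics only hand collectors back to the caller; registration and incrementing happen elsewhere. A self-contained sketch of the CounterVec pattern used by linesRead, with an illustrative metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var lines = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "cs_demosource_hits_total", Help: "Total lines read."},
	[]string{"source"},
)

func main() {
	prometheus.MustRegister(lines) // the caller registers what GetMetrics returns
	lines.With(prometheus.Labels{"source": "nginx"}).Inc()
	fmt.Println("incremented")
}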
func (d *DockerSource) GetName() string {
return "docker"
}
func (d *DockerSource) CanRun() error {
return nil
}
func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*ContainerConfig, bool) {
for _, containerID := range d.Config.ContainerID {
if containerID == container.ID {
return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels}, true
}
}
for _, containerName := range d.Config.ContainerName {
for _, name := range container.Names {
name = strings.TrimPrefix(name, "/")
if name == containerName {
return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels}, true
}
}
}
for _, cont := range d.compiledContainerID {
if matched := cont.Match([]byte(container.ID)); matched {
return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels}, true
}
}
for _, cont := range d.compiledContainerName {
for _, name := range container.Names {
if matched := cont.Match([]byte(name)); matched {
return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels}, true
}
}
}
return &ContainerConfig{}, false
}
func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error {
ticker := time.NewTicker(d.CheckIntervalDuration)
d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String())
for {
select {
case <-d.t.Dying():
d.logger.Infof("stopping container watcher")
return nil
case <-ticker.C:
// to track for garbage collection
runningContainersID := make(map[string]bool)
runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{})
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") {
for idx, container := range d.runningContainerState {
if d.runningContainerState[idx].t.Alive() {
d.logger.Infof("killing tail for container %s", container.Name)
d.runningContainerState[idx].t.Kill(nil)
if err := d.runningContainerState[idx].t.Wait(); err != nil {
d.logger.Infof("error while waiting for death of %s : %s", container.Name, err)
}
}
delete(d.runningContainerState, idx)
}
} else {
log.Debugf("container list err: %s", err.Error())
}
continue
}
for _, container := range runningContainer {
runningContainersID[container.ID] = true
// no need to re-evaluate an already monitored container
if _, ok := d.runningContainerState[container.ID]; ok {
continue
}
if containerConfig, ok := d.EvalContainer(container); ok {
monitChan <- containerConfig
}
}
for containerStateID, containerConfig := range d.runningContainerState {
if _, ok := runningContainersID[containerStateID]; !ok {
deleteChan <- containerConfig
}
}
d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState))
ticker.Reset(d.CheckIntervalDuration)
}
}
}
func (d *DockerSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
d.t = t
monitChan := make(chan *ContainerConfig)
deleteChan := make(chan *ContainerConfig)
d.logger.Infof("Starting docker acquisition")
t.Go(func() error {
return d.DockerManager(monitChan, deleteChan, out)
})
return d.WatchContainer(monitChan, deleteChan)
}
func (d *DockerSource) Dump() interface{} {
return d
}
func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) error {
for scanner.Scan() {
out <- scanner.Text()
}
return nil
}
func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types.Event) error {
container.logger.Infof("start tail for container %s", container.Name)
dockerReader, err := d.Client.ContainerLogs(context.Background(), container.ID, *d.containerLogsOptions)
if err != nil {
container.logger.Errorf("unable to read logs from container: %+v", err)
return err
}
// we use this library to normalize docker API logs (cf. https://ahmet.im/blog/docker-logs-api-binary-format-explained/)
reader := dlog.NewReader(dockerReader)
scanner := bufio.NewScanner(reader)
readerChan := make(chan string)
readerTomb := &tomb.Tomb{}
readerTomb.Go(func() error {
return ReadTailScanner(scanner, readerChan, readerTomb)
})
for {
select {
case <-container.t.Dying():
readerTomb.Kill(nil)
container.logger.Infof("tail stopped for container %s", container.Name)
return nil
case line := <-readerChan:
if line == "" {
continue
}
l := types.Line{}
l.Raw = line
l.Labels = d.Config.Labels
l.Time = time.Now()
l.Src = container.Name
l.Process = true
l.Module = d.GetName()
evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
linesRead.With(prometheus.Labels{"source": container.Name}).Inc()
outChan <- evt
d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw)
}
}
}
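
The TailDocker loop above separates blocking reads from shutdown handling: a tomb-managed goroutine drains the scanner into a channel, leaving the select free to react to Dying(). A minimal standalone sketch of the same pattern (the close() is added here so the sketch terminates; ReadTailScanner itself does not close its channel):

package main

import (
	"bufio"
	"fmt"
	"strings"
	"time"

	"gopkg.in/tomb.v2"
)

func main() {
	scanner := bufio.NewScanner(strings.NewReader("line1\nline2\n"))
	lines := make(chan string)
	reader := &tomb.Tomb{}
	reader.Go(func() error {
		for scanner.Scan() {
			lines <- scanner.Text()
		}
		close(lines) // signal EOF to the consumer
		return nil
	})
	for {
		select {
		case l, ok := <-lines:
			if !ok {
				_ = reader.Wait()
				return
			}
			fmt.Println("got:", l)
		case <-time.After(time.Second): // stand-in for <-container.t.Dying()
			reader.Kill(nil)
			return
		}
	}
}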
func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error {
d.logger.Info("DockerSource Manager started")
for {
select {
case newContainer := <-in:
if _, ok := d.runningContainerState[newContainer.ID]; !ok {
newContainer.t = &tomb.Tomb{}
newContainer.logger = d.logger.WithFields(log.Fields{"container_name": newContainer.Name})
newContainer.t.Go(func() error {
return d.TailDocker(newContainer, outChan)
})
d.runningContainerState[newContainer.ID] = newContainer
}
case containerToDelete := <-deleteChan:
if containerConfig, ok := d.runningContainerState[containerToDelete.ID]; ok {
log.Infof("container acquisition stopped for container '%s'", containerConfig.Name)
containerConfig.t.Kill(nil)
delete(d.runningContainerState, containerToDelete.ID)
}
case <-d.t.Dying():
for idx, container := range d.runningContainerState {
if d.runningContainerState[idx].t.Alive() {
d.logger.Infof("killing tail for container %s", container.Name)
d.runningContainerState[idx].t.Kill(nil)
if err := d.runningContainerState[idx].t.Wait(); err != nil {
d.logger.Infof("error while waiting for death of %s : %s", container.Name, err)
}
}
}
d.runningContainerState = nil
d.logger.Debugf("routine cleanup done, return")
return nil
}
}
}

View file

@ -0,0 +1,330 @@
package dockeracquisition
import (
"context"
"fmt"
"io"
"os"
"strings"
"testing"
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
dockerTypes "github.com/docker/docker/api/types"
"github.com/docker/docker/client"
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
"github.com/stretchr/testify/assert"
)
const testContainerName = "docker_test"
func TestConfigure(t *testing.T) {
log.Infof("Test 'TestConfigure'")
tests := []struct {
config string
expectedErr string
}{
{
config: `foobar: asd`,
expectedErr: "line 1: field foobar not found in type dockeracquisition.DockerConfiguration",
},
{
config: `
mode: tail
source: docker`,
expectedErr: "no containers names or containers ID configuration provided",
},
{
config: `
mode: cat
source: docker
container_name:
- toto`,
expectedErr: "",
},
}
subLogger := log.WithFields(log.Fields{
"type": "docker",
})
for _, test := range tests {
f := DockerSource{}
err := f.Configure([]byte(test.config), subLogger)
if test.expectedErr != "" && err == nil {
t.Fatalf("Expected err %s but got nil !", test.expectedErr)
}
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
}
}
}
func TestConfigureDSN(t *testing.T) {
log.Infof("Test 'TestConfigureDSN'")
tests := []struct {
name string
dsn string
expectedErr string
}{
{
name: "invalid DSN",
dsn: "asd://",
expectedErr: "invalid DSN asd:// for docker source, must start with docker://",
},
{
name: "empty DSN",
dsn: "docker://",
expectedErr: "empty docker:// DSN",
},
{
name: "DSN ok with log_level",
dsn: "docker://test_docker?log_level=warn",
expectedErr: "",
},
{
name: "DSN invalid log_level",
dsn: "docker://test_docker?log_level=foobar",
expectedErr: "unknown level foobar: not a valid logrus Level:",
},
{
name: "DSN ok with multiple parameters",
dsn: "docker://test_docker?since=42min&docker_host=unix:///var/run/podman/podman.sock",
expectedErr: "",
},
}
subLogger := log.WithFields(log.Fields{
"type": "docker",
})
for _, test := range tests {
f := DockerSource{}
err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger)
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
} else {
assert.Equal(t, err, nil)
}
}
}
type mockDockerCli struct {
client.Client
}
func TestStreamingAcquisition(t *testing.T) {
log.SetOutput(os.Stdout)
log.SetLevel(log.InfoLevel)
log.Info("Test 'TestStreamingAcquisition'")
tests := []struct {
config string
expectedErr string
expectedOutput string
expectedLines int
logType string
logLevel log.Level
}{
{
config: `
source: docker
mode: cat
container_name:
- docker_test`,
expectedErr: "",
expectedOutput: "",
expectedLines: 3,
logType: "test",
logLevel: log.InfoLevel,
},
{
config: `
source: docker
mode: cat
container_name_regexp:
- docker_*`,
expectedErr: "",
expectedOutput: "",
expectedLines: 3,
logType: "test",
logLevel: log.InfoLevel,
},
}
for _, ts := range tests {
var logger *log.Logger
var subLogger *log.Entry
if ts.expectedOutput != "" {
logger = log.New() // initialize before use: SetLevel on a nil *logrus.Logger would panic
logger.SetLevel(ts.logLevel)
subLogger = logger.WithFields(log.Fields{
"type": "docker",
})
} else {
subLogger = log.WithFields(log.Fields{
"type": "docker",
})
}
dockerTomb := tomb.Tomb{}
out := make(chan types.Event)
dockerSource := DockerSource{}
err := dockerSource.Configure([]byte(ts.config), subLogger)
if err != nil {
t.Fatalf("Unexpected error : %s", err)
}
dockerSource.Client = new(mockDockerCli)
actualLines := 0
readerTomb := &tomb.Tomb{}
streamTomb := tomb.Tomb{}
streamTomb.Go(func() error {
return dockerSource.StreamingAcquisition(out, &dockerTomb)
})
readerTomb.Go(func() error {
time.Sleep(1 * time.Second)
ticker := time.NewTicker(1 * time.Second)
for {
select {
case <-out:
actualLines++
ticker.Reset(1 * time.Second)
case <-ticker.C:
log.Infof("no more line to read")
readerTomb.Kill(nil)
return nil
}
}
})
time.Sleep(10 * time.Second)
if ts.expectedErr == "" && err != nil {
t.Fatalf("Unexpected error : %s", err)
} else if ts.expectedErr != "" && err != nil {
assert.Contains(t, err.Error(), ts.expectedErr)
continue
} else if ts.expectedErr != "" && err == nil {
t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
}
if err := readerTomb.Wait(); err != nil {
t.Fatal(err)
}
//time.Sleep(4 * time.Second)
if ts.expectedLines != 0 {
assert.Equal(t, ts.expectedLines, actualLines)
}
dockerSource.t.Kill(nil)
err = streamTomb.Wait()
if err != nil {
t.Fatalf("docker acquisition error: %s", err)
}
}
}
func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) {
containers := make([]dockerTypes.Container, 0)
container := &dockerTypes.Container{
ID: "12456",
Names: []string{testContainerName},
}
containers = append(containers, *container)
return containers, nil
}
func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) {
// 8-byte multiplexed-stream header consumed by dlog: byte 0 is the stream type (0x01 = stdout), the last 4 bytes are the big-endian payload length
startLineByte := "\x01\x00\x00\x00\x00\x00\x00\x1f"
data := []string{"docker", "test", "1234"}
ret := ""
for _, line := range data {
ret += fmt.Sprintf("%s%s\n", startLineByte, line)
}
r := io.NopCloser(strings.NewReader(ret)) // r type is io.ReadCloser
return r, nil
}
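
The hardcoded startLineByte above is docker's 8-byte stream-multiplexing header with a fixed length field; a sketch that computes the length instead, following the format described in the dlog write-up linked earlier:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frame prepends docker's 8-byte header: byte 0 is the stream type
// (0x01 stdout, 0x02 stderr), bytes 4-7 the big-endian payload length.
func frame(stream byte, payload string) []byte {
	var buf bytes.Buffer
	header := make([]byte, 8)
	header[0] = stream
	binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
	buf.Write(header)
	buf.WriteString(payload)
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", frame(1, "docker\n"))
}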
func TestOneShot(t *testing.T) {
log.Infof("Test 'TestOneShot'")
tests := []struct {
dsn string
expectedErr string
expectedOutput string
expectedLines int
logType string
logLevel log.Level
}{
{
dsn: "docker://non_exist_docker",
expectedErr: "no docker found, can't run one shot acquisition",
expectedOutput: "",
expectedLines: 0,
logType: "test",
logLevel: log.InfoLevel,
},
{
dsn: "docker://" + testContainerName,
expectedErr: "",
expectedOutput: "",
expectedLines: 3,
logType: "test",
logLevel: log.InfoLevel,
},
}
for _, ts := range tests {
var subLogger *log.Entry
var logger *log.Logger
if ts.expectedOutput != "" {
logger = log.New() // initialize before use: SetLevel on a nil *logrus.Logger would panic
logger.SetLevel(ts.logLevel)
subLogger = logger.WithFields(log.Fields{
"type": "docker",
})
} else {
log.SetLevel(ts.logLevel)
subLogger = log.WithFields(log.Fields{
"type": "docker",
})
}
dockerClient := &DockerSource{}
labels := make(map[string]string)
labels["type"] = ts.logType
if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger); err != nil {
t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err)
}
dockerClient.Client = new(mockDockerCli)
out := make(chan types.Event)
actualLines := 0
if ts.expectedLines != 0 {
go func() {
READLOOP:
for {
select {
case <-out:
actualLines++
case <-time.After(1 * time.Second):
break READLOOP
}
}
}()
}
tomb := tomb.Tomb{}
err := dockerClient.OneShotAcquisition(out, &tomb)
if ts.expectedErr == "" && err != nil {
t.Fatalf("Unexpected error : %s", err)
} else if ts.expectedErr != "" && err != nil {
assert.Contains(t, err.Error(), ts.expectedErr)
continue
} else if ts.expectedErr != "" && err == nil {
t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
}
// else we do the check before actualLines is incremented ...
time.Sleep(1 * time.Second)
if ts.expectedLines != 0 {
assert.Equal(t, ts.expectedLines, actualLines)
}
}
}

View file

@ -117,7 +117,7 @@ func (f *FileSource) Configure(Config []byte, logger *log.Entry) error {
return nil
}
func (f *FileSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
if !strings.HasPrefix(dsn, "file://") {
return fmt.Errorf("invalid DSN %s for file source, must start with file://", dsn)
}
@ -153,7 +153,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labelType string, logger *log.En
}
f.config = FileConfiguration{}
f.config.Labels = map[string]string{"type": labelType}
f.config.Labels = labels
f.config.Mode = configuration.CAT_MODE
f.logger.Debugf("Will try pattern %s", args[0])

View file

@ -69,7 +69,7 @@ func TestConfigureDSN(t *testing.T) {
})
for _, test := range tests {
f := FileSource{}
err := f.ConfigureByDSN(test.dsn, "testtype", subLogger)
err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger)
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
} else {

View file

@ -182,11 +182,11 @@ func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry) error
return nil
}
func (j *JournalCtlSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
j.logger = logger
j.config = JournalCtlConfiguration{}
j.config.Mode = configuration.CAT_MODE
j.config.Labels = map[string]string{"type": labelType}
j.config.Labels = labels
//format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2
if !strings.HasPrefix(dsn, "journalctl://") {

View file

@ -92,7 +92,7 @@ func TestConfigureDSN(t *testing.T) {
})
for _, test := range tests {
f := JournalCtlSource{}
err := f.ConfigureByDSN(test.dsn, "testtype", subLogger)
err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger)
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
} else {

View file

@ -37,6 +37,7 @@ func (s *SyslogServer) Listen(listenAddr string, port int) error {
if err != nil {
return errors.Wrapf(err, "could not listen on port %d", s.port)
}
s.Logger.Debugf("listening on %s:%d", s.listenAddr, s.port)
s.udpConn = udpConn
err = s.udpConn.SetReadBuffer(s.MaxMessageLen) // FIXME probably
if err != nil {

View file

@ -73,7 +73,7 @@ func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector {
return []prometheus.Collector{linesReceived, linesParsed}
}
func (s *SyslogSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error {
return fmt.Errorf("syslog datasource does not support one shot acquisition")
}
@ -136,14 +136,19 @@ func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb)
func (s *SyslogSource) buildLogFromSyslog(ts *time.Time, hostname *string,
appname *string, pid *string, msg *string) (string, error) {
ret := ""
if msg == nil {
return "", errors.Errorf("missing message field in syslog message")
}
if ts != nil {
ret += ts.Format("Jan 2 15:04:05")
} else {
s.logger.Tracef("%s - missing TS", *msg)
ret += time.Now().Format("Jan 2 15:04:05")
}
if hostname != nil {
ret += " " + *hostname
} else {
s.logger.Tracef("%s - missing host", *msg)
ret += " unknownhost"
}
if appname != nil {
@ -169,8 +174,6 @@ func (s *SyslogSource) buildLogFromSyslog(ts *time.Time, hostname *string,
}
if msg != nil {
ret += *msg
} else {
return "", errors.Errorf("missing message field in syslog message")
}
return ret, nil
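
For context, buildLogFromSyslog reassembles a classic syslog-style line from the parsed fields, substituting the current time or "unknownhost" when fields are missing. A sketch of the assumed output shape (the appname/pid formatting sits outside this hunk):

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2021, 12, 13, 17, 38, 57, 0, time.UTC)
	// same layout string as the Format calls above
	line := ts.Format("Jan 2 15:04:05") + " myhost nginx[1234]: GET /index.html"
	fmt.Println(line)
}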
@ -194,6 +197,7 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha
var ts time.Time
logger := s.logger.WithField("client", syslogLine.Client)
logger.Tracef("raw: %s", syslogLine)
linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc()
p := rfc5424.NewParser()
m, err := p.Parse(syslogLine.Message)

View file

@ -14,6 +14,8 @@ type AuthService service
// Don't add this to the models package: those are shared with LAPI, while the enroll endpoint is specific to CAPI
type enrollRequest struct {
EnrollKey string `json:"attachment_key"`
Name string `json:"name"`
Tags []string `json:"tags"`
}
func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) {
@ -61,9 +63,9 @@ func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.Watch
return resp, nil
}
func (s *AuthService) EnrollWatcher(ctx context.Context, enrollKey string) (*Response, error) {
func (s *AuthService) EnrollWatcher(ctx context.Context, enrollKey string, name string, tags []string) (*Response, error) {
u := fmt.Sprintf("%s/watchers/enroll", s.client.URLPrefix)
req, err := s.client.NewRequest("POST", u, &enrollRequest{EnrollKey: enrollKey})
req, err := s.client.NewRequest("POST", u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags})
if err != nil {
return nil, err
}

View file

@ -192,7 +192,7 @@ func TestWatcherEnroll(t *testing.T) {
_, _ = buf.ReadFrom(r.Body)
newStr := buf.String()
log.Debugf("body -> %s", newStr)
if newStr == `{"attachment_key":"goodkey"}
if newStr == `{"attachment_key":"goodkey","name":"","tags":[]}
` {
log.Print("good key")
w.WriteHeader(http.StatusOK)
@ -228,11 +228,11 @@ func TestWatcherEnroll(t *testing.T) {
log.Fatalf("new api client: %s", err.Error())
}
_, err = client.Auth.EnrollWatcher(context.Background(), "goodkey")
_, err = client.Auth.EnrollWatcher(context.Background(), "goodkey", "", []string{})
if err != nil {
t.Fatalf("unexpect auth err: %s", err)
t.Fatalf("unexpect enroll err: %s", err)
}
_, err = client.Auth.EnrollWatcher(context.Background(), "badkey")
_, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{})
assert.Contains(t, err.Error(), "the attachment key provided is not valid")
}

View file

@ -366,14 +366,7 @@ func (a *apic) Pull() error {
}
}
func (a *apic) SendMetrics() error {
defer types.CatchPanic("lapi/metricsToAPIC")
log.Infof("start crowdsec api send metrics (interval: %s)", MetricsInterval)
ticker := time.NewTicker(a.metricsInterval)
for {
select {
case <-ticker.C:
func (a *apic) GetMetrics() (*models.Metrics, error) {
version := cwversion.VersionStr()
metric := &models.Metrics{
ApilVersion: &version,
@ -382,11 +375,11 @@ func (a *apic) SendMetrics() error {
}
machines, err := a.dbClient.ListMachines()
if err != nil {
return err
return metric, err
}
bouncers, err := a.dbClient.ListBouncers()
if err != nil {
return err
return metric, err
}
for _, machine := range machines {
m := &models.MetricsSoftInfo{
@ -403,11 +396,36 @@ func (a *apic) SendMetrics() error {
}
metric.Bouncers = append(metric.Bouncers, m)
}
_, _, err = a.apiClient.Metrics.Add(context.Background(), metric)
return metric, nil
}
func (a *apic) SendMetrics() error {
defer types.CatchPanic("lapi/metricsToAPIC")
metrics, err := a.GetMetrics()
if err != nil {
return errors.Wrap(err, "sending metrics failed")
log.Errorf("unable to get metrics (%s), will retry", err)
}
_, _, err = a.apiClient.Metrics.Add(context.Background(), metrics)
if err != nil {
log.Errorf("unable to send metrics (%s), will retry", err)
} else {
log.Infof("capi metrics: metrics sent successfully")
}
log.Infof("start crowdsec api send metrics (interval: %s)", MetricsInterval)
ticker := time.NewTicker(a.metricsInterval)
for {
select {
case <-ticker.C:
metrics, err := a.GetMetrics()
if err != nil {
log.Errorf("unable to get metrics (%s), will retry", err)
}
_, _, err = a.apiClient.Metrics.Add(context.Background(), metrics)
if err != nil {
log.Errorf("capi metrics: failed: %s", err.Error())
} else {
log.Infof("capi metrics: metrics sent successfully")
}
case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others?
a.pullTomb.Kill(nil)
a.pushTomb.Kill(nil)

View file

@ -60,6 +60,12 @@ func (c *Config) LoadCrowdsec() error {
return errors.Wrap(err, "while globing acquis_dir")
}
c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles, files...)
files, err = filepath.Glob(c.Crowdsec.AcquisitionDirPath + "/*.yml")
if err != nil {
return errors.Wrap(err, "while globing acquis_dir")
}
c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles, files...)
}
if c.Crowdsec.AcquisitionDirPath == "" && c.Crowdsec.AcquisitionFilePath == "" {
log.Warningf("no acquisition_path nor acquisition_dir")

View file

@ -412,14 +412,14 @@ func getProcessAtr(username string, groupname string) (*syscall.SysProcAttr, err
if err != nil {
return nil, err
}
if uid < 0 && uid > math.MaxUint32 {
if uid < 0 || uid > math.MaxInt32 {
return nil, fmt.Errorf("out of bound uid")
}
gid, err := strconv.Atoi(g.Gid)
if err != nil {
return nil, err
}
if gid < 0 && gid > math.MaxUint32 {
if gid < 0 || gid > math.MaxInt32 {
return nil, fmt.Errorf("out of bound gid")
}
return &syscall.SysProcAttr{

View file

@ -20,6 +20,7 @@ type HubTestItemConfig struct {
PostOVerflows []string `yaml:"postoverflows"`
LogFile string `yaml:"log_file"`
LogType string `yaml:"log_type"`
Labels map[string]string `yaml:"labels"`
IgnoreParsers bool `yaml:"ignore_parsers"` // if we test a scenario, we don't want to assert on Parser
}
@ -513,6 +514,10 @@ func (t *HubTestItem) Run() error {
}
cmdArgs = []string{"-c", t.RuntimeConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", t.ResultsPath}
for labelKey, labelValue := range t.Config.Labels {
arg := fmt.Sprintf("%s:%s", labelKey, labelValue)
cmdArgs = append(cmdArgs, "-label", arg)
}
crowdsecCmd := exec.Command(t.CrowdSecPath, cmdArgs...)
log.Debugf("%s", crowdsecCmd.String())
output, err = crowdsecCmd.CombinedOutput()
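
The loop above turns the test's label map into repeated "-label key:value" arguments. A hypothetical sketch of how such arguments can be folded back into a map on the consumer side (the flag name matches the cmdArgs above; the parsing helper is illustrative, not crowdsec's actual flag handling):

package main

import (
	"fmt"
	"strings"
)

func parseLabels(args []string) map[string]string {
	labels := make(map[string]string)
	for i := 0; i+1 < len(args); i++ {
		if args[i] != "-label" {
			continue
		}
		kv := strings.SplitN(args[i+1], ":", 2)
		if len(kv) == 2 {
			labels[kv[0]] = kv[1]
		}
	}
	return labels
}

func main() {
	fmt.Println(parseLabels([]string{"-label", "type:syslog", "-label", "env:test"}))
}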

View file

@ -15,7 +15,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/enescakir/emoji"
"github.com/fatih/color"
"github.com/pkg/errors"
diff "github.com/r3labs/diff/v2"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@ -153,9 +155,6 @@ func (p *ParserAssert) RunExpression(expression string) (interface{}, error) {
if runtimeFilter, err = expr.Compile(expression, expr.Env(exprhelpers.GetExprEnv(env))); err != nil {
return output, err
}
// if debugFilter, err = exprhelpers.NewDebugger(assert, expr.Env(exprhelpers.GetExprEnv(env))); err != nil {
// log.Warningf("Failed building debugher for %s : %s", assert, err)
// }
//dump opcode in trace level
log.Tracef("%s", runtimeFilter.Disassemble())
@ -272,10 +271,15 @@ func LoadParserDump(filepath string) (*ParserResults, error) {
return &pdump, nil
}
func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
type DumpOpts struct {
Details bool
SkipOk bool
}
func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts DumpOpts) {
//note : we can use line -> time as the unique identifier (of acquisition)
state := make(map[time.Time]map[string]map[string]bool, 0)
state := make(map[time.Time]map[string]map[string]ParserResult)
assoc := make(map[time.Time]string, 0)
for stage, parsers := range parser_results {
@ -283,14 +287,15 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
for _, parser_res := range results {
evt := parser_res.Evt
if _, ok := state[evt.Line.Time]; !ok {
state[evt.Line.Time] = make(map[string]map[string]bool)
state[evt.Line.Time] = make(map[string]map[string]ParserResult)
assoc[evt.Line.Time] = evt.Line.Raw
}
if _, ok := state[evt.Line.Time][stage]; !ok {
state[evt.Line.Time][stage] = make(map[string]bool)
state[evt.Line.Time][stage] = make(map[string]ParserResult)
}
state[evt.Line.Time][stage][parser] = parser_res.Success
state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parser_res.Success}
}
}
}
@ -301,20 +306,27 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
}
//it might be a bucket overflow being reprocessed, skip this
if _, ok := state[evt.Line.Time]; !ok {
state[evt.Line.Time] = make(map[string]map[string]bool)
state[evt.Line.Time] = make(map[string]map[string]ParserResult)
assoc[evt.Line.Time] = evt.Line.Raw
}
//there is a trick: to know if an event successfully exited the parsers, we check whether it reached the pour() phase
//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
if _, ok := state[evt.Line.Time]["buckets"]; !ok {
state[evt.Line.Time]["buckets"] = make(map[string]bool)
state[evt.Line.Time]["buckets"] = make(map[string]ParserResult)
}
state[evt.Line.Time]["buckets"][bname] = true
state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true}
}
}
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
green := color.New(color.FgGreen).SprintFunc()
//get each line
for tstamp, rawstr := range assoc {
if opts.SkipOk {
if _, ok := state[tstamp]["buckets"]["OK"]; ok {
continue
}
}
fmt.Printf("line: %s\n", rawstr)
skeys := make([]string, 0, len(state[tstamp]))
for k := range state[tstamp] {
@ -327,6 +339,8 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
}
sort.Strings(skeys)
//iterate stage
var prev_item types.Event
for _, stage := range skeys {
parsers := state[tstamp][stage]
@ -342,13 +356,72 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
sort.Strings(pkeys)
for idx, parser := range pkeys {
res := parsers[parser]
res := parsers[parser].Success
sep := "├"
if idx == len(pkeys)-1 {
sep = "└"
}
created := 0
updated := 0
deleted := 0
whitelisted := false
changeStr := ""
detailsDisplay := ""
if res {
fmt.Printf("\t%s\t%s %s %s\n", presep, sep, emoji.GreenCircle, parser)
if prev_item.Stage == "" {
changeStr = "first_parser"
} else {
changelog, _ := diff.Diff(prev_item, parsers[parser].Evt)
for _, change := range changelog {
switch change.Type {
case "create":
created++
detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s : %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), green(change.To))
case "update":
detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s : %s -> %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), change.From, yellow(change.To))
if change.Path[0] == "Whitelisted" && change.To == true {
whitelisted = true
}
updated++
case "delete":
deleted++
detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s\n", presep, sep, change.Type, red(strings.Join(change.Path, ".")))
}
}
}
prev_item = parsers[parser].Evt
}
if created > 0 {
changeStr += green(fmt.Sprintf("+%d", created))
}
if updated > 0 {
if len(changeStr) > 0 {
changeStr += " "
}
changeStr += yellow(fmt.Sprintf("~%d", updated))
}
if deleted > 0 {
if len(changeStr) > 0 {
changeStr += " "
}
changeStr += red(fmt.Sprintf("-%d", deleted))
}
if whitelisted {
if len(changeStr) > 0 {
changeStr += " "
}
changeStr += red("[whitelisted]")
}
if changeStr == "" {
changeStr = yellow("unchanged")
}
if res {
fmt.Printf("\t%s\t%s %s %s (%s)\n", presep, sep, emoji.GreenCircle, parser, changeStr)
if opts.Details {
fmt.Print(detailsDisplay)
}
} else {
fmt.Printf("\t%s\t%s %s %s\n", presep, sep, emoji.RedCircle, parser)
@ -388,5 +461,4 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo) error {
}
fmt.Println()
}
return nil
}
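
The created/updated/deleted accounting above comes from r3labs/diff. A standalone sketch of the same changelog walk, on a toy struct instead of types.Event:

package main

import (
	"fmt"
	"strings"

	diff "github.com/r3labs/diff/v2"
)

type event struct {
	Stage       string
	Parsed      map[string]string
	Whitelisted bool
}

func main() {
	prev := event{Stage: "s01-parse", Parsed: map[string]string{"source_ip": "1.2.3.4"}}
	next := event{Stage: "s02-enrich", Parsed: map[string]string{"source_ip": "1.2.3.4", "asn": "AS1234"}, Whitelisted: true}
	changelog, err := diff.Diff(prev, next)
	if err != nil {
		panic(err)
	}
	for _, change := range changelog {
		// change.Type is "create", "update" or "delete", as matched above
		fmt.Printf("%s evt.%s: %v -> %v\n", change.Type, strings.Join(change.Path, "."), change.From, change.To)
	}
}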

View file

@ -208,7 +208,7 @@ func testInstallItem(cfg *csconfig.Hub, t *testing.T, item Item) {
item, err = EnableItem(cfg, item)
if err != nil {
t.Fatalf("error while enabled %s : %v.", item.Name, err)
t.Fatalf("error while enabling %s : %v.", item.Name, err)
}
if err, _ := LocalSync(cfg); err != nil {
t.Fatalf("taint: failed to run localSync : %s", err)

View file

@ -109,7 +109,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("unable to read symlink of %s", path)
}
//the symlink target doesn't exist, user might have remove ~/.hub/hub/...yaml without deleting /etc/crowdsec/....yaml
//the symlink target doesn't exist, user might have removed ~/.hub/hub/...yaml without deleting /etc/crowdsec/....yaml
_, err := os.Lstat(hubpath)
if os.IsNotExist(err) {
log.Infof("%s is a symlink to %s that doesn't exist, deleting symlink", path, hubpath)

View file

@ -730,6 +730,24 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert,
return ret, nil
}
func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) {
idList := make([]int, 0)
for _, alert := range alertItems {
idList = append(idList, int(alert.ID))
}
deleted, err := c.Ent.Alert.Delete().
Where(alert.IDIn(idList...)).Exec(c.CTX)
if err != nil {
c.Log.Warningf("DeleteAlertGraph : %s", err)
return deleted, errors.Wrapf(DeleteFail, "alert graph delete batch")
}
c.Log.Debug("Done batch delete alerts")
return deleted, nil
}
func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error {
// delete the associated events
_, err := c.Ent.Event.Delete().
@ -810,12 +828,15 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error {
var totalAlerts int
var err error
c.Log.Debug("Flushing orphan alerts")
c.FlushOrphans()
c.Log.Debug("Done flushing orphan alerts")
totalAlerts, err = c.TotalAlerts()
if err != nil {
c.Log.Warningf("FlushAlerts (max items count) : %s", err)
return errors.Wrap(err, "unable to get alerts count")
}
c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts)
if MaxAge != "" {
filter := map[string][]string{
"created_before": {MaxAge},
@ -825,27 +846,37 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error {
c.Log.Warningf("FlushAlerts (max age) : %s", err)
return errors.Wrapf(err, "unable to flush alerts with filter until: %s", MaxAge)
}
c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted)
deletedByAge = nbDeleted
}
if MaxItems > 0 {
if totalAlerts > MaxItems {
nbToDelete := totalAlerts - MaxItems
alerts, err := c.QueryAlertWithFilter(map[string][]string{
"sort": {"ASC"},
"limit": {strconv.Itoa(nbToDelete)},
}) // we want to delete older alerts if we reach the max number of items
//We get the highest id for the alerts
//We subtract MaxItems to avoid deleting alerts that are not old enough
//This gives us the oldest alert that we want to keep
//We then delete all the alerts with an id lower than this one
//We can do this because the id is auto-increment, and the database won't reuse the same id twice
lastAlert, err := c.QueryAlertWithFilter(map[string][]string{
"sort": {"DESC"},
"limit": {"1"},
})
c.Log.Debugf("FlushAlerts (last alert): %+v", lastAlert)
if err != nil {
c.Log.Warningf("FlushAlerts (max items query) : %s", err)
return errors.Wrap(err, "unable to get all alerts")
c.Log.Errorf("FlushAlerts: could not get last alert: %s", err)
return errors.Wrap(err, "could not get last alert")
}
for itemNb, alert := range alerts {
if itemNb < nbToDelete {
err := c.DeleteAlertGraph(alert)
if len(lastAlert) != 0 {
maxid := lastAlert[0].ID - MaxItems
c.Log.Debugf("FlushAlerts (max id): %d", maxid)
if maxid > 0 {
//This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted
deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX)
if err != nil {
c.Log.Warningf("FlushAlerts : %s", err)
return errors.Wrap(err, "unable to flush alert")
}
deletedByNbItem++
c.Log.Errorf("FlushAlerts: Could not delete alerts : %s", err)
return errors.Wrap(err, "could not delete alerts")
}
}
}
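
A worked example of the max-id arithmetic described in the comment block above:

package main

import "fmt"

func main() {
	lastAlertID := 1500 // highest auto-increment id in the alerts table
	maxItems := 1000    // retention target from the flush config
	if maxid := lastAlertID - maxItems; maxid > 0 {
		// ids below 500 are deleted; roughly the MaxItems most recent alerts survive
		fmt.Printf("delete alerts where id < %d\n", maxid)
	}
}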

View file

@ -100,7 +100,8 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched
}
// Init & Start cronjob every minute
scheduler := gocron.NewScheduler(time.UTC)
scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems)
job, _ := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems)
job.SingletonMode()
scheduler.StartAsync()
return scheduler, nil
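
SingletonMode here keeps a slow flush from stacking up behind the one-minute ticker: gocron skips a run while the previous one is still executing. A small sketch of that behavior, with the interval shortened for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	scheduler := gocron.NewScheduler(time.UTC)
	job, _ := scheduler.Every(1).Second().Do(func() {
		fmt.Println("flush tick")
		time.Sleep(3 * time.Second) // slow job; SingletonMode prevents pile-up
	})
	job.SingletonMode()
	scheduler.StartAsync()
	time.Sleep(5 * time.Second)
}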

View file

@ -129,17 +129,17 @@ func (*Alert) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case alert.FieldSimulated:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case alert.FieldSourceLatitude, alert.FieldSourceLongitude:
values[i] = &sql.NullFloat64{}
values[i] = new(sql.NullFloat64)
case alert.FieldID, alert.FieldEventsCount, alert.FieldCapacity:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case alert.FieldScenario, alert.FieldBucketId, alert.FieldMessage, alert.FieldSourceIp, alert.FieldSourceRange, alert.FieldSourceAsNumber, alert.FieldSourceAsName, alert.FieldSourceCountry, alert.FieldSourceScope, alert.FieldSourceValue, alert.FieldLeakSpeed, alert.FieldScenarioVersion, alert.FieldScenarioHash:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case alert.FieldCreatedAt, alert.FieldUpdatedAt, alert.FieldStartedAt, alert.FieldStoppedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case alert.ForeignKeys[0]: // machine_alerts
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i])
}

View file

@ -65,28 +65,28 @@ const (
EdgeMetas = "metas"
// Table holds the table name of the alert in the database.
Table = "alerts"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "alerts"
// OwnerInverseTable is the table name for the Machine entity.
// It exists in this package in order to avoid circular dependency with the "machine" package.
OwnerInverseTable = "machines"
// OwnerColumn is the table column denoting the owner relation/edge.
OwnerColumn = "machine_alerts"
// DecisionsTable is the table the holds the decisions relation/edge.
// DecisionsTable is the table that holds the decisions relation/edge.
DecisionsTable = "decisions"
// DecisionsInverseTable is the table name for the Decision entity.
// It exists in this package in order to avoid circular dependency with the "decision" package.
DecisionsInverseTable = "decisions"
// DecisionsColumn is the table column denoting the decisions relation/edge.
DecisionsColumn = "alert_decisions"
// EventsTable is the table the holds the events relation/edge.
// EventsTable is the table that holds the events relation/edge.
EventsTable = "events"
// EventsInverseTable is the table name for the Event entity.
// It exists in this package in order to avoid circular dependency with the "event" package.
EventsInverseTable = "events"
// EventsColumn is the table column denoting the events relation/edge.
EventsColumn = "alert_events"
// MetasTable is the table the holds the metas relation/edge.
// MetasTable is the table that holds the metas relation/edge.
MetasTable = "meta"
// MetasInverseTable is the table name for the Meta entity.
// It exists in this package in order to avoid circular dependency with the "meta" package.

View file

@ -415,11 +415,17 @@ func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) {
return nil, err
}
ac.mutation = mutation
node, err = ac.sqlSave(ctx)
if node, err = ac.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ac.hooks) - 1; i >= 0; i-- {
if ac.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ac.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ac.mutation); err != nil {
@ -438,6 +444,19 @@ func (ac *AlertCreate) SaveX(ctx context.Context) *Alert {
return v
}
// Exec executes the query.
func (ac *AlertCreate) Exec(ctx context.Context) error {
_, err := ac.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (ac *AlertCreate) ExecX(ctx context.Context) {
if err := ac.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (ac *AlertCreate) defaults() {
if _, ok := ac.mutation.CreatedAt(); !ok {
@ -477,16 +496,16 @@ func (ac *AlertCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (ac *AlertCreate) check() error {
if _, ok := ac.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := ac.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := ac.mutation.Scenario(); !ok {
return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")}
return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "scenario"`)}
}
if _, ok := ac.mutation.Simulated(); !ok {
return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")}
return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "simulated"`)}
}
return nil
}
@ -494,8 +513,8 @@ func (ac *AlertCreate) check() error {
func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
_node, _spec := ac.createSpec()
if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@ -800,19 +819,23 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, acb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -837,3 +860,16 @@ func (acb *AlertCreateBulk) SaveX(ctx context.Context) []*Alert {
}
return v
}
// Exec executes the query.
func (acb *AlertCreateBulk) Exec(ctx context.Context) error {
_, err := acb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (acb *AlertCreateBulk) ExecX(ctx context.Context) {
if err := acb.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -20,9 +20,9 @@ type AlertDelete struct {
mutation *AlertMutation
}
// Where adds a new predicate to the AlertDelete builder.
// Where appends a list of predicates to the AlertDelete builder.
func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete {
ad.mutation.predicates = append(ad.mutation.predicates, ps...)
ad.mutation.Where(ps...)
return ad
}
@ -46,6 +46,9 @@ func (ad *AlertDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(ad.hooks) - 1; i >= 0; i-- {
if ad.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ad.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ad.mutation); err != nil {

View file

@ -25,6 +25,7 @@ type AlertQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Alert
@ -57,6 +58,13 @@ func (aq *AlertQuery) Offset(offset int) *AlertQuery {
return aq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (aq *AlertQuery) Unique(unique bool) *AlertQuery {
aq.unique = &unique
return aq
}
// Order adds an order step to the query.
func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery {
aq.order = append(aq.order, o...)
@ -426,8 +434,8 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
// Select(alert.FieldCreatedAt).
// Scan(ctx, &v)
//
func (aq *AlertQuery) Select(field string, fields ...string) *AlertSelect {
aq.fields = append([]string{field}, fields...)
func (aq *AlertQuery) Select(fields ...string) *AlertSelect {
aq.fields = append(aq.fields, fields...)
return &AlertSelect{AlertQuery: aq}
}
@ -489,11 +497,14 @@ func (aq *AlertQuery) sqlAll(ctx context.Context) ([]*Alert, error) {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*Alert)
for i := range nodes {
fk := nodes[i].machine_alerts
if fk != nil {
ids = append(ids, *fk)
nodeids[*fk] = append(nodeids[*fk], nodes[i])
if nodes[i].machine_alerts == nil {
continue
}
fk := *nodes[i].machine_alerts
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(machine.IDIn(ids...))
neighbors, err := query.All(ctx)
@ -627,6 +638,9 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
From: aq.sql,
Unique: true,
}
if unique := aq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := aq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
@ -652,7 +666,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
if ps := aq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, alert.ValidColumn)
ps[i](selector)
}
}
}
@ -662,16 +676,20 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(aq.driver.Dialect())
t1 := builder.Table(alert.Table)
selector := builder.Select(t1.Columns(alert.Columns...)...).From(t1)
columns := aq.fields
if len(columns) == 0 {
columns = alert.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if aq.sql != nil {
selector = aq.sql
selector.Select(selector.Columns(alert.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range aq.predicates {
p(selector)
}
for _, p := range aq.order {
p(selector, alert.ValidColumn)
p(selector)
}
if offset := aq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -933,13 +951,24 @@ func (agb *AlertGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (agb *AlertGroupBy) sqlQuery() *sql.Selector {
selector := agb.sql
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
columns = append(columns, agb.fields...)
selector := agb.sql.Select()
aggregation := make([]string, 0, len(agb.fns))
for _, fn := range agb.fns {
columns = append(columns, fn(selector, alert.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(agb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
for _, f := range agb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(agb.fields...)...)
}
// AlertSelect is the builder for selecting fields of Alert entities.
@ -1155,16 +1184,10 @@ func (as *AlertSelect) BoolX(ctx context.Context) bool {
func (as *AlertSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := as.sqlQuery().Query()
query, args := as.sql.Query()
if err := as.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (as *AlertSelect) sqlQuery() sql.Querier {
selector := as.sql
selector.Select(selector.Columns(as.fields...)...)
return selector
}

View file

@ -25,9 +25,9 @@ type AlertUpdate struct {
mutation *AlertMutation
}
// Where adds a new predicate for the AlertUpdate builder.
// Where appends a list of predicates to the AlertUpdate builder.
func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate {
au.mutation.predicates = append(au.mutation.predicates, ps...)
au.mutation.Where(ps...)
return au
}
@ -625,6 +625,9 @@ func (au *AlertUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(au.hooks) - 1; i >= 0; i-- {
if au.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = au.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, au.mutation); err != nil {
@ -1164,8 +1167,8 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{alert.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -1175,6 +1178,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) {
// AlertUpdateOne is the builder for updating a single Alert entity.
type AlertUpdateOne struct {
config
fields []string
hooks []Hook
mutation *AlertMutation
}
@ -1753,6 +1757,13 @@ func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne {
return auo.RemoveMetaIDs(ids...)
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOne {
auo.fields = append([]string{field}, fields...)
return auo
}
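A sketch of the new update-one field selection, assuming a hypothetical client, ctx, and alert id, and that the alert schema defines the simulated field used below:

// Only the id plus the selected column are fetched back onto the entity.
a, err := client.Alert.
	UpdateOneID(id).
	SetSimulated(true).
	Select(alert.FieldSimulated).
	Save(ctx)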
// Save executes the query and returns the updated Alert entity.
func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) {
var (
@ -1773,6 +1784,9 @@ func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) {
return node, err
})
for i := len(auo.hooks) - 1; i >= 0; i-- {
if auo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = auo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, auo.mutation); err != nil {
@ -1820,6 +1834,18 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Alert.ID for update")}
}
_spec.Node.ID.Value = id
if fields := auo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
for _, f := range fields {
if !alert.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != alert.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := auo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -2320,8 +2346,8 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error
if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{alert.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -44,13 +44,13 @@ func (*Bouncer) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case bouncer.FieldRevoked:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case bouncer.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i])
}

View file

@ -163,11 +163,17 @@ func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) {
return nil, err
}
bc.mutation = mutation
node, err = bc.sqlSave(ctx)
if node, err = bc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(bc.hooks) - 1; i >= 0; i-- {
if bc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bc.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bc.mutation); err != nil {
@ -186,6 +192,19 @@ func (bc *BouncerCreate) SaveX(ctx context.Context) *Bouncer {
return v
}
// Exec executes the query.
func (bc *BouncerCreate) Exec(ctx context.Context) error {
_, err := bc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (bc *BouncerCreate) ExecX(ctx context.Context) {
if err := bc.Exec(ctx); err != nil {
panic(err)
}
}
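Exec is a convenience over Save for callers that do not need the created entity back; a sketch with hypothetical values (the setters mirror the required fields enforced by check() below):

// Create a bouncer and discard the returned entity.
err := client.Bouncer.Create().
	SetName("firewall-1").      // hypothetical name
	SetAPIKey(apiKey).          // hypothetical key variable
	SetRevoked(false).
	SetLastPull(time.Now()).
	Exec(ctx)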
// defaults sets the default values of the builder before save.
func (bc *BouncerCreate) defaults() {
if _, ok := bc.mutation.CreatedAt(); !ok {
@ -213,22 +232,22 @@ func (bc *BouncerCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (bc *BouncerCreate) check() error {
if _, ok := bc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := bc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := bc.mutation.Name(); !ok {
return &ValidationError{Name: "name", err: errors.New("ent: missing required field \"name\"")}
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "name"`)}
}
if _, ok := bc.mutation.APIKey(); !ok {
return &ValidationError{Name: "api_key", err: errors.New("ent: missing required field \"api_key\"")}
return &ValidationError{Name: "api_key", err: errors.New(`ent: missing required field "api_key"`)}
}
if _, ok := bc.mutation.Revoked(); !ok {
return &ValidationError{Name: "revoked", err: errors.New("ent: missing required field \"revoked\"")}
return &ValidationError{Name: "revoked", err: errors.New(`ent: missing required field "revoked"`)}
}
if _, ok := bc.mutation.LastPull(); !ok {
return &ValidationError{Name: "last_pull", err: errors.New("ent: missing required field \"last_pull\"")}
return &ValidationError{Name: "last_pull", err: errors.New(`ent: missing required field "last_pull"`)}
}
return nil
}
@ -236,8 +255,8 @@ func (bc *BouncerCreate) check() error {
func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) {
_node, _spec := bc.createSpec()
if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@ -369,19 +388,23 @@ func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, bcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, bcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -406,3 +429,16 @@ func (bcb *BouncerCreateBulk) SaveX(ctx context.Context) []*Bouncer {
}
return v
}
// Exec executes the query.
func (bcb *BouncerCreateBulk) Exec(ctx context.Context) error {
_, err := bcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (bcb *BouncerCreateBulk) ExecX(ctx context.Context) {
if err := bcb.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -20,9 +20,9 @@ type BouncerDelete struct {
mutation *BouncerMutation
}
// Where adds a new predicate to the BouncerDelete builder.
// Where appends a list of predicates to the BouncerDelete builder.
func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete {
bd.mutation.predicates = append(bd.mutation.predicates, ps...)
bd.mutation.Where(ps...)
return bd
}
@ -46,6 +46,9 @@ func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(bd.hooks) - 1; i >= 0; i-- {
if bd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bd.mutation); err != nil {

View file

@ -20,6 +20,7 @@ type BouncerQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Bouncer
@ -46,6 +47,13 @@ func (bq *BouncerQuery) Offset(offset int) *BouncerQuery {
return bq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery {
bq.unique = &unique
return bq
}
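For example, to opt out of the default SELECT DISTINCT (a sketch; client and ctx are hypothetical):

bouncers, err := client.Bouncer.Query().
	Unique(false).
	All(ctx)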
// Order adds an order step to the query.
func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery {
bq.order = append(bq.order, o...)
@ -279,8 +287,8 @@ func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy
// Select(bouncer.FieldCreatedAt).
// Scan(ctx, &v)
//
func (bq *BouncerQuery) Select(field string, fields ...string) *BouncerSelect {
bq.fields = append([]string{field}, fields...)
func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect {
bq.fields = append(bq.fields, fields...)
return &BouncerSelect{BouncerQuery: bq}
}
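The now fully variadic signature also composes with the generated scan helpers; a sketch assuming a hypothetical client and ctx:

// Select a single column and scan it into a string slice.
names, err := client.Bouncer.Query().
	Select(bouncer.FieldName).
	Strings(ctx)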
@ -352,6 +360,9 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
From: bq.sql,
Unique: true,
}
if unique := bq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := bq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID)
@ -377,7 +388,7 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
if ps := bq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, bouncer.ValidColumn)
ps[i](selector)
}
}
}
@ -387,16 +398,20 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(bq.driver.Dialect())
t1 := builder.Table(bouncer.Table)
selector := builder.Select(t1.Columns(bouncer.Columns...)...).From(t1)
columns := bq.fields
if len(columns) == 0 {
columns = bouncer.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if bq.sql != nil {
selector = bq.sql
selector.Select(selector.Columns(bouncer.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range bq.predicates {
p(selector)
}
for _, p := range bq.order {
p(selector, bouncer.ValidColumn)
p(selector)
}
if offset := bq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -658,13 +673,24 @@ func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector {
selector := bgb.sql
columns := make([]string, 0, len(bgb.fields)+len(bgb.fns))
columns = append(columns, bgb.fields...)
selector := bgb.sql.Select()
aggregation := make([]string, 0, len(bgb.fns))
for _, fn := range bgb.fns {
columns = append(columns, fn(selector, bouncer.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(bgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(bgb.fields)+len(bgb.fns))
for _, f := range bgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(bgb.fields...)...)
}
// BouncerSelect is the builder for selecting fields of Bouncer entities.
@ -880,16 +906,10 @@ func (bs *BouncerSelect) BoolX(ctx context.Context) bool {
func (bs *BouncerSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := bs.sqlQuery().Query()
query, args := bs.sql.Query()
if err := bs.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (bs *BouncerSelect) sqlQuery() sql.Querier {
selector := bs.sql
selector.Select(selector.Columns(bs.fields...)...)
return selector
}

View file

@ -21,9 +21,9 @@ type BouncerUpdate struct {
mutation *BouncerMutation
}
// Where adds a new predicate for the BouncerUpdate builder.
// Where appends a list of predicates to the BouncerUpdate builder.
func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate {
bu.mutation.predicates = append(bu.mutation.predicates, ps...)
bu.mutation.Where(ps...)
return bu
}
@ -192,6 +192,9 @@ func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(bu.hooks) - 1; i >= 0; i-- {
if bu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bu.mutation); err != nil {
@ -338,8 +341,8 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{bouncer.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -349,6 +352,7 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
// BouncerUpdateOne is the builder for updating a single Bouncer entity.
type BouncerUpdateOne struct {
config
fields []string
hooks []Hook
mutation *BouncerMutation
}
@ -498,6 +502,13 @@ func (buo *BouncerUpdateOne) Mutation() *BouncerMutation {
return buo.mutation
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne {
buo.fields = append([]string{field}, fields...)
return buo
}
// Save executes the query and returns the updated Bouncer entity.
func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) {
var (
@ -518,6 +529,9 @@ func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) {
return node, err
})
for i := len(buo.hooks) - 1; i >= 0; i-- {
if buo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = buo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, buo.mutation); err != nil {
@ -565,6 +579,18 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Bouncer.ID for update")}
}
_spec.Node.ID.Value = id
if fields := buo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID)
for _, f := range fields {
if !bouncer.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != bouncer.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := buo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -672,8 +698,8 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
if err = sqlgraph.UpdateNode(ctx, buo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{bouncer.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -223,7 +223,9 @@ func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne {
// Query returns a query builder for Alert.
func (c *AlertClient) Query() *AlertQuery {
return &AlertQuery{config: c.config}
return &AlertQuery{
config: c.config,
}
}
// Get returns an Alert entity by its id.
@ -375,7 +377,9 @@ func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne {
// Query returns a query builder for Bouncer.
func (c *BouncerClient) Query() *BouncerQuery {
return &BouncerQuery{config: c.config}
return &BouncerQuery{
config: c.config,
}
}
// Get returns a Bouncer entity by its id.
@ -463,7 +467,9 @@ func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne {
// Query returns a query builder for Decision.
func (c *DecisionClient) Query() *DecisionQuery {
return &DecisionQuery{config: c.config}
return &DecisionQuery{
config: c.config,
}
}
// Get returns a Decision entity by its id.
@ -567,7 +573,9 @@ func (c *EventClient) DeleteOneID(id int) *EventDeleteOne {
// Query returns a query builder for Event.
func (c *EventClient) Query() *EventQuery {
return &EventQuery{config: c.config}
return &EventQuery{
config: c.config,
}
}
// Get returns an Event entity by its id.
@ -671,7 +679,9 @@ func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne {
// Query returns a query builder for Machine.
func (c *MachineClient) Query() *MachineQuery {
return &MachineQuery{config: c.config}
return &MachineQuery{
config: c.config,
}
}
// Get returns a Machine entity by its id.
@ -775,7 +785,9 @@ func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne {
// Query returns a query builder for Meta.
func (c *MetaClient) Query() *MetaQuery {
return &MetaQuery{config: c.config}
return &MetaQuery{
config: c.config,
}
}
// Get returns a Meta entity by its id.

View file

@ -80,15 +80,15 @@ func (*Decision) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case decision.FieldSimulated:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case decision.FieldID, decision.FieldStartIP, decision.FieldEndIP, decision.FieldStartSuffix, decision.FieldEndSuffix, decision.FieldIPSize:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case decision.FieldScenario, decision.FieldType, decision.FieldScope, decision.FieldValue, decision.FieldOrigin:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case decision.ForeignKeys[0]: // alert_decisions
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i])
}

View file

@ -43,7 +43,7 @@ const (
EdgeOwner = "owner"
// Table holds the table name of the decision in the database.
Table = "decisions"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "decisions"
// OwnerInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

View file

@ -215,11 +215,17 @@ func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) {
return nil, err
}
dc.mutation = mutation
node, err = dc.sqlSave(ctx)
if node, err = dc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(dc.hooks) - 1; i >= 0; i-- {
if dc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dc.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dc.mutation); err != nil {
@ -238,6 +244,19 @@ func (dc *DecisionCreate) SaveX(ctx context.Context) *Decision {
return v
}
// Exec executes the query.
func (dc *DecisionCreate) Exec(ctx context.Context) error {
_, err := dc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dc *DecisionCreate) ExecX(ctx context.Context) {
if err := dc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (dc *DecisionCreate) defaults() {
if _, ok := dc.mutation.CreatedAt(); !ok {
@ -257,31 +276,31 @@ func (dc *DecisionCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (dc *DecisionCreate) check() error {
if _, ok := dc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := dc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := dc.mutation.Until(); !ok {
return &ValidationError{Name: "until", err: errors.New("ent: missing required field \"until\"")}
return &ValidationError{Name: "until", err: errors.New(`ent: missing required field "until"`)}
}
if _, ok := dc.mutation.Scenario(); !ok {
return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")}
return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "scenario"`)}
}
if _, ok := dc.mutation.GetType(); !ok {
return &ValidationError{Name: "type", err: errors.New("ent: missing required field \"type\"")}
return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "type"`)}
}
if _, ok := dc.mutation.Scope(); !ok {
return &ValidationError{Name: "scope", err: errors.New("ent: missing required field \"scope\"")}
return &ValidationError{Name: "scope", err: errors.New(`ent: missing required field "scope"`)}
}
if _, ok := dc.mutation.Value(); !ok {
return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")}
return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "value"`)}
}
if _, ok := dc.mutation.Origin(); !ok {
return &ValidationError{Name: "origin", err: errors.New("ent: missing required field \"origin\"")}
return &ValidationError{Name: "origin", err: errors.New(`ent: missing required field "origin"`)}
}
if _, ok := dc.mutation.Simulated(); !ok {
return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")}
return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "simulated"`)}
}
return nil
}
@ -289,8 +308,8 @@ func (dc *DecisionCreate) check() error {
func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) {
_node, _spec := dc.createSpec()
if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@ -474,19 +493,23 @@ func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, dcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -511,3 +534,16 @@ func (dcb *DecisionCreateBulk) SaveX(ctx context.Context) []*Decision {
}
return v
}
// Exec executes the query.
func (dcb *DecisionCreateBulk) Exec(ctx context.Context) error {
_, err := dcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (dcb *DecisionCreateBulk) ExecX(ctx context.Context) {
if err := dcb.Exec(ctx); err != nil {
panic(err)
}
}
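As with the single-entity builder, bulk creation can now run without materializing results; a sketch with hypothetical, fully populated builders:

// Each builder must have its required fields set, as in DecisionCreate.check().
builders := []*ent.DecisionCreate{ /* ... */ }
err := client.Decision.CreateBulk(builders...).Exec(ctx)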

View file

@ -20,9 +20,9 @@ type DecisionDelete struct {
mutation *DecisionMutation
}
// Where adds a new predicate to the DecisionDelete builder.
// Where appends a list of predicates to the DecisionDelete builder.
func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete {
dd.mutation.predicates = append(dd.mutation.predicates, ps...)
dd.mutation.Where(ps...)
return dd
}
@ -46,6 +46,9 @@ func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(dd.hooks) - 1; i >= 0; i-- {
if dd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dd.mutation); err != nil {

View file

@ -21,6 +21,7 @@ type DecisionQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Decision
@ -50,6 +51,13 @@ func (dq *DecisionQuery) Offset(offset int) *DecisionQuery {
return dq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery {
dq.unique = &unique
return dq
}
// Order adds an order step to the query.
func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery {
dq.order = append(dq.order, o...)
@ -317,8 +325,8 @@ func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupB
// Select(decision.FieldCreatedAt).
// Scan(ctx, &v)
//
func (dq *DecisionQuery) Select(field string, fields ...string) *DecisionSelect {
dq.fields = append([]string{field}, fields...)
func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect {
dq.fields = append(dq.fields, fields...)
return &DecisionSelect{DecisionQuery: dq}
}
@ -377,11 +385,14 @@ func (dq *DecisionQuery) sqlAll(ctx context.Context) ([]*Decision, error) {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*Decision)
for i := range nodes {
fk := nodes[i].alert_decisions
if fk != nil {
ids = append(ids, *fk)
nodeids[*fk] = append(nodeids[*fk], nodes[i])
if nodes[i].alert_decisions == nil {
continue
}
fk := *nodes[i].alert_decisions
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
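The deduplicated foreign-key collection above is exercised whenever the owner edge is eager-loaded, e.g. (a sketch; client and ctx are hypothetical):

decisions, err := client.Decision.Query().
	WithOwner().
	All(ctx)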
@ -428,6 +439,9 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
From: dq.sql,
Unique: true,
}
if unique := dq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := dq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID)
@ -453,7 +467,7 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
if ps := dq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, decision.ValidColumn)
ps[i](selector)
}
}
}
@ -463,16 +477,20 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(decision.Table)
selector := builder.Select(t1.Columns(decision.Columns...)...).From(t1)
columns := dq.fields
if len(columns) == 0 {
columns = decision.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if dq.sql != nil {
selector = dq.sql
selector.Select(selector.Columns(decision.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range dq.predicates {
p(selector)
}
for _, p := range dq.order {
p(selector, decision.ValidColumn)
p(selector)
}
if offset := dq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -734,13 +752,24 @@ func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector {
selector := dgb.sql
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
columns = append(columns, dgb.fields...)
selector := dgb.sql.Select()
aggregation := make([]string, 0, len(dgb.fns))
for _, fn := range dgb.fns {
columns = append(columns, fn(selector, decision.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(dgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
for _, f := range dgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(dgb.fields...)...)
}
// DecisionSelect is the builder for selecting fields of Decision entities.
@ -956,16 +985,10 @@ func (ds *DecisionSelect) BoolX(ctx context.Context) bool {
func (ds *DecisionSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ds.sqlQuery().Query()
query, args := ds.sql.Query()
if err := ds.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (ds *DecisionSelect) sqlQuery() sql.Querier {
selector := ds.sql
selector.Select(selector.Columns(ds.fields...)...)
return selector
}

View file

@ -22,9 +22,9 @@ type DecisionUpdate struct {
mutation *DecisionMutation
}
// Where adds a new predicate for the DecisionUpdate builder.
// Where appends a list of predicates to the DecisionUpdate builder.
func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate {
du.mutation.predicates = append(du.mutation.predicates, ps...)
du.mutation.Where(ps...)
return du
}
@ -291,6 +291,9 @@ func (du *DecisionUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(du.hooks) - 1; i >= 0; i-- {
if du.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = du.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, du.mutation); err != nil {
@ -541,8 +544,8 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{decision.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -552,6 +555,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
// DecisionUpdateOne is the builder for updating a single Decision entity.
type DecisionUpdateOne struct {
config
fields []string
hooks []Hook
mutation *DecisionMutation
}
@ -799,6 +803,13 @@ func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne {
return duo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne {
duo.fields = append([]string{field}, fields...)
return duo
}
// Save executes the query and returns the updated Decision entity.
func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) {
var (
@ -819,6 +830,9 @@ func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) {
return node, err
})
for i := len(duo.hooks) - 1; i >= 0; i-- {
if duo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = duo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, duo.mutation); err != nil {
@ -866,6 +880,18 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Decision.ID for update")}
}
_spec.Node.ID.Value = id
if fields := duo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID)
for _, f := range fields {
if !decision.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != decision.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := duo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -1077,8 +1103,8 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{decision.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -7,9 +7,13 @@ import (
"fmt"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
)
// ent aliases to avoid import conflicts in user's code.
@ -25,36 +29,60 @@ type (
)
// OrderFunc applies an ordering on the sql selector.
type OrderFunc func(*sql.Selector, func(string) bool)
type OrderFunc func(*sql.Selector)
// columnChecker returns a function that indicates whether the column exists in the given table.
func columnChecker(table string) func(string) error {
checks := map[string]func(string) bool{
alert.Table: alert.ValidColumn,
bouncer.Table: bouncer.ValidColumn,
decision.Table: decision.ValidColumn,
event.Table: event.ValidColumn,
machine.Table: machine.ValidColumn,
meta.Table: meta.ValidColumn,
}
check, ok := checks[table]
if !ok {
return func(string) error {
return fmt.Errorf("unknown table %q", table)
}
}
return func(column string) error {
if !check(column) {
return fmt.Errorf("unknown column %q for table %q", column, table)
}
return nil
}
}
// Asc applies the given fields in ASC order.
func Asc(fields ...string) OrderFunc {
return func(s *sql.Selector, check func(string) bool) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if check(f) {
s.OrderBy(sql.Asc(f))
} else {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
if err := check(f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Asc(s.C(f)))
}
}
}
// Desc applies the given fields in DESC order.
func Desc(fields ...string) OrderFunc {
return func(s *sql.Selector, check func(string) bool) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if check(f) {
s.OrderBy(sql.Desc(f))
} else {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
if err := check(f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Desc(s.C(f)))
}
}
}
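Callers keep the same surface despite the OrderFunc signature change; column validation now happens inside the closure, against the selector's own table. A sketch with a hypothetical client and ctx:

// Most recent alerts first; an invalid column would surface via s.AddError.
alerts, err := client.Alert.Query().
	Order(ent.Desc(alert.FieldCreatedAt)).
	Limit(10).
	All(ctx)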
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
type AggregateFunc func(*sql.Selector, func(string) bool) string
type AggregateFunc func(*sql.Selector) string
// As is a pseudo aggregation function for renaming other aggregation functions with custom names. For example:
//
@ -63,23 +91,24 @@ type AggregateFunc func(*sql.Selector, func(string) bool) string
// Scan(ctx, &v)
//
func As(fn AggregateFunc, end string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
return sql.As(fn(s, check), end)
return func(s *sql.Selector) string {
return sql.As(fn(s), end)
}
}
// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
return func(s *sql.Selector, _ func(string) bool) string {
return func(s *sql.Selector) string {
return sql.Count("*")
}
}
// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Max(s.C(field))
@ -88,9 +117,10 @@ func Max(field string) AggregateFunc {
// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Avg(s.C(field))
@ -99,9 +129,10 @@ func Mean(field string) AggregateFunc {
// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Min(s.C(field))
@ -110,9 +141,10 @@ func Min(field string) AggregateFunc {
// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Sum(s.C(field))
@ -135,7 +167,7 @@ func (e *ValidationError) Unwrap() error {
return e.err
}
// IsValidationError returns a boolean indicating whether the error is a validaton error.
// IsValidationError returns a boolean indicating whether the error is a validation error.
func IsValidationError(err error) bool {
if err == nil {
return false
@ -235,21 +267,3 @@ func IsConstraintError(err error) bool {
var e *ConstraintError
return errors.As(err, &e)
}
func isSQLConstraintError(err error) (*ConstraintError, bool) {
if sqlgraph.IsConstraintError(err) {
return &ConstraintError{err.Error(), err}, true
}
return nil, false
}
// rollback calls tx.Rollback and wraps the given error with the rollback error if present.
func rollback(tx dialect.Tx, err error) error {
if rerr := tx.Rollback(); rerr != nil {
err = fmt.Errorf("%w: %v", err, rerr)
}
if err, ok := isSQLConstraintError(err); ok {
return err
}
return err
}

View file

@ -60,13 +60,13 @@ func (*Event) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case event.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case event.FieldSerialized:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case event.ForeignKeys[0]: // alert_events
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Event", columns[i])
}

View file

@ -23,7 +23,7 @@ const (
EdgeOwner = "owner"
// Table holds the table name of the event in the database.
Table = "events"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "events"
// OwnerInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

View file

@ -107,11 +107,17 @@ func (ec *EventCreate) Save(ctx context.Context) (*Event, error) {
return nil, err
}
ec.mutation = mutation
node, err = ec.sqlSave(ctx)
if node, err = ec.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ec.hooks) - 1; i >= 0; i-- {
if ec.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ec.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ec.mutation); err != nil {
@ -130,6 +136,19 @@ func (ec *EventCreate) SaveX(ctx context.Context) *Event {
return v
}
// Exec executes the query.
func (ec *EventCreate) Exec(ctx context.Context) error {
_, err := ec.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (ec *EventCreate) ExecX(ctx context.Context) {
if err := ec.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (ec *EventCreate) defaults() {
if _, ok := ec.mutation.CreatedAt(); !ok {
@ -145,20 +164,20 @@ func (ec *EventCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (ec *EventCreate) check() error {
if _, ok := ec.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := ec.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := ec.mutation.Time(); !ok {
return &ValidationError{Name: "time", err: errors.New("ent: missing required field \"time\"")}
return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "time"`)}
}
if _, ok := ec.mutation.Serialized(); !ok {
return &ValidationError{Name: "serialized", err: errors.New("ent: missing required field \"serialized\"")}
return &ValidationError{Name: "serialized", err: errors.New(`ent: missing required field "serialized"`)}
}
if v, ok := ec.mutation.Serialized(); ok {
if err := event.SerializedValidator(v); err != nil {
return &ValidationError{Name: "serialized", err: fmt.Errorf("ent: validator failed for field \"serialized\": %w", err)}
return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "serialized": %w`, err)}
}
}
return nil
@ -167,8 +186,8 @@ func (ec *EventCreate) check() error {
func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) {
_node, _spec := ec.createSpec()
if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@ -272,19 +291,23 @@ func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, ecb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, ecb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -309,3 +332,16 @@ func (ecb *EventCreateBulk) SaveX(ctx context.Context) []*Event {
}
return v
}
// Exec executes the query.
func (ecb *EventCreateBulk) Exec(ctx context.Context) error {
_, err := ecb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (ecb *EventCreateBulk) ExecX(ctx context.Context) {
if err := ecb.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -20,9 +20,9 @@ type EventDelete struct {
mutation *EventMutation
}
// Where adds a new predicate to the EventDelete builder.
// Where appends a list of predicates to the EventDelete builder.
func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete {
ed.mutation.predicates = append(ed.mutation.predicates, ps...)
ed.mutation.Where(ps...)
return ed
}
@ -46,6 +46,9 @@ func (ed *EventDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(ed.hooks) - 1; i >= 0; i-- {
if ed.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ed.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ed.mutation); err != nil {

View file

@ -21,6 +21,7 @@ type EventQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Event
@ -50,6 +51,13 @@ func (eq *EventQuery) Offset(offset int) *EventQuery {
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EventQuery) Unique(unique bool) *EventQuery {
eq.unique = &unique
return eq
}
// Order adds an order step to the query.
func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery {
eq.order = append(eq.order, o...)
@ -317,8 +325,8 @@ func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy {
// Select(event.FieldCreatedAt).
// Scan(ctx, &v)
//
func (eq *EventQuery) Select(field string, fields ...string) *EventSelect {
eq.fields = append([]string{field}, fields...)
func (eq *EventQuery) Select(fields ...string) *EventSelect {
eq.fields = append(eq.fields, fields...)
return &EventSelect{EventQuery: eq}
}
@ -377,11 +385,14 @@ func (eq *EventQuery) sqlAll(ctx context.Context) ([]*Event, error) {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*Event)
for i := range nodes {
fk := nodes[i].alert_events
if fk != nil {
ids = append(ids, *fk)
nodeids[*fk] = append(nodeids[*fk], nodes[i])
if nodes[i].alert_events == nil {
continue
}
fk := *nodes[i].alert_events
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
@ -428,6 +439,9 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
From: eq.sql,
Unique: true,
}
if unique := eq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := eq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, event.FieldID)
@ -453,7 +467,7 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
if ps := eq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, event.ValidColumn)
ps[i](selector)
}
}
}
@ -463,16 +477,20 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(event.Table)
selector := builder.Select(t1.Columns(event.Columns...)...).From(t1)
columns := eq.fields
if len(columns) == 0 {
columns = event.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if eq.sql != nil {
selector = eq.sql
selector.Select(selector.Columns(event.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range eq.predicates {
p(selector)
}
for _, p := range eq.order {
p(selector, event.ValidColumn)
p(selector)
}
if offset := eq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -734,13 +752,24 @@ func (egb *EventGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (egb *EventGroupBy) sqlQuery() *sql.Selector {
selector := egb.sql
columns := make([]string, 0, len(egb.fields)+len(egb.fns))
columns = append(columns, egb.fields...)
selector := egb.sql.Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
columns = append(columns, fn(selector, event.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(egb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(egb.fields)+len(egb.fns))
for _, f := range egb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(egb.fields...)...)
}
// EventSelect is the builder for selecting fields of Event entities.
@ -956,16 +985,10 @@ func (es *EventSelect) BoolX(ctx context.Context) bool {
func (es *EventSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := es.sqlQuery().Query()
query, args := es.sql.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (es *EventSelect) sqlQuery() sql.Querier {
selector := es.sql
selector.Select(selector.Columns(es.fields...)...)
return selector
}

View file

@ -22,9 +22,9 @@ type EventUpdate struct {
mutation *EventMutation
}
// Where adds a new predicate for the EventUpdate builder.
// Where appends a list of predicates to the EventUpdate builder.
func (eu *EventUpdate) Where(ps ...predicate.Event) *EventUpdate {
eu.mutation.predicates = append(eu.mutation.predicates, ps...)
eu.mutation.Where(ps...)
return eu
}
@ -124,6 +124,9 @@ func (eu *EventUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(eu.hooks) - 1; i >= 0; i-- {
if eu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = eu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, eu.mutation); err != nil {
@ -249,8 +252,8 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, eu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{event.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -260,6 +263,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
// EventUpdateOne is the builder for updating a single Event entity.
type EventUpdateOne struct {
config
fields []string
hooks []Hook
mutation *EventMutation
}
@ -334,6 +338,13 @@ func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne {
return euo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne {
euo.fields = append([]string{field}, fields...)
return euo
}
// Save executes the query and returns the updated Event entity.
func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) {
var (
@ -360,6 +371,9 @@ func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) {
return node, err
})
for i := len(euo.hooks) - 1; i >= 0; i-- {
if euo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = euo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, euo.mutation); err != nil {
@ -417,6 +431,18 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Event.ID for update")}
}
_spec.Node.ID.Value = id
if fields := euo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, event.FieldID)
for _, f := range fields {
if !event.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != event.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := euo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -493,8 +519,8 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
if err = sqlgraph.UpdateNode(ctx, euo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{event.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -0,0 +1,4 @@
package ent
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema
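With this directive in place, running `go generate` in this package (or `go generate ./...` from the repository root) re-invokes the ent code generator against the schema directory and rewrites the generated files touched in this diff.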

View file

@ -63,13 +63,13 @@ func (*Machine) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case machine.FieldIsValidated:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case machine.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case machine.FieldCreatedAt, machine.FieldUpdatedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i])
}

View file

@ -33,7 +33,7 @@ const (
EdgeAlerts = "alerts"
// Table holds the table name of the machine in the database.
Table = "machines"
// AlertsTable is the table the holds the alerts relation/edge.
// AlertsTable is the table that holds the alerts relation/edge.
AlertsTable = "alerts"
// AlertsInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

View file

@ -165,11 +165,17 @@ func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) {
return nil, err
}
mc.mutation = mutation
node, err = mc.sqlSave(ctx)
if node, err = mc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(mc.hooks) - 1; i >= 0; i-- {
if mc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mc.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mc.mutation); err != nil {
@ -188,6 +194,19 @@ func (mc *MachineCreate) SaveX(ctx context.Context) *Machine {
return v
}
// Exec executes the query.
func (mc *MachineCreate) Exec(ctx context.Context) error {
_, err := mc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (mc *MachineCreate) ExecX(ctx context.Context) {
if err := mc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (mc *MachineCreate) defaults() {
if _, ok := mc.mutation.CreatedAt(); !ok {
@ -207,27 +226,27 @@ func (mc *MachineCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (mc *MachineCreate) check() error {
if _, ok := mc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := mc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := mc.mutation.MachineId(); !ok {
return &ValidationError{Name: "machineId", err: errors.New("ent: missing required field \"machineId\"")}
return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "machineId"`)}
}
if _, ok := mc.mutation.Password(); !ok {
return &ValidationError{Name: "password", err: errors.New("ent: missing required field \"password\"")}
return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "password"`)}
}
if _, ok := mc.mutation.IpAddress(); !ok {
return &ValidationError{Name: "ipAddress", err: errors.New("ent: missing required field \"ipAddress\"")}
return &ValidationError{Name: "ipAddress", err: errors.New(`ent: missing required field "ipAddress"`)}
}
if v, ok := mc.mutation.Scenarios(); ok {
if err := machine.ScenariosValidator(v); err != nil {
return &ValidationError{Name: "scenarios", err: fmt.Errorf("ent: validator failed for field \"scenarios\": %w", err)}
return &ValidationError{Name: "scenarios", err: fmt.Errorf(`ent: validator failed for field "scenarios": %w`, err)}
}
}
if _, ok := mc.mutation.IsValidated(); !ok {
return &ValidationError{Name: "isValidated", err: errors.New("ent: missing required field \"isValidated\"")}
return &ValidationError{Name: "isValidated", err: errors.New(`ent: missing required field "isValidated"`)}
}
return nil
}
@ -235,8 +254,8 @@ func (mc *MachineCreate) check() error {
func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) {
_node, _spec := mc.createSpec()
if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
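
For callers, the visible effect of this hunk is that constraint violations now surface as the generated *ConstraintError wrapping the driver error, detectable with errors.As. A hedged sketch against this repository's generated package (the helper name is ours):

import (
	"errors"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// isConstraintViolation reports whether err is, or wraps, the
// generated constraint error, e.g. a duplicate machineId insert.
func isConstraintViolation(err error) bool {
	var cerr *ent.ConstraintError
	return errors.As(err, &cerr)
}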
@ -379,19 +398,23 @@ func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -416,3 +439,16 @@ func (mcb *MachineCreateBulk) SaveX(ctx context.Context) []*Machine {
}
return v
}
// Exec executes the query.
func (mcb *MachineCreateBulk) Exec(ctx context.Context) error {
_, err := mcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (mcb *MachineCreateBulk) ExecX(ctx context.Context) {
if err := mcb.Exec(ctx); err != nil {
panic(err)
}
}
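
The Exec/ExecX pair added to both builders rounds out the API: Save returns the created entity, Exec keeps only the error. A usage sketch, assuming an initialized *ent.Client and illustrative field values:

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// registerMachine discards the created row; many call sites only
// need to know whether the insert succeeded.
func registerMachine(ctx context.Context, client *ent.Client) error {
	return client.Machine.Create().
		SetMachineId("ci-test-machine"). // illustrative values
		SetPassword("s3cret").
		SetIpAddress("127.0.0.1").
		Exec(ctx)
}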

View file

@ -20,9 +20,9 @@ type MachineDelete struct {
mutation *MachineMutation
}
// Where adds a new predicate to the MachineDelete builder.
// Where appends a list predicates to the MachineDelete builder.
func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete {
md.mutation.predicates = append(md.mutation.predicates, ps...)
md.mutation.Where(ps...)
return md
}
@ -46,6 +46,9 @@ func (md *MachineDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(md.hooks) - 1; i >= 0; i-- {
if md.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = md.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, md.mutation); err != nil {
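
The nil-hook guard repeated across these builders rejects a hook slice containing an uninitialized entry (typically a forgotten ent/runtime import). For context, hooks are normally installed once on the client; a sketch of a client-wide logging mutator, under the same package assumptions as above:

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// withMutationLogging registers a hook that every generated builder
// will walk, in reverse order, before mutating.
func withMutationLogging(client *ent.Client) {
	client.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			log.Printf("mutation op=%v type=%s", m.Op(), m.Type())
			return next.Mutate(ctx, m)
		})
	})
}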

View file

@ -22,6 +22,7 @@ type MachineQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Machine
@ -50,6 +51,13 @@ func (mq *MachineQuery) Offset(offset int) *MachineQuery {
return mq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (mq *MachineQuery) Unique(unique bool) *MachineQuery {
mq.unique = &unique
return mq
}
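
Unique toggles the DISTINCT that the query builder applies by default; disabling it mainly matters when edge predicates introduce joins. Sketch, assuming an initialized client (same imports as the earlier sketches):

// listMachinesRaw lists machines without the implicit row
// de-duplication.
func listMachinesRaw(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Unique(false).
		All(ctx)
}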
// Order adds an order step to the query.
func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery {
mq.order = append(mq.order, o...)
@ -317,8 +325,8 @@ func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy
// Select(machine.FieldCreatedAt).
// Scan(ctx, &v)
//
func (mq *MachineQuery) Select(field string, fields ...string) *MachineSelect {
mq.fields = append([]string{field}, fields...)
func (mq *MachineQuery) Select(fields ...string) *MachineSelect {
mq.fields = append(mq.fields, fields...)
return &MachineSelect{MachineQuery: mq}
}
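
With the variadic signature, Select no longer needs a leading field argument and composes across calls. A sketch of pulling one column, using this package's generated machine constants:

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// machineIDs scans a single selected column into a string slice.
func machineIDs(ctx context.Context, client *ent.Client) ([]string, error) {
	return client.Machine.Query().
		Select(machine.FieldMachineId).
		Strings(ctx)
}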
@ -424,6 +432,9 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
From: mq.sql,
Unique: true,
}
if unique := mq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := mq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID)
@ -449,7 +460,7 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
if ps := mq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, machine.ValidColumn)
ps[i](selector)
}
}
}
@ -459,16 +470,20 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(mq.driver.Dialect())
t1 := builder.Table(machine.Table)
selector := builder.Select(t1.Columns(machine.Columns...)...).From(t1)
columns := mq.fields
if len(columns) == 0 {
columns = machine.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if mq.sql != nil {
selector = mq.sql
selector.Select(selector.Columns(machine.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range mq.predicates {
p(selector)
}
for _, p := range mq.order {
p(selector, machine.ValidColumn)
p(selector)
}
if offset := mq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -730,13 +745,24 @@ func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (mgb *MachineGroupBy) sqlQuery() *sql.Selector {
selector := mgb.sql
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
columns = append(columns, mgb.fields...)
selector := mgb.sql.Select()
aggregation := make([]string, 0, len(mgb.fns))
for _, fn := range mgb.fns {
columns = append(columns, fn(selector, machine.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(mgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
for _, f := range mgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(mgb.fields...)...)
}
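
The rewritten group-by only injects the default selection (the group-by fields plus the aggregations) when the caller selected nothing custom, so typical usage is unchanged. Sketch, counting machines per status with the generated aggregation helper (same imports as the single-column sketch above):

// machinesPerStatus fills one row per distinct status value.
func machinesPerStatus(ctx context.Context, client *ent.Client) error {
	var rows []struct {
		Status string `json:"status"`
		Count  int    `json:"count"`
	}
	return client.Machine.Query().
		GroupBy(machine.FieldStatus).
		Aggregate(ent.Count()).
		Scan(ctx, &rows)
}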
// MachineSelect is the builder for selecting fields of Machine entities.
@ -952,16 +978,10 @@ func (ms *MachineSelect) BoolX(ctx context.Context) bool {
func (ms *MachineSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ms.sqlQuery().Query()
query, args := ms.sql.Query()
if err := ms.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (ms *MachineSelect) sqlQuery() sql.Querier {
selector := ms.sql
selector.Select(selector.Columns(ms.fields...)...)
return selector
}

View file

@ -22,9 +22,9 @@ type MachineUpdate struct {
mutation *MachineMutation
}
// Where adds a new predicate for the MachineUpdate builder.
// Where appends a list predicates to the MachineUpdate builder.
func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate {
mu.mutation.predicates = append(mu.mutation.predicates, ps...)
mu.mutation.Where(ps...)
return mu
}
@ -215,6 +215,9 @@ func (mu *MachineUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(mu.hooks) - 1; i >= 0; i-- {
if mu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mu.mutation); err != nil {
@ -412,8 +415,8 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{machine.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -423,6 +426,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
// MachineUpdateOne is the builder for updating a single Machine entity.
type MachineUpdateOne struct {
config
fields []string
hooks []Hook
mutation *MachineMutation
}
@ -588,6 +592,13 @@ func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne {
return muo.RemoveAlertIDs(ids...)
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne {
muo.fields = append([]string{field}, fields...)
return muo
}
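
Select on the update-one builder is new: it limits which columns are read back into the returned entity after the UPDATE (each field is validated against machine.ValidColumn in sqlSave further down). Sketch, same imports as above:

// validateMachine flips the flag but only hydrates two fields on
// the returned entity.
func validateMachine(ctx context.Context, client *ent.Client, id int) (*ent.Machine, error) {
	return client.Machine.UpdateOneID(id).
		SetIsValidated(true).
		Select(machine.FieldMachineId, machine.FieldIsValidated).
		Save(ctx)
}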
// Save executes the query and returns the updated Machine entity.
func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) {
var (
@ -614,6 +625,9 @@ func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) {
return node, err
})
for i := len(muo.hooks) - 1; i >= 0; i-- {
if muo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = muo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, muo.mutation); err != nil {
@ -671,6 +685,18 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Machine.ID for update")}
}
_spec.Node.ID.Value = id
if fields := muo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID)
for _, f := range fields {
if !machine.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != machine.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := muo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -819,8 +845,8 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{machine.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -60,13 +60,13 @@ func (*Meta) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case meta.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case meta.FieldKey, meta.FieldValue:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case meta.FieldCreatedAt, meta.FieldUpdatedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case meta.ForeignKeys[0]: // alert_metas
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i])
}

View file

@ -23,7 +23,7 @@ const (
EdgeOwner = "owner"
// Table holds the table name of the meta in the database.
Table = "meta"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "meta"
// OwnerInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

View file

@ -107,11 +107,17 @@ func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) {
return nil, err
}
mc.mutation = mutation
node, err = mc.sqlSave(ctx)
if node, err = mc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(mc.hooks) - 1; i >= 0; i-- {
if mc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mc.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mc.mutation); err != nil {
@ -130,6 +136,19 @@ func (mc *MetaCreate) SaveX(ctx context.Context) *Meta {
return v
}
// Exec executes the query.
func (mc *MetaCreate) Exec(ctx context.Context) error {
_, err := mc.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (mc *MetaCreate) ExecX(ctx context.Context) {
if err := mc.Exec(ctx); err != nil {
panic(err)
}
}
// defaults sets the default values of the builder before save.
func (mc *MetaCreate) defaults() {
if _, ok := mc.mutation.CreatedAt(); !ok {
@ -145,20 +164,20 @@ func (mc *MetaCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (mc *MetaCreate) check() error {
if _, ok := mc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := mc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := mc.mutation.Key(); !ok {
return &ValidationError{Name: "key", err: errors.New("ent: missing required field \"key\"")}
return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "key"`)}
}
if _, ok := mc.mutation.Value(); !ok {
return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")}
return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "value"`)}
}
if v, ok := mc.mutation.Value(); ok {
if err := meta.ValueValidator(v); err != nil {
return &ValidationError{Name: "value", err: fmt.Errorf("ent: validator failed for field \"value\": %w", err)}
return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "value": %w`, err)}
}
}
return nil
@ -167,8 +186,8 @@ func (mc *MetaCreate) check() error {
func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) {
_node, _spec := mc.createSpec()
if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@ -272,19 +291,23 @@ func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {
@ -309,3 +332,16 @@ func (mcb *MetaCreateBulk) SaveX(ctx context.Context) []*Meta {
}
return v
}
// Exec executes the query.
func (mcb *MetaCreateBulk) Exec(ctx context.Context) error {
_, err := mcb.Save(ctx)
return err
}
// ExecX is like Exec, but panics if an error occurs.
func (mcb *MetaCreateBulk) ExecX(ctx context.Context) {
if err := mcb.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -20,9 +20,9 @@ type MetaDelete struct {
mutation *MetaMutation
}
// Where adds a new predicate to the MetaDelete builder.
// Where appends a list predicates to the MetaDelete builder.
func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete {
md.mutation.predicates = append(md.mutation.predicates, ps...)
md.mutation.Where(ps...)
return md
}
@ -46,6 +46,9 @@ func (md *MetaDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(md.hooks) - 1; i >= 0; i-- {
if md.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = md.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, md.mutation); err != nil {

View file

@ -21,6 +21,7 @@ type MetaQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Meta
@ -50,6 +51,13 @@ func (mq *MetaQuery) Offset(offset int) *MetaQuery {
return mq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (mq *MetaQuery) Unique(unique bool) *MetaQuery {
mq.unique = &unique
return mq
}
// Order adds an order step to the query.
func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery {
mq.order = append(mq.order, o...)
@ -317,8 +325,8 @@ func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy {
// Select(meta.FieldCreatedAt).
// Scan(ctx, &v)
//
func (mq *MetaQuery) Select(field string, fields ...string) *MetaSelect {
mq.fields = append([]string{field}, fields...)
func (mq *MetaQuery) Select(fields ...string) *MetaSelect {
mq.fields = append(mq.fields, fields...)
return &MetaSelect{MetaQuery: mq}
}
@ -377,11 +385,14 @@ func (mq *MetaQuery) sqlAll(ctx context.Context) ([]*Meta, error) {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*Meta)
for i := range nodes {
fk := nodes[i].alert_metas
if fk != nil {
ids = append(ids, *fk)
nodeids[*fk] = append(nodeids[*fk], nodes[i])
if nodes[i].alert_metas == nil {
continue
}
fk := *nodes[i].alert_metas
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
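
The rewritten collection skips rows with a nil foreign key and de-duplicates ids, so this eager load issues one IN(...) over unique alert ids. The call shape that exercises it, sketched (same imports as the earlier sketches):

// metasWithOwner loads metas together with their owning alerts in
// two queries.
func metasWithOwner(ctx context.Context, client *ent.Client) ([]*ent.Meta, error) {
	return client.Meta.Query().
		WithOwner().
		All(ctx)
}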
@ -428,6 +439,9 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
From: mq.sql,
Unique: true,
}
if unique := mq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := mq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID)
@ -453,7 +467,7 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
if ps := mq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, meta.ValidColumn)
ps[i](selector)
}
}
}
@ -463,16 +477,20 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(mq.driver.Dialect())
t1 := builder.Table(meta.Table)
selector := builder.Select(t1.Columns(meta.Columns...)...).From(t1)
columns := mq.fields
if len(columns) == 0 {
columns = meta.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if mq.sql != nil {
selector = mq.sql
selector.Select(selector.Columns(meta.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range mq.predicates {
p(selector)
}
for _, p := range mq.order {
p(selector, meta.ValidColumn)
p(selector)
}
if offset := mq.offset; offset != nil {
// limit is mandatory for offset clause. We start
@ -734,13 +752,24 @@ func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}
func (mgb *MetaGroupBy) sqlQuery() *sql.Selector {
selector := mgb.sql
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
columns = append(columns, mgb.fields...)
selector := mgb.sql.Select()
aggregation := make([]string, 0, len(mgb.fns))
for _, fn := range mgb.fns {
columns = append(columns, fn(selector, meta.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(mgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
for _, f := range mgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(mgb.fields...)...)
}
// MetaSelect is the builder for selecting fields of Meta entities.
@ -956,16 +985,10 @@ func (ms *MetaSelect) BoolX(ctx context.Context) bool {
func (ms *MetaSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ms.sqlQuery().Query()
query, args := ms.sql.Query()
if err := ms.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (ms *MetaSelect) sqlQuery() sql.Querier {
selector := ms.sql
selector.Select(selector.Columns(ms.fields...)...)
return selector
}

View file

@ -22,9 +22,9 @@ type MetaUpdate struct {
mutation *MetaMutation
}
// Where adds a new predicate for the MetaUpdate builder.
// Where appends a list predicates to the MetaUpdate builder.
func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate {
mu.mutation.predicates = append(mu.mutation.predicates, ps...)
mu.mutation.Where(ps...)
return mu
}
@ -124,6 +124,9 @@ func (mu *MetaUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(mu.hooks) - 1; i >= 0; i-- {
if mu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mu.mutation); err != nil {
@ -249,8 +252,8 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{meta.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}
@ -260,6 +263,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
// MetaUpdateOne is the builder for updating a single Meta entity.
type MetaUpdateOne struct {
config
fields []string
hooks []Hook
mutation *MetaMutation
}
@ -334,6 +338,13 @@ func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne {
return muo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne {
muo.fields = append([]string{field}, fields...)
return muo
}
// Save executes the query and returns the updated Meta entity.
func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) {
var (
@ -360,6 +371,9 @@ func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) {
return node, err
})
for i := len(muo.hooks) - 1; i >= 0; i-- {
if muo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = muo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, muo.mutation); err != nil {
@ -417,6 +431,18 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Meta.ID for update")}
}
_spec.Node.ID.Value = id
if fields := muo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID)
for _, f := range fields {
if !meta.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != meta.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := muo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -493,8 +519,8 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{meta.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

View file

@ -48,6 +48,13 @@ var (
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "alert_id",
Unique: false,
Columns: []*schema.Column{AlertsColumns[0]},
},
},
}
// BouncersColumns holds the columns for the "bouncers" table.
BouncersColumns = []*schema.Column{
@ -68,7 +75,6 @@ var (
Name: "bouncers",
Columns: BouncersColumns,
PrimaryKey: []*schema.Column{BouncersColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// DecisionsColumns holds the columns for the "decisions" table.
DecisionsColumns = []*schema.Column{
@ -99,7 +105,7 @@ var (
Symbol: "decisions_alerts_decisions",
Columns: []*schema.Column{DecisionsColumns[15]},
RefColumns: []*schema.Column{AlertsColumns[0]},
OnDelete: schema.SetNull,
OnDelete: schema.Cascade,
},
},
}
@ -109,7 +115,7 @@ var (
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "time", Type: field.TypeTime},
{Name: "serialized", Type: field.TypeString, Size: 4095},
{Name: "serialized", Type: field.TypeString, Size: 8191},
{Name: "alert_events", Type: field.TypeInt, Nullable: true},
}
// EventsTable holds the schema information for the "events" table.
@ -122,7 +128,7 @@ var (
Symbol: "events_alerts_events",
Columns: []*schema.Column{EventsColumns[5]},
RefColumns: []*schema.Column{AlertsColumns[0]},
OnDelete: schema.SetNull,
OnDelete: schema.Cascade,
},
},
}
@ -144,7 +150,6 @@ var (
Name: "machines",
Columns: MachinesColumns,
PrimaryKey: []*schema.Column{MachinesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// MetaColumns holds the columns for the "meta" table.
MetaColumns = []*schema.Column{
@ -165,7 +170,7 @@ var (
Symbol: "meta_alerts_metas",
Columns: []*schema.Column{MetaColumns[5]},
RefColumns: []*schema.Column{AlertsColumns[0]},
OnDelete: schema.SetNull,
OnDelete: schema.Cascade,
},
},
}

View file

@ -155,8 +155,8 @@ func (m AlertMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *AlertMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -1284,7 +1284,7 @@ func (m *AlertMutation) ClearOwner() {
m.clearedowner = true
}
// OwnerCleared returns if the "owner" edge to the Machine entity was cleared.
// OwnerCleared reports if the "owner" edge to the Machine entity was cleared.
func (m *AlertMutation) OwnerCleared() bool {
return m.clearedowner
}
@ -1328,7 +1328,7 @@ func (m *AlertMutation) ClearDecisions() {
m.cleareddecisions = true
}
// DecisionsCleared returns if the "decisions" edge to the Decision entity was cleared.
// DecisionsCleared reports if the "decisions" edge to the Decision entity was cleared.
func (m *AlertMutation) DecisionsCleared() bool {
return m.cleareddecisions
}
@ -1339,6 +1339,7 @@ func (m *AlertMutation) RemoveDecisionIDs(ids ...int) {
m.removeddecisions = make(map[int]struct{})
}
for i := range ids {
delete(m.decisions, ids[i])
m.removeddecisions[ids[i]] = struct{}{}
}
}
@ -1381,7 +1382,7 @@ func (m *AlertMutation) ClearEvents() {
m.clearedevents = true
}
// EventsCleared returns if the "events" edge to the Event entity was cleared.
// EventsCleared reports if the "events" edge to the Event entity was cleared.
func (m *AlertMutation) EventsCleared() bool {
return m.clearedevents
}
@ -1392,6 +1393,7 @@ func (m *AlertMutation) RemoveEventIDs(ids ...int) {
m.removedevents = make(map[int]struct{})
}
for i := range ids {
delete(m.events, ids[i])
m.removedevents[ids[i]] = struct{}{}
}
}
@ -1434,7 +1436,7 @@ func (m *AlertMutation) ClearMetas() {
m.clearedmetas = true
}
// MetasCleared returns if the "metas" edge to the Meta entity was cleared.
// MetasCleared reports if the "metas" edge to the Meta entity was cleared.
func (m *AlertMutation) MetasCleared() bool {
return m.clearedmetas
}
@ -1445,6 +1447,7 @@ func (m *AlertMutation) RemoveMetaIDs(ids ...int) {
m.removedmetas = make(map[int]struct{})
}
for i := range ids {
delete(m.metas, ids[i])
m.removedmetas[ids[i]] = struct{}{}
}
}
@ -1472,6 +1475,11 @@ func (m *AlertMutation) ResetMetas() {
m.removedmetas = nil
}
// Where appends a list predicates to the AlertMutation builder.
func (m *AlertMutation) Where(ps ...predicate.Alert) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *AlertMutation) Op() Op {
return m.op
@ -2348,8 +2356,8 @@ func (m BouncerMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *BouncerMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -2769,6 +2777,11 @@ func (m *BouncerMutation) ResetLastPull() {
m.last_pull = nil
}
// Where appends a list predicates to the BouncerMutation builder.
func (m *BouncerMutation) Where(ps ...predicate.Bouncer) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *BouncerMutation) Op() Op {
return m.op
@ -3211,8 +3224,8 @@ func (m DecisionMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *DecisionMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -3904,7 +3917,7 @@ func (m *DecisionMutation) ClearOwner() {
m.clearedowner = true
}
// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *DecisionMutation) OwnerCleared() bool {
return m.clearedowner
}
@ -3933,6 +3946,11 @@ func (m *DecisionMutation) ResetOwner() {
m.clearedowner = false
}
// Where appends a list predicates to the DecisionMutation builder.
func (m *DecisionMutation) Where(ps ...predicate.Decision) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *DecisionMutation) Op() Op {
return m.op
@ -4525,8 +4543,8 @@ func (m EventMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *EventMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -4688,7 +4706,7 @@ func (m *EventMutation) ClearOwner() {
m.clearedowner = true
}
// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *EventMutation) OwnerCleared() bool {
return m.clearedowner
}
@ -4717,6 +4735,11 @@ func (m *EventMutation) ResetOwner() {
m.clearedowner = false
}
// Where appends a list predicates to the EventMutation builder.
func (m *EventMutation) Where(ps ...predicate.Event) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *EventMutation) Op() Op {
return m.op
@ -5049,8 +5072,8 @@ func (m MachineMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *MachineMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -5436,7 +5459,7 @@ func (m *MachineMutation) ClearAlerts() {
m.clearedalerts = true
}
// AlertsCleared returns if the "alerts" edge to the Alert entity was cleared.
// AlertsCleared reports if the "alerts" edge to the Alert entity was cleared.
func (m *MachineMutation) AlertsCleared() bool {
return m.clearedalerts
}
@ -5447,6 +5470,7 @@ func (m *MachineMutation) RemoveAlertIDs(ids ...int) {
m.removedalerts = make(map[int]struct{})
}
for i := range ids {
delete(m.alerts, ids[i])
m.removedalerts[ids[i]] = struct{}{}
}
}
@ -5474,6 +5498,11 @@ func (m *MachineMutation) ResetAlerts() {
m.removedalerts = nil
}
// Where appends a list predicates to the MachineMutation builder.
func (m *MachineMutation) Where(ps ...predicate.Machine) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *MachineMutation) Op() Op {
return m.op
@ -5914,8 +5943,8 @@ func (m MetaMutation) Tx() (*Tx, error) {
return tx, nil
}
// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *MetaMutation) ID() (id int, exists bool) {
if m.id == nil {
return
@ -6077,7 +6106,7 @@ func (m *MetaMutation) ClearOwner() {
m.clearedowner = true
}
// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *MetaMutation) OwnerCleared() bool {
return m.clearedowner
}
@ -6106,6 +6135,11 @@ func (m *MetaMutation) ResetOwner() {
m.clearedowner = false
}
// Where appends a list predicates to the MetaMutation builder.
func (m *MetaMutation) Where(ps ...predicate.Meta) {
m.predicates = append(m.predicates, ps...)
}
// Op returns the operation name.
func (m *MetaMutation) Op() Op {
return m.op

View file

@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go
const (
Version = "v0.7.0" // Version of ent codegen.
Sum = "h1:E3EjO0cUL61DvUg5ZEZdxa4yTL+4SuZv0LqBExo8CQA=" // Sum of ent codegen.
Version = "v0.9.1" // Version of ent codegen.
Sum = "h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=" // Sum of ent codegen.
)

View file

@ -4,8 +4,10 @@ import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/entsql"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
)
// Alert holds the schema definition for the Alert entity.
@ -56,8 +58,23 @@ func (Alert) Edges() []ent.Edge {
edge.From("owner", Machine.Type).
Ref("alerts").
Unique(),
edge.To("decisions", Decision.Type),
edge.To("events", Event.Type),
edge.To("metas", Meta.Type),
edge.To("decisions", Decision.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
edge.To("events", Event.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
edge.To("metas", Meta.Type).
Annotations(entsql.Annotation{
OnDelete: entsql.Cascade,
}),
}
}
func (Alert) Indexes() []ent.Index {
return []ent.Index{
index.Fields("id"),
}
}
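
These edge annotations are the source of the migrate-table changes above: regeneration turns entsql.Annotation{OnDelete: entsql.Cascade} into ON DELETE CASCADE on the decisions, events and meta foreign keys. The observable effect, sketched against the generated client:

// purgeAlert deletes an alert; the cascade now removes its
// decisions, events and metas at the SQL layer, with no
// application-side cleanup.
func purgeAlert(ctx context.Context, client *ent.Client, alertID int) error {
	return client.Alert.DeleteOneID(alertID).Exec(ctx)
}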

View file

@ -77,6 +77,8 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
src.Scope = &leaky.scopeType.Scope
if v, ok := evt.Enriched["ASNumber"]; ok {
src.AsNumber = v
} else if v, ok := evt.Enriched["ASNNumber"]; ok {
src.AsNumber = v
}
if v, ok := evt.Enriched["IsoCode"]; ok {
src.Cn = v

View file

@ -44,7 +44,7 @@ type Config struct {
var (
metabaseDefaultUser = "crowdsec@crowdsec.net"
metabaseDefaultPassword = "!!Cr0wdS3c_M3t4b4s3??"
metabaseImage = "metabase/metabase:v0.37.0.2"
metabaseImage = "metabase/metabase:v0.41.4"
containerSharedFolder = "/metabase-data"
metabaseSQLiteDBURL = "https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip"
)

View file

@ -54,6 +54,7 @@ func GeoIpASN(field string, p *types.Event, ctx interface{}) (map[string]string,
return nil, nil
}
ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber)
ret["ASNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber)
ret["ASNOrg"] = record.AutonomousSystemOrganization
log.Tracef("geoip ASN %s -> %s, %s", field, ret["ASNNumber"], ret["ASNOrg"])
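
Taken together with the SourceFromEvent hunk above, the ASN now travels under both keys: the enricher writes the historical "ASNNumber" alongside the new "ASNumber", and the reader prefers the latter. The lookup order as a standalone sketch:

// asNumber mirrors the fallback in SourceFromEvent: prefer the new
// key, fall back to the historical double-N spelling.
func asNumber(enriched map[string]string) string {
	if v, ok := enriched["ASNumber"]; ok {
		return v
	}
	return enriched["ASNNumber"]
}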

View file

@ -198,7 +198,7 @@ var NodesHits = prometheus.NewCounterVec(
var NodesHitsOk = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_node_hits_ok_total",
Help: "Total events successfuly exited node.",
Help: "Total events successfully exited node.",
},
[]string{"source", "type", "name"},
)
@ -206,7 +206,7 @@ var NodesHitsOk = prometheus.NewCounterVec(
var NodesHitsKo = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_node_hits_ko_total",
Help: "Total events unsuccessfuly exited node.",
Help: "Total events unsuccessfully exited node.",
},
[]string{"source", "type", "name"},
)

View file

@ -34,7 +34,7 @@ function wait_for_service {
while ! nc -z localhost 6060; do
sleep 0.5
((count ++))
if [[ count == 21 ]]; then
if [[ $count == 21 ]]; then
fail "$@"
fi
done

View file

${CSCLI} bouncers list -ojson | ${JQ} '. | length == 0' || fail "expected 0 bouncers"
# we can add one bouncer - should we save token for later ?
${CSCLI} bouncers add ciTestBouncer || fail "failed to add bouncer"
# but we can't add it twice - we would get an error
${CSCLI} bouncers add ciTestBouncer -ojson 2>&1 | ${JQ} '.level == "error"' || fail "didn't receive the expected error"
# but we can't add it twice - we would get a fatal error
${CSCLI} bouncers add ciTestBouncer -ojson 2>&1 | ${JQ} '.level == "fatal"' || fail "didn't receive the expected error"
# we should have 1 bouncer
${CSCLI} bouncers list -ojson | ${JQ} '. | length == 1' || fail "expected 1 bouncers"

View file

@ -40,7 +40,7 @@ function setup_tests() {
while ! nc -z localhost 9999; do
sleep 0.5
((count ++))
if [[ count == 41 ]]; then
if [[ $count == 41 ]]; then
fail "mock server not up after 20s"
fi
done

View file

@ -2,6 +2,14 @@
BASE="./tests"
usage() {
echo "Usage:"
echo " ./wizard.sh -h Display this help message."
echo " ./test_env.sh -d ./tests Create test environment in './tests' folder"
exit 0
}
while [[ $# -gt 0 ]]
do
key="${1}"
@ -111,14 +119,4 @@ main() {
}
usage() {
echo "Usage:"
echo " ./wizard.sh -h Display this help message."
echo " ./env_test.sh -d ./tests Create test environment in './tests' folder"
exit 0
}
main

View file

@ -140,7 +140,7 @@ function down
rm -rf cs-firewall-bouncer-*
rm -f crowdsec-release.tgz
rm -f cs-firewall-bouncer.tgz
rm *.md5
rm -- *.md5
}
function assert_equal

Some files were not shown because too many files have changed in this diff.