Refactor Acquisition Interface (#773)
* Add new acquisition interface + new modules (cloudwatch, syslog) Co-authored-by: Sebastien Blot <sebastien@crowdsec.net>
This commit is contained in:
parent
71c1d9431f
commit
ce6a61df1c
47 changed files with 4468 additions and 1378 deletions
34
.github/workflows/ci_go-test.yml
vendored
34
.github/workflows/ci_go-test.yml
vendored
|
@ -1,5 +1,16 @@
|
|||
name: tests
|
||||
|
||||
#those env variables are for localstack, so we can emulate aws services
|
||||
env:
|
||||
AWS_HOST: localstack
|
||||
SERVICES: cloudwatch,logs
|
||||
#those are to mimic aws config
|
||||
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
|
||||
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
AWS_REGION: us-east-1
|
||||
#and to override our endpoint in aws sdk
|
||||
AWS_ENDPOINT_FORCE: http://localhost:4566
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
|
@ -19,6 +30,27 @@ jobs:
|
|||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
localstack:
|
||||
image: localstack/localstack:0.12.11
|
||||
ports:
|
||||
- 4566:4566 # Localstack exposes all services on same port
|
||||
env:
|
||||
SERVICES: ${{ env.SERVICES }}
|
||||
DEBUG: ""
|
||||
DATA_DIR: ""
|
||||
LAMBDA_EXECUTOR: ""
|
||||
KINESIS_ERROR_PROBABILITY: ""
|
||||
DOCKER_HOST: unix:///var/run/docker.sock
|
||||
HOST_TMP_FOLDER: "/tmp"
|
||||
HOSTNAME_EXTERNAL: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly
|
||||
# e.g sqs url will get localhost if we don't set this env to map our service
|
||||
options: >-
|
||||
--name=localstack
|
||||
--health-cmd="curl -sS 127.0.0.1:4566 || exit 1"
|
||||
--health-interval=10s
|
||||
--health-timeout=5s
|
||||
--health-retries=3
|
||||
steps:
|
||||
- name: Set up Go 1.13
|
||||
uses: actions/setup-go@v1
|
||||
|
@ -40,4 +72,4 @@ jobs:
|
|||
uses: coverallsapp/github-action@master
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
path-to-lcov: coverage.txt
|
||||
path-to-lcov: coverage.txt
|
|
@ -134,8 +134,13 @@ func ShowPrometheus(url string) {
|
|||
}
|
||||
source, ok := metric.Labels["source"]
|
||||
if !ok {
|
||||
log.Debugf("no source in Metric %v", metric.Labels)
|
||||
log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name)
|
||||
} else {
|
||||
if srctype, ok := metric.Labels["type"]; ok {
|
||||
source = srctype + ":" + source
|
||||
}
|
||||
}
|
||||
|
||||
value := m.(prom2json.Metric).Value
|
||||
machine := metric.Labels["machine"]
|
||||
bouncer := metric.Labels["bouncer"]
|
||||
|
@ -180,7 +185,7 @@ func ShowPrometheus(url string) {
|
|||
}
|
||||
buckets_stats[name]["underflow"] += ival
|
||||
/*acquis*/
|
||||
case "cs_reader_hits_total":
|
||||
case "cs_parser_hits_total":
|
||||
if _, ok := acquis_stats[source]; !ok {
|
||||
acquis_stats[source] = make(map[string]int)
|
||||
}
|
||||
|
|
|
@ -334,6 +334,10 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int {
|
|||
source, ok := metric.Labels["source"]
|
||||
if !ok {
|
||||
log.Debugf("no source in Metric %v", metric.Labels)
|
||||
} else {
|
||||
if srctype, ok := metric.Labels["type"]; ok {
|
||||
source = srctype + ":" + source
|
||||
}
|
||||
}
|
||||
value := m.(prom2json.Metric).Value
|
||||
fval, err := strconv.ParseFloat(value, 32)
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/parser"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -114,6 +115,17 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled {
|
||||
aggregated := false
|
||||
if cConfig.Prometheus.Level == "aggregated" {
|
||||
aggregated = true
|
||||
}
|
||||
if err := acquisition.GetMetrics(dataSources, aggregated); err != nil {
|
||||
return errors.Wrap(err, "while fetching prometheus metrics for datasources.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
_ "net/http/pprof"
|
||||
"time"
|
||||
|
@ -18,6 +17,7 @@ import (
|
|||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/parser"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
|
@ -46,18 +46,16 @@ var (
|
|||
)
|
||||
|
||||
type Flags struct {
|
||||
ConfigFile string
|
||||
TraceLevel bool
|
||||
DebugLevel bool
|
||||
InfoLevel bool
|
||||
PrintVersion bool
|
||||
SingleFilePath string
|
||||
SingleJournalctlFilter string
|
||||
SingleFileType string
|
||||
SingleFileJsonOutput string
|
||||
TestMode bool
|
||||
DisableAgent bool
|
||||
DisableAPI bool
|
||||
ConfigFile string
|
||||
TraceLevel bool
|
||||
DebugLevel bool
|
||||
InfoLevel bool
|
||||
PrintVersion bool
|
||||
SingleFileType string
|
||||
OneShotDSN string
|
||||
TestMode bool
|
||||
DisableAgent bool
|
||||
DisableAPI bool
|
||||
}
|
||||
|
||||
type parsers struct {
|
||||
|
@ -140,30 +138,19 @@ func LoadBuckets(cConfig *csconfig.Config) error {
|
|||
func LoadAcquisition(cConfig *csconfig.Config) error {
|
||||
var err error
|
||||
|
||||
if flags.SingleFilePath != "" || flags.SingleJournalctlFilter != "" {
|
||||
|
||||
tmpCfg := acquisition.DataSourceCfg{}
|
||||
tmpCfg.Mode = acquisition.CAT_MODE
|
||||
tmpCfg.Labels = map[string]string{"type": flags.SingleFileType}
|
||||
|
||||
if flags.SingleFilePath != "" {
|
||||
tmpCfg.Filename = flags.SingleFilePath
|
||||
} else if flags.SingleJournalctlFilter != "" {
|
||||
tmpCfg.JournalctlFilters = strings.Split(flags.SingleJournalctlFilter, " ")
|
||||
if flags.SingleFileType != "" || flags.OneShotDSN != "" {
|
||||
if flags.OneShotDSN == "" || flags.SingleFileType == "" {
|
||||
return fmt.Errorf("-type requires a -dsn argument")
|
||||
}
|
||||
|
||||
datasrc, err := acquisition.DataSourceConfigure(tmpCfg)
|
||||
dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.SingleFileType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("while configuring specified file datasource : %s", err)
|
||||
return errors.Wrapf(err, "failed to configure datasource for %s", flags.OneShotDSN)
|
||||
}
|
||||
if dataSources == nil {
|
||||
dataSources = make([]acquisition.DataSource, 0)
|
||||
}
|
||||
dataSources = append(dataSources, datasrc)
|
||||
} else {
|
||||
dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec)
|
||||
if err != nil {
|
||||
log.Fatalf("While loading acquisition configuration : %s", err)
|
||||
return errors.Wrap(err, "while loading acquisition configuration")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -177,8 +164,7 @@ func (f *Flags) Parse() {
|
|||
flag.BoolVar(&f.DebugLevel, "debug", false, "print debug-level on stdout")
|
||||
flag.BoolVar(&f.InfoLevel, "info", false, "print info-level on stdout")
|
||||
flag.BoolVar(&f.PrintVersion, "version", false, "display version")
|
||||
flag.StringVar(&f.SingleFilePath, "file", "", "Process a single file in time-machine")
|
||||
flag.StringVar(&f.SingleJournalctlFilter, "jfilter", "", "Process a single journalctl output in time-machine")
|
||||
flag.StringVar(&f.OneShotDSN, "dsn", "", "Process a single data source in time-machine")
|
||||
flag.StringVar(&f.SingleFileType, "type", "", "Labels.type for file in time-machine")
|
||||
flag.BoolVar(&f.TestMode, "t", false, "only test configs")
|
||||
flag.BoolVar(&f.DisableAgent, "no-cs", false, "disable crowdsec agent")
|
||||
|
@ -210,18 +196,6 @@ func LoadConfig(cConfig *csconfig.Config) error {
|
|||
log.Fatalf("You must run at least the API Server or crowdsec")
|
||||
}
|
||||
|
||||
if flags.SingleFilePath != "" {
|
||||
if flags.SingleFileType == "" {
|
||||
return fmt.Errorf("-file requires -type")
|
||||
}
|
||||
}
|
||||
|
||||
if flags.SingleJournalctlFilter != "" {
|
||||
if flags.SingleFileType == "" {
|
||||
return fmt.Errorf("-jfilter requires -type")
|
||||
}
|
||||
}
|
||||
|
||||
if flags.DebugLevel {
|
||||
logLevel := log.DebugLevel
|
||||
cConfig.Common.LogLevel = &logLevel
|
||||
|
@ -239,7 +213,7 @@ func LoadConfig(cConfig *csconfig.Config) error {
|
|||
cConfig.Crowdsec.LintOnly = true
|
||||
}
|
||||
|
||||
if flags.SingleFilePath != "" || flags.SingleJournalctlFilter != "" {
|
||||
if flags.SingleFileType != "" && flags.OneShotDSN != "" {
|
||||
cConfig.API.Server.OnlineClient = nil
|
||||
/*if the api is disabled as well, just read file and exit, don't daemonize*/
|
||||
if flags.DisableAPI {
|
||||
|
|
|
@ -3,7 +3,6 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition"
|
||||
v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
|
||||
|
@ -24,21 +23,21 @@ var globalParserHits = prometheus.NewCounterVec(
|
|||
Name: "cs_parser_hits_total",
|
||||
Help: "Total events entered the parser.",
|
||||
},
|
||||
[]string{"source"},
|
||||
[]string{"source", "type"},
|
||||
)
|
||||
var globalParserHitsOk = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cs_parser_hits_ok_total",
|
||||
Help: "Total events were successfully parsed.",
|
||||
},
|
||||
[]string{"source"},
|
||||
[]string{"source", "type"},
|
||||
)
|
||||
var globalParserHitsKo = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cs_parser_hits_ko_total",
|
||||
Help: "Total events were unsuccessfully parsed.",
|
||||
},
|
||||
[]string{"source"},
|
||||
[]string{"source", "type"},
|
||||
)
|
||||
|
||||
var globalBucketPourKo = prometheus.NewCounter(
|
||||
|
@ -82,7 +81,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
|
|||
if config.Level == "aggregated" {
|
||||
log.Infof("Loading aggregated prometheus collectors")
|
||||
prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
|
||||
acquisition.ReaderHits, globalCsInfo,
|
||||
globalCsInfo,
|
||||
leaky.BucketsUnderflow, leaky.BucketsInstanciation, leaky.BucketsOverflow,
|
||||
v1.LapiRouteHits,
|
||||
leaky.BucketsCurrentCount)
|
||||
|
@ -90,7 +89,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
|
|||
log.Infof("Loading prometheus collectors")
|
||||
prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
|
||||
parser.NodesHits, parser.NodesHitsOk, parser.NodesHitsKo,
|
||||
acquisition.ReaderHits, globalCsInfo,
|
||||
globalCsInfo,
|
||||
v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions,
|
||||
leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsInstanciation, leaky.BucketsOverflow, leaky.BucketsCurrentCount)
|
||||
|
||||
|
|
|
@ -22,7 +22,11 @@ LOOP:
|
|||
if !event.Process {
|
||||
continue
|
||||
}
|
||||
globalParserHits.With(prometheus.Labels{"source": event.Line.Src}).Inc()
|
||||
if event.Line.Module == "" {
|
||||
log.Errorf("empty event.Line.Module field, the acquisition module must set it ! : %+v", event.Line)
|
||||
continue
|
||||
}
|
||||
globalParserHits.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc()
|
||||
|
||||
/* parse the log using magic */
|
||||
parsed, error := parser.Parse(parserCTX, event, nodes)
|
||||
|
@ -31,11 +35,11 @@ LOOP:
|
|||
return errors.New("parsing failed :/")
|
||||
}
|
||||
if !parsed.Process {
|
||||
globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
|
||||
globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc()
|
||||
log.Debugf("Discarding line %+v", parsed)
|
||||
continue
|
||||
}
|
||||
globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src}).Inc()
|
||||
globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc()
|
||||
if parsed.Whitelisted {
|
||||
log.Debugf("event whitelisted, discard")
|
||||
continue
|
||||
|
|
14
go.mod
14
go.mod
|
@ -8,6 +8,7 @@ require (
|
|||
github.com/Microsoft/go-winio v0.4.16 // indirect
|
||||
github.com/antonmedv/expr v1.8.9
|
||||
github.com/appleboy/gin-jwt/v2 v2.6.4
|
||||
github.com/aws/aws-sdk-go v1.38.34
|
||||
github.com/buger/jsonparser v1.1.1
|
||||
github.com/containerd/containerd v1.4.3 // indirect
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
|
@ -18,6 +19,7 @@ require (
|
|||
github.com/docker/docker v20.10.2+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/gin-gonic/gin v1.6.3
|
||||
github.com/go-co-op/gocron v0.5.1
|
||||
github.com/go-openapi/errors v0.19.9
|
||||
|
@ -30,6 +32,7 @@ require (
|
|||
github.com/google/go-querystring v1.0.0
|
||||
github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e
|
||||
github.com/hashicorp/go-version v1.2.1
|
||||
github.com/influxdata/go-syslog/v3 v3.0.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/lib/pq v1.10.0
|
||||
github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c
|
||||
|
@ -47,21 +50,20 @@ require (
|
|||
github.com/oschwald/geoip2-golang v1.4.0
|
||||
github.com/oschwald/maxminddb-golang v1.8.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.9.0
|
||||
github.com/prometheus/client_golang v1.10.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/procfs v0.3.0 // indirect
|
||||
github.com/prometheus/prom2json v1.3.0
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/ugorji/go v1.2.3 // indirect
|
||||
github.com/vjeantet/grok v1.0.1 // indirect
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
|
||||
golang.org/x/mod v0.4.1
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
|
||||
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 // indirect
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
|
||||
golang.org/x/text v0.3.5 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f // indirect
|
||||
|
@ -69,7 +71,7 @@ require (
|
|||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
gotest.tools/v3 v3.0.3
|
||||
)
|
||||
|
||||
replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0
|
||||
|
|
109
go.sum
109
go.sum
|
@ -40,11 +40,9 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/
|
|||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw=
|
||||
|
@ -62,13 +60,15 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l
|
|||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.38.34 h1:JSAyS6hSDLbRmCAz9VAkwDf5oh/olt9mBTrVBWGJcU8=
|
||||
github.com/aws/aws-sdk-go v1.38.34/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -98,7 +98,6 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
|
|||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
|
@ -147,7 +146,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
|||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
|
@ -160,7 +158,6 @@ github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
|
|||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368 h1:WNHfSP1q2vuAa9vF54RrhCl4nqxCjVcXhlbsRXbGOSY=
|
||||
github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c=
|
||||
github.com/go-co-op/gocron v0.5.1 h1:Cni1V7mt184+HnYTDYe6MH7siofCvf94PrGyIDI1v1U=
|
||||
github.com/go-co-op/gocron v0.5.1/go.mod h1:6Btk4lVj3bnFAgbVfr76W8impTyhYrEi1pV5Pt4Tp/M=
|
||||
|
@ -177,7 +174,6 @@ github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpR
|
|||
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
|
||||
github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
|
||||
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
|
||||
github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE=
|
||||
github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
|
||||
github.com/go-openapi/analysis v0.19.16 h1:Ub9e++M8sDwtHD+S587TYi+6ANBG1NRYGZDihqk0SaY=
|
||||
github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
|
||||
|
@ -186,25 +182,20 @@ github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQH
|
|||
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
|
||||
github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
|
||||
github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.19.7 h1:Lcq+o0mSwCLKACMxZhreVHigB9ebghJ/lrmeaqASbjo=
|
||||
github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc=
|
||||
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4=
|
||||
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
|
||||
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
|
||||
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
|
@ -213,9 +204,7 @@ github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
|
|||
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
|
||||
github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
|
||||
github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls=
|
||||
github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
|
||||
github.com/go-openapi/loads v0.19.6 h1:6IAtnx22MNSjPocZZ2sV7EjgF6wW5rDC9r6ZkNxjiN8=
|
||||
github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
|
||||
github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
|
||||
github.com/go-openapi/loads v0.20.0 h1:Pymw1O8zDmWeNv4kVsHd0W3cvgdp8juRa4U/U/8D/Pk=
|
||||
|
@ -224,7 +213,6 @@ github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6
|
|||
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
|
||||
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
|
||||
github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
|
||||
github.com/go-openapi/runtime v0.19.16 h1:tQMAY5s5BfmmCC31+ufDCsGrr8iO1A8UIdYfDo5ADvs=
|
||||
github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
|
||||
github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4=
|
||||
github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
|
||||
|
@ -233,7 +221,6 @@ github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd
|
|||
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
|
||||
github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg=
|
||||
github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
|
||||
github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
|
||||
github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I=
|
||||
|
@ -244,7 +231,6 @@ github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+Z
|
|||
github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
|
||||
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
|
||||
github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
|
||||
github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM=
|
||||
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
|
||||
github.com/go-openapi/strfmt v0.19.11 h1:0+YvbNh05rmBkgztd6zHp4OCFn7Mtu30bn46NQo2ZRw=
|
||||
github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
|
||||
|
@ -253,7 +239,6 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
|
|||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
|
||||
github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE=
|
||||
github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
|
||||
github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI=
|
||||
github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
|
||||
|
@ -261,7 +246,6 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+
|
|||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
|
||||
github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
|
||||
github.com/go-openapi/validate v0.19.12 h1:mPLM/bfbd00PGOCJlU0yJL7IulkZ+q9VjPv7U11RMQQ=
|
||||
github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
|
||||
github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
|
||||
github.com/go-openapi/validate v0.20.0 h1:pzutNCCBZGZlE+u8HD3JZyWdc/TVbtVwlWUp8/vgUKk=
|
||||
|
@ -272,7 +256,6 @@ github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8c
|
|||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
|
||||
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
|
||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||
|
@ -309,11 +292,9 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9
|
|||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -324,9 +305,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
|||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
|
@ -346,10 +325,10 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
|||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -392,7 +371,6 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX
|
|||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
|
||||
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
|
@ -410,9 +388,12 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
|
|||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I=
|
||||
github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
|
@ -440,7 +421,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
|||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
|
@ -452,10 +432,10 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
|
||||
github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
|
||||
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
|
@ -470,14 +450,12 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN
|
|||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
|
||||
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
|
@ -488,9 +466,7 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
|
|||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
|
||||
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
|
||||
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
|
@ -499,7 +475,6 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJK
|
|||
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
|
@ -512,10 +487,8 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
|
|||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
|
||||
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.0 h1:7ks8ZkOP5/ujthUsT07rNv+nkLXCQWKNHuwzOAesEks=
|
||||
github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
|
@ -569,7 +542,6 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
|
|||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug=
|
||||
github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng=
|
||||
github.com/oschwald/maxminddb-golang v1.6.0 h1:KAJSjdHQ8Kv45nFIbtoLGrGWqHFajOIm7skTyz/+Dls=
|
||||
github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w=
|
||||
github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk=
|
||||
github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
|
||||
|
@ -583,7 +555,6 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9
|
|||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -598,8 +569,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
|
|||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
|
||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
||||
github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg=
|
||||
github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -613,19 +584,16 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
|||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
|
||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y=
|
||||
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.3.0 h1:Uehi/mxLK0eiUc0H0++5tpMGTexB8wZ598MIgU8VpDM=
|
||||
github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
|
||||
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
|
@ -638,7 +606,6 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
|||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
|
@ -647,15 +614,14 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
|
|||
github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
|
@ -668,7 +634,6 @@ github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
|
|||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
|
@ -684,9 +649,7 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
|||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
@ -699,11 +662,9 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
|||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go v1.2.3 h1:WbFSXLxDFKVN69Sk8t+XHGzVCD7R8UoAATR8NqZgTbk=
|
||||
github.com/ugorji/go v1.2.3/go.mod h1:5l8GZ8hZvmL4uMdy+mhCO1LjswGRYco9Q3HfuisB21A=
|
||||
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.3 h1:/mVYEV+Jo3IZKeA5gBngN0AvNnQltEDkR+eQikkWQu0=
|
||||
github.com/ugorji/go/codec v1.2.3/go.mod h1:5FxzDJIgeiWJZslYHPj+LS1dq1ZBQVelZFnjsFGI/Uc=
|
||||
|
@ -723,9 +684,7 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI
|
|||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
|
||||
go.mongodb.org/mongo-driver v1.3.4 h1:zs/dKNwX0gYUtzwrN9lLiR15hCO0nDwQj5xXx+vjCdE=
|
||||
go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
|
||||
go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8=
|
||||
go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
|
||||
go.mongodb.org/mongo-driver v1.4.4 h1:bsPHfODES+/yx2PCWzUYMH8xj6PVniPI8DQrsJuSXSs=
|
||||
go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
|
||||
|
@ -755,7 +714,6 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
|
@ -769,7 +727,6 @@ golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+o
|
|||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
|
@ -780,7 +737,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
|
|||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
|
@ -801,19 +757,15 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
|
||||
|
@ -829,6 +781,7 @@ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -862,30 +815,28 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=
|
||||
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
|
@ -901,7 +852,6 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
|
|||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
|
@ -929,11 +879,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -948,7 +896,6 @@ google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
@ -958,9 +905,7 @@ google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dT
|
|||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f h1:izedQ6yVIc5mZsRuXzmSreCOlzI0lCU1HpG8yEdMiKw=
|
||||
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
|
@ -968,16 +913,13 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3
|
|||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
|
||||
|
@ -988,13 +930,11 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
|||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@ -1019,14 +959,11 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
135
pkg/acquisition/README.md
Normal file
135
pkg/acquisition/README.md
Normal file
|
@ -0,0 +1,135 @@
|
|||
# pkg/acquisition
|
||||
|
||||
## What is it
|
||||
|
||||
`pkg/acquisition` is in charge of reading data sources and feeding events to the parser(s).
|
||||
Most data sources can either be used :
|
||||
- in [one-shot](https://doc.crowdsec.net/v1.X/docs/user_guide/forensic_mode/#forensic-mode) mode : data source (ie. file) is read at once
|
||||
- in streaming mode : data source is constantly monitored, and events are fed to the parsers in real time
|
||||
|
||||
## Scope
|
||||
|
||||
This documentation aims at providing guidelines for implementation of new data sources.
|
||||
|
||||
# Writting modules
|
||||
|
||||
Each module must implement the `DataSource` interface.
|
||||
|
||||
```golang
|
||||
type DataSource interface {
|
||||
GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module
|
||||
Configure([]byte, *log.Entry) error // Configure the datasource
|
||||
ConfigureByDSN(string, string, *log.Entry) error // Configure the datasource
|
||||
GetMode() string // Get the mode (TAIL, CAT or SERVER)
|
||||
GetName() string
|
||||
OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file)
|
||||
StreamingAcquisition(chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file)
|
||||
CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense)
|
||||
Dump() interface{}
|
||||
}
|
||||
```
|
||||
|
||||
Ground rules :
|
||||
|
||||
- All modules must respect the `tomb.Tomb`
|
||||
- `StreamingAcquisition` starts dedicated routines (via the `tomb.Tomb`) and returns, while `OneShotAcquisition` returns when datasource is consumed
|
||||
- `ConfigureByDSN` allows to configure datasource via cli for command-line invokation. Liberties can be taken with dsn format
|
||||
- Each datasource will be given a logger at configuration time, that is configured according to `DataSourceCommonCfg`. It is advised to customize it via [`.WithFields`](https://pkg.go.dev/github.com/sirupsen/logrus#WithFields) to take advantage of structured logging.
|
||||
- You must set `Module` in the `types.Event.Line` that you're pushing the chan
|
||||
|
||||
Note about configuration format :
|
||||
|
||||
- Each data source can have their custom configuration.
|
||||
- All datasource share a "common" configuration section (`DataSourceCommonCfg`). To achieve this, you might want to inlines `DataSourceCommonCfg` in your datasource-specific configuration structure.
|
||||
|
||||
|
||||
## Interface methods
|
||||
|
||||
### GetMetrics
|
||||
|
||||
Each data source can and should return custom prometheus metrics.
|
||||
This is called for each data source that has at least one configured instance.
|
||||
|
||||
Using `cs_yoursource_` is advised, along with [prometheus naming good practices](https://prometheus.io/docs/practices/naming/) for reference.
|
||||
|
||||
|
||||
### Configure
|
||||
|
||||
Configure is fed with the raw yaml configuration for your data source.
|
||||
This is meant to allow freedom for each data source's configurations.
|
||||
|
||||
## ConfigureByDSN
|
||||
|
||||
When used in one-shot mode, your datasource is going to be configured via cli arguments.
|
||||
The first argument is the `dsn`, the second on is the `label->type` to set on the logs.
|
||||
|
||||
Datasource implementations are allowed a lot of freedom around the [`dsn`](https://en.wikipedia.org/wiki/Data_source_name) specifications, but are expected :
|
||||
|
||||
- to allow `log_level` configuration via dsn (ie. `mod://source;log_level=trace`)
|
||||
|
||||
## GetMode
|
||||
|
||||
Returns the mode `TAIL_MODE` or `CAT_MODE` of the current instance.
|
||||
|
||||
## OneShotAcquisition
|
||||
|
||||
Start a one-shot (or `CAT_MODE`, commonly used for forensic) acquisition that is expected to return once the datasource has been consumed.
|
||||
|
||||
## StreamingAcquisition
|
||||
|
||||
Start a streaming (or `TAIL_MODE`, commonly used when crowdsec runs as a daemon) acquisition. Starts appropriate go-routines via the `tomb.Tomb` and returns.
|
||||
|
||||
## CanRun
|
||||
|
||||
Can be used to prevent specific data source to run on specific platforms (ie. journalctl on BSD)
|
||||
|
||||
|
||||
## Dump
|
||||
|
||||
Simply return a pointer to the actual object, kept for future.
|
||||
|
||||
## GetName
|
||||
|
||||
Returns the name, as expected in the `source` argument of the configuration.
|
||||
|
||||
# BoilerPlate code
|
||||
|
||||
Taking a look at `acquisition_test.go` is advised for up-to-date boilerplate code.
|
||||
|
||||
```golang
|
||||
|
||||
type MockSource struct {
|
||||
configuration.DataSourceCommonCfg `yaml:",inline"`
|
||||
Toto string `yaml:"toto"`
|
||||
logger *log.Entry
|
||||
}
|
||||
|
||||
func (f *MockSource) Configure(cfg []byte, logger *log.Entry) error {
|
||||
if err := yaml.UnmarshalStrict(cfg, &f); err != nil {
|
||||
return errors.Wrap(err, "while unmarshaling to reader specific config")
|
||||
}
|
||||
if f.Mode == "" {
|
||||
f.Mode = configuration.CAT_MODE
|
||||
}
|
||||
if f.Mode != configuration.CAT_MODE && f.Mode != configuration.TAIL_MODE {
|
||||
return fmt.Errorf("mode %s is not supported", f.Mode)
|
||||
}
|
||||
if f.Toto == "" {
|
||||
return fmt.Errorf("expect non-empty toto")
|
||||
}
|
||||
f.logger = logger.WithField("toto", f.Toto)
|
||||
return nil
|
||||
}
|
||||
func (f *MockSource) GetMode() string { return f.Mode }
|
||||
func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSource) CanRun() error { return nil }
|
||||
func (f *MockSource) GetMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockSource) Dump() interface{} { return f }
|
||||
func (f *MockSource) GetName() string { return "mock" }
|
||||
func (f *MockSource) ConfigureByDSN(string, string, *log.Entry) error {
|
||||
return fmt.Errorf("not supported")
|
||||
}
|
||||
|
||||
```
|
||||
|
|
@ -4,6 +4,13 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch"
|
||||
fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file"
|
||||
journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl"
|
||||
syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
|
@ -15,102 +22,137 @@ import (
|
|||
tomb "gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
var ReaderHits = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cs_reader_hits_total",
|
||||
Help: "Total lines where read.",
|
||||
},
|
||||
[]string{"source"},
|
||||
)
|
||||
|
||||
/*
|
||||
current limits :
|
||||
- The acquisition is not yet modular (cf. traefik/yaegi), but we start with an interface to pave the road for it.
|
||||
- The configuration item unmarshaled (DataSourceCfg) isn't generic neither yet.
|
||||
- This changes should be made when we're ready to have acquisition managed by the hub & cscli
|
||||
once this change is done, we might go for the following configuration format instead :
|
||||
```yaml
|
||||
---
|
||||
type: nginx
|
||||
source: journald
|
||||
filter: "PROG=nginx"
|
||||
---
|
||||
type: nginx
|
||||
source: files
|
||||
filenames:
|
||||
- "/var/log/nginx/*.log"
|
||||
```
|
||||
*/
|
||||
|
||||
/* Approach
|
||||
|
||||
We support acquisition in two modes :
|
||||
- tail mode : we're following a stream of info (tail -f $src). this is used when monitoring live logs
|
||||
- cat mode : we're reading a file/source one-shot (cat $src), and scenarios will match the timestamp extracted from logs.
|
||||
|
||||
One DataSourceCfg can lead to multiple goroutines, hence the Tombs passing around to allow proper tracking.
|
||||
tail mode shouldn't return except on errors or when externally killed via tombs.
|
||||
cat mode will return once source has been exhausted.
|
||||
|
||||
|
||||
TBD in current iteration :
|
||||
- how to deal with "file was not present at startup but might appear later" ?
|
||||
*/
|
||||
|
||||
var TAIL_MODE = "tail"
|
||||
var CAT_MODE = "cat"
|
||||
|
||||
type DataSourceCfg struct {
|
||||
Mode string `yaml:"mode,omitempty"` //tail|cat|...
|
||||
Filename string `yaml:"filename,omitempty"`
|
||||
Filenames []string `yaml:"filenames,omitempty"`
|
||||
JournalctlFilters []string `yaml:"journalctl_filter,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Profiling bool `yaml:"profiling,omitempty"`
|
||||
}
|
||||
|
||||
// The interface each datasource must implement
|
||||
type DataSource interface {
|
||||
Configure(DataSourceCfg) error
|
||||
/*the readers must watch the tomb (especially in tail mode) to know when to shutdown.
|
||||
tomb is as well used to trigger general shutdown when a datasource errors */
|
||||
StartReading(chan types.Event, *tomb.Tomb) error
|
||||
Mode() string //return CAT_MODE or TAIL_MODE
|
||||
//Not sure it makes sense to make those funcs part of the interface.
|
||||
//While 'cat' and 'tail' are the only two modes we see now, other modes might appear
|
||||
//StartTail(chan types.Event, *tomb.Tomb) error
|
||||
//StartCat(chan types.Event, *tomb.Tomb) error
|
||||
GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module
|
||||
GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality)
|
||||
Configure([]byte, *log.Entry) error // Configure the datasource
|
||||
ConfigureByDSN(string, string, *log.Entry) error // Configure the datasource
|
||||
GetMode() string // Get the mode (TAIL, CAT or SERVER)
|
||||
GetName() string // Get the name of the module
|
||||
OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file)
|
||||
StreamingAcquisition(chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file)
|
||||
CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense)
|
||||
Dump() interface{}
|
||||
}
|
||||
|
||||
func DataSourceConfigure(config DataSourceCfg) (DataSource, error) {
|
||||
if config.Mode == "" { /*default mode is tail*/
|
||||
config.Mode = TAIL_MODE
|
||||
}
|
||||
|
||||
if len(config.Filename) > 0 || len(config.Filenames) > 0 { /*it's file acquisition*/
|
||||
|
||||
fileSrc := new(FileSource)
|
||||
if err := fileSrc.Configure(config); err != nil {
|
||||
return nil, errors.Wrap(err, "configuring file datasource")
|
||||
}
|
||||
return fileSrc, nil
|
||||
} else if len(config.JournalctlFilters) > 0 { /*it's journald acquisition*/
|
||||
|
||||
journaldSrc := new(JournaldSource)
|
||||
if err := journaldSrc.Configure(config); err != nil {
|
||||
return nil, errors.Wrap(err, "configuring journald datasource")
|
||||
}
|
||||
return journaldSrc, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("empty filename(s) and journalctl filter, malformed datasource")
|
||||
}
|
||||
var AcquisitionSources = []struct {
|
||||
name string
|
||||
iface func() DataSource
|
||||
}{
|
||||
{
|
||||
name: "file",
|
||||
iface: func() DataSource { return &fileacquisition.FileSource{} },
|
||||
},
|
||||
{
|
||||
name: "journalctl",
|
||||
iface: func() DataSource { return &journalctlacquisition.JournalCtlSource{} },
|
||||
},
|
||||
{
|
||||
name: "cloudwatch",
|
||||
iface: func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} },
|
||||
},
|
||||
{
|
||||
name: "syslog",
|
||||
iface: func() DataSource { return &syslogacquisition.SyslogSource{} },
|
||||
},
|
||||
}
|
||||
|
||||
func GetDataSourceIface(dataSourceType string) DataSource {
|
||||
for _, source := range AcquisitionSources {
|
||||
if source.name == dataSourceType {
|
||||
return source.iface()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataSource, error) {
|
||||
|
||||
//we dump it back to []byte, because we want to decode the yaml blob twice :
|
||||
//once to DataSourceCommonCfg, and then later to the dedicated type of the datasource
|
||||
yamlConfig, err := yaml.Marshal(commonConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to marshal back interface")
|
||||
}
|
||||
if dataSrc := GetDataSourceIface(commonConfig.Source); dataSrc != nil {
|
||||
/* this logger will then be used by the datasource at runtime */
|
||||
clog := log.New()
|
||||
if err := types.ConfigureLogger(clog); err != nil {
|
||||
return nil, errors.Wrap(err, "while configuring datasource logger")
|
||||
}
|
||||
if commonConfig.LogLevel != nil {
|
||||
clog.SetLevel(*commonConfig.LogLevel)
|
||||
}
|
||||
customLog := log.Fields{
|
||||
"type": commonConfig.Source,
|
||||
}
|
||||
if commonConfig.Name != "" {
|
||||
customLog["name"] = commonConfig.Name
|
||||
}
|
||||
subLogger := clog.WithFields(customLog)
|
||||
/* check eventual dependencies are satisfied (ie. journald will check journalctl availability) */
|
||||
if err := dataSrc.CanRun(); err != nil {
|
||||
return nil, errors.Wrapf(err, "datasource %s cannot be run", commonConfig.Source)
|
||||
}
|
||||
/* configure the actual datasource */
|
||||
if err := dataSrc.Configure(yamlConfig, subLogger); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to configure datasource %s", commonConfig.Source)
|
||||
|
||||
}
|
||||
return &dataSrc, nil
|
||||
}
|
||||
return nil, fmt.Errorf("cannot find source %s", commonConfig.Source)
|
||||
}
|
||||
|
||||
//detectBackwardCompatAcquis : try to magically detect the type for backward compat (type was not mandatory then)
|
||||
func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string {
|
||||
|
||||
if _, ok := sub.Config["filename"]; ok {
|
||||
return "file"
|
||||
}
|
||||
if _, ok := sub.Config["filenames"]; ok {
|
||||
return "file"
|
||||
}
|
||||
if _, ok := sub.Config["journalctl_filter"]; ok {
|
||||
return "journalctl"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func LoadAcquisitionFromDSN(dsn string, label string) ([]DataSource, error) {
|
||||
var sources []DataSource
|
||||
|
||||
frags := strings.Split(dsn, ":")
|
||||
if len(frags) == 1 {
|
||||
return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn)
|
||||
}
|
||||
dataSrc := GetDataSourceIface(frags[0])
|
||||
if dataSrc == nil {
|
||||
return nil, fmt.Errorf("no acquisition for protocol %s://", frags[0])
|
||||
}
|
||||
/* this logger will then be used by the datasource at runtime */
|
||||
clog := log.New()
|
||||
if err := types.ConfigureLogger(clog); err != nil {
|
||||
return nil, errors.Wrap(err, "while configuring datasource logger")
|
||||
}
|
||||
subLogger := clog.WithFields(log.Fields{
|
||||
"type": dsn,
|
||||
})
|
||||
err := dataSrc.ConfigureByDSN(dsn, label, subLogger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "while configuration datasource for %s", dsn)
|
||||
}
|
||||
sources = append(sources, dataSrc)
|
||||
return sources, nil
|
||||
}
|
||||
|
||||
// LoadAcquisitionFromFile unmarshals the configuration item and checks its availability
|
||||
func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, error) {
|
||||
|
||||
var sources []DataSource
|
||||
var acquisSources = config.AcquisitionFiles
|
||||
|
||||
for _, acquisFile := range acquisSources {
|
||||
for _, acquisFile := range config.AcquisitionFiles {
|
||||
log.Infof("loading acquisition file : %s", acquisFile)
|
||||
yamlFile, err := os.Open(acquisFile)
|
||||
if err != nil {
|
||||
|
@ -119,36 +161,82 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource,
|
|||
dec := yaml.NewDecoder(yamlFile)
|
||||
dec.SetStrict(true)
|
||||
for {
|
||||
sub := DataSourceCfg{}
|
||||
var sub configuration.DataSourceCommonCfg
|
||||
err = dec.Decode(&sub)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
log.Tracef("End of yaml file")
|
||||
break
|
||||
}
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("failed to yaml decode %s", acquisFile))
|
||||
return nil, errors.Wrapf(err, "failed to yaml decode %s", acquisFile)
|
||||
}
|
||||
|
||||
//for backward compat ('type' was not mandatory, detect it)
|
||||
if guessType := detectBackwardCompatAcquis(sub); guessType != "" {
|
||||
sub.Source = guessType
|
||||
}
|
||||
//it's an empty item, skip it
|
||||
if len(sub.Labels) == 0 {
|
||||
if sub.Source == "" {
|
||||
log.Debugf("skipping empty item in %s", acquisFile)
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("missing labels in %s", acquisFile)
|
||||
}
|
||||
if sub.Source == "" {
|
||||
return nil, fmt.Errorf("data source type is empty ('source') in %s", acquisFile)
|
||||
}
|
||||
if GetDataSourceIface(sub.Source) == nil {
|
||||
return nil, fmt.Errorf("unknown data source %s in %s", sub.Source, acquisFile)
|
||||
}
|
||||
src, err := DataSourceConfigure(sub)
|
||||
if err != nil {
|
||||
log.Warningf("while configuring datasource : %s", err)
|
||||
continue
|
||||
return nil, errors.Wrapf(err, "while configuring datasource of type %s from %s", sub.Source, acquisFile)
|
||||
}
|
||||
sources = append(sources, src)
|
||||
sources = append(sources, *src)
|
||||
}
|
||||
}
|
||||
|
||||
return sources, nil
|
||||
}
|
||||
|
||||
func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error {
|
||||
func GetMetrics(sources []DataSource, aggregated bool) error {
|
||||
var metrics []prometheus.Collector
|
||||
for i := 0; i < len(sources); i++ {
|
||||
if aggregated {
|
||||
metrics = sources[i].GetMetrics()
|
||||
} else {
|
||||
metrics = sources[i].GetAggregMetrics()
|
||||
}
|
||||
for _, metric := range metrics {
|
||||
if err := prometheus.Register(metric); err != nil {
|
||||
if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
||||
//ignore the error
|
||||
} else {
|
||||
return errors.Wrapf(err, "could not register metrics for datasource %s", sources[i].GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error {
|
||||
for i := 0; i < len(sources); i++ {
|
||||
subsrc := sources[i] //ensure its a copy
|
||||
log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc)
|
||||
|
||||
AcquisTomb.Go(func() error {
|
||||
defer types.CatchPanic("crowdsec/acquis")
|
||||
if err := subsrc.StartReading(output, AcquisTomb); err != nil {
|
||||
return err
|
||||
var err error
|
||||
if subsrc.GetMode() == configuration.TAIL_MODE {
|
||||
err = subsrc.StreamingAcquisition(output, AcquisTomb)
|
||||
} else {
|
||||
err = subsrc.OneShotAcquisition(output, AcquisTomb)
|
||||
}
|
||||
if err != nil {
|
||||
//if one of the acqusition returns an error, we kill the others to properly shutdown
|
||||
AcquisTomb.Kill(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
|
|
@ -2,137 +2,571 @@ package acquisition
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
tomb "gopkg.in/tomb.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestConfigLoading(t *testing.T) {
|
||||
//bad filename
|
||||
cfg := csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"./tests/xxx.yaml"},
|
||||
type MockSource struct {
|
||||
configuration.DataSourceCommonCfg `yaml:",inline"`
|
||||
Toto string `yaml:"toto"`
|
||||
logger *log.Entry
|
||||
}
|
||||
|
||||
func (f *MockSource) Configure(cfg []byte, logger *log.Entry) error {
|
||||
f.logger = logger
|
||||
if err := yaml.UnmarshalStrict(cfg, &f); err != nil {
|
||||
return errors.Wrap(err, "while unmarshaling to reader specific config")
|
||||
}
|
||||
_, err := LoadAcquisitionFromFile(&cfg)
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), "can't open ./tests/xxx.yaml: open ./tests/xxx.yaml: no such file or directory")
|
||||
//bad config file
|
||||
cfg = csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"./tests/test.log"},
|
||||
if f.Mode == "" {
|
||||
f.Mode = configuration.CAT_MODE
|
||||
}
|
||||
_, err = LoadAcquisitionFromFile(&cfg)
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), "failed to yaml decode ./tests/test.log: yaml: unmarshal errors")
|
||||
//correct config file
|
||||
cfg = csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"./tests/acquis_test.yaml"},
|
||||
if f.Mode != configuration.CAT_MODE && f.Mode != configuration.TAIL_MODE {
|
||||
return fmt.Errorf("mode %s is not supported", f.Mode)
|
||||
}
|
||||
srcs, err := LoadAcquisitionFromFile(&cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error : %s", err)
|
||||
if f.Toto == "" {
|
||||
return fmt.Errorf("expect non-empty toto")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (f *MockSource) GetMode() string { return f.Mode }
|
||||
func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSource) CanRun() error { return nil }
|
||||
func (f *MockSource) GetMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockSource) Dump() interface{} { return f }
|
||||
func (f *MockSource) GetName() string { return "mock" }
|
||||
func (f *MockSource) ConfigureByDSN(string, string, *log.Entry) error {
|
||||
return fmt.Errorf("not supported")
|
||||
}
|
||||
|
||||
//copy the mocksource, but this one can't run
|
||||
type MockSourceCantRun struct {
|
||||
MockSource
|
||||
}
|
||||
|
||||
func (f *MockSourceCantRun) CanRun() error { return fmt.Errorf("can't run bro") }
|
||||
func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" }
|
||||
|
||||
//appendMockSource is only used to add mock source for tests
|
||||
func appendMockSource() {
|
||||
if GetDataSourceIface("mock") == nil {
|
||||
mock := struct {
|
||||
name string
|
||||
iface func() DataSource
|
||||
}{
|
||||
name: "mock",
|
||||
iface: func() DataSource { return &MockSource{} },
|
||||
}
|
||||
AcquisitionSources = append(AcquisitionSources, mock)
|
||||
}
|
||||
if GetDataSourceIface("mock_cant_run") == nil {
|
||||
mock := struct {
|
||||
name string
|
||||
iface func() DataSource
|
||||
}{
|
||||
name: "mock_cant_run",
|
||||
iface: func() DataSource { return &MockSourceCantRun{} },
|
||||
}
|
||||
AcquisitionSources = append(AcquisitionSources, mock)
|
||||
}
|
||||
assert.Equal(t, len(srcs), 1)
|
||||
}
|
||||
|
||||
func TestDataSourceConfigure(t *testing.T) {
|
||||
appendMockSource()
|
||||
tests := []struct {
|
||||
cfg DataSourceCfg
|
||||
//tombState
|
||||
config_error string
|
||||
read_error string
|
||||
tomb_error string
|
||||
lines int
|
||||
TestName string
|
||||
RawBytes []byte
|
||||
ExpectedError string
|
||||
}{
|
||||
{ //missing filename(s)
|
||||
cfg: DataSourceCfg{
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
config_error: "empty filename(s) and journalctl filter, malformed datasource",
|
||||
{
|
||||
TestName: "basic_valid_config",
|
||||
RawBytes: []byte(`
|
||||
mode: cat
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: info
|
||||
source: mock
|
||||
toto: test_value1
|
||||
`),
|
||||
},
|
||||
{ //missing filename(s)
|
||||
cfg: DataSourceCfg{
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
config_error: "empty filename(s) and journalctl filter, malformed datasource",
|
||||
{
|
||||
TestName: "basic_debug_config",
|
||||
RawBytes: []byte(`
|
||||
mode: cat
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: mock
|
||||
toto: test_value1
|
||||
`),
|
||||
},
|
||||
{ //bad mode(s)
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
Mode: "ratata",
|
||||
},
|
||||
config_error: "configuring file datasource: unknown mode ratata for file acquisition",
|
||||
{
|
||||
TestName: "basic_tailmode_config",
|
||||
RawBytes: []byte(`
|
||||
mode: tail
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: mock
|
||||
toto: test_value1
|
||||
`),
|
||||
},
|
||||
{ //ok test
|
||||
cfg: DataSourceCfg{
|
||||
Mode: CAT_MODE,
|
||||
Filename: "./tests/test.log",
|
||||
},
|
||||
{
|
||||
TestName: "bad_mode_config",
|
||||
RawBytes: []byte(`
|
||||
mode: ratata
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: mock
|
||||
toto: test_value1
|
||||
`),
|
||||
ExpectedError: "failed to configure datasource mock: mode ratata is not supported",
|
||||
},
|
||||
{ //missing mode, default to CAT_MODE
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
},
|
||||
{
|
||||
TestName: "bad_type_config",
|
||||
RawBytes: []byte(`
|
||||
mode: cat
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: tutu
|
||||
`),
|
||||
ExpectedError: "cannot find source tutu",
|
||||
},
|
||||
{ //ok test for journalctl
|
||||
cfg: DataSourceCfg{
|
||||
Mode: CAT_MODE,
|
||||
JournalctlFilters: []string{"-test.run=TestSimJournalctlCatOneLine", "--"},
|
||||
},
|
||||
{
|
||||
TestName: "mismatch_config",
|
||||
RawBytes: []byte(`
|
||||
mode: cat
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: mock
|
||||
wowo: ajsajasjas
|
||||
`),
|
||||
ExpectedError: "field wowo not found in type acquisition.MockSource",
|
||||
},
|
||||
{
|
||||
TestName: "cant_run_error",
|
||||
RawBytes: []byte(`
|
||||
mode: cat
|
||||
labels:
|
||||
test: foobar
|
||||
log_level: debug
|
||||
source: mock_cant_run
|
||||
wowo: ajsajasjas
|
||||
`),
|
||||
ExpectedError: "datasource mock_cant_run cannot be run: can't run bro",
|
||||
},
|
||||
}
|
||||
|
||||
for tidx, test := range tests {
|
||||
|
||||
srcs, err := DataSourceConfigure(test.cfg)
|
||||
if test.config_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.config_error)
|
||||
log.Infof("expected config error ok : %s", test.config_error)
|
||||
continue
|
||||
for _, test := range tests {
|
||||
common := configuration.DataSourceCommonCfg{}
|
||||
yaml.Unmarshal(test.RawBytes, &common)
|
||||
ds, err := DataSourceConfigure(common)
|
||||
if test.ExpectedError != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error %s, got none", test.ExpectedError)
|
||||
}
|
||||
if !strings.Contains(err.Error(), test.ExpectedError) {
|
||||
t.Fatalf("%s : expected error '%s' in '%s'", test.TestName, test.ExpectedError, err.Error())
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err)
|
||||
t.Fatalf("%s : unexpected error '%s'", test.TestName, err)
|
||||
}
|
||||
}
|
||||
|
||||
//check we got the expected mode
|
||||
if tests[tidx].cfg.Mode == "" {
|
||||
tests[tidx].cfg.Mode = TAIL_MODE
|
||||
switch test.TestName {
|
||||
case "basic_valid_config":
|
||||
mock := (*ds).Dump().(*MockSource)
|
||||
assert.Equal(t, mock.Toto, "test_value1")
|
||||
assert.Equal(t, mock.Mode, "cat")
|
||||
assert.Equal(t, mock.logger.Logger.Level, log.InfoLevel)
|
||||
assert.DeepEqual(t, mock.Labels, map[string]string{"test": "foobar"})
|
||||
case "basic_debug_config":
|
||||
mock := (*ds).Dump().(*MockSource)
|
||||
assert.Equal(t, mock.Toto, "test_value1")
|
||||
assert.Equal(t, mock.Mode, "cat")
|
||||
assert.Equal(t, mock.logger.Logger.Level, log.DebugLevel)
|
||||
assert.DeepEqual(t, mock.Labels, map[string]string{"test": "foobar"})
|
||||
case "basic_tailmode_config":
|
||||
mock := (*ds).Dump().(*MockSource)
|
||||
assert.Equal(t, mock.Toto, "test_value1")
|
||||
assert.Equal(t, mock.Mode, "tail")
|
||||
assert.Equal(t, mock.logger.Logger.Level, log.DebugLevel)
|
||||
assert.DeepEqual(t, mock.Labels, map[string]string{"test": "foobar"})
|
||||
}
|
||||
assert.Equal(t, srcs.Mode(), tests[tidx].cfg.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tomb := tomb.Tomb{}
|
||||
|
||||
go func() {
|
||||
err = StartAcquisition([]DataSource{srcs}, out, &tomb)
|
||||
if test.read_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.read_error)
|
||||
log.Infof("expected read error ok : %s", test.read_error)
|
||||
func TestLoadAcquisitionFromFile(t *testing.T) {
|
||||
appendMockSource()
|
||||
tests := []struct {
|
||||
TestName string
|
||||
Config csconfig.CrowdsecServiceCfg
|
||||
ExpectedError string
|
||||
ExpectedLen int
|
||||
}{
|
||||
{
|
||||
TestName: "non_existent_file",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"does_not_exist"},
|
||||
},
|
||||
ExpectedError: "can't open does_not_exist",
|
||||
ExpectedLen: 0,
|
||||
},
|
||||
{
|
||||
TestName: "invalid_yaml_file",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/badyaml.yaml"},
|
||||
},
|
||||
ExpectedError: "failed to yaml decode test_files/badyaml.yaml: yaml: unmarshal errors",
|
||||
ExpectedLen: 0,
|
||||
},
|
||||
{
|
||||
TestName: "invalid_empty_yaml",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/emptyitem.yaml"},
|
||||
},
|
||||
ExpectedLen: 0,
|
||||
},
|
||||
{
|
||||
TestName: "basic_valid",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/basic_filemode.yaml"},
|
||||
},
|
||||
ExpectedLen: 2,
|
||||
},
|
||||
{
|
||||
TestName: "missing_labels",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/missing_labels.yaml"},
|
||||
},
|
||||
ExpectedError: "missing labels in test_files/missing_labels.yaml",
|
||||
},
|
||||
{
|
||||
TestName: "backward_compat",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/backward_compat.yaml"},
|
||||
},
|
||||
ExpectedLen: 2,
|
||||
},
|
||||
{
|
||||
TestName: "bad_type",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/bad_source.yaml"},
|
||||
},
|
||||
ExpectedError: "unknown data source does_not_exist in test_files/bad_source.yaml",
|
||||
},
|
||||
{
|
||||
TestName: "invalid_filetype_config",
|
||||
Config: csconfig.CrowdsecServiceCfg{
|
||||
AcquisitionFiles: []string{"test_files/bad_filetype.yaml"},
|
||||
},
|
||||
ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
dss, err := LoadAcquisitionFromFile(&test.Config)
|
||||
if test.ExpectedError != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error %s, got none", test.ExpectedError)
|
||||
}
|
||||
if !strings.Contains(err.Error(), test.ExpectedError) {
|
||||
t.Fatalf("%s : expected error '%s' in '%s'", test.TestName, test.ExpectedError, err.Error())
|
||||
} else {
|
||||
if err != nil {
|
||||
log.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("kill iiittt")
|
||||
//we're actually not interested in the result :)
|
||||
tomb.Kill(nil)
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
if test.tomb_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error)
|
||||
log.Infof("expected tomb error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if tomb.Err() != nil {
|
||||
t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err())
|
||||
if err != nil {
|
||||
t.Fatalf("%s : unexpected error '%s'", test.TestName, err)
|
||||
}
|
||||
}
|
||||
if len(dss) != test.ExpectedLen {
|
||||
t.Fatalf("%s : expected %d datasources got %d", test.TestName, test.ExpectedLen, len(dss))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
test start acquisition :
|
||||
- create mock parser in cat mode : start acquisition, check it returns, count items in chan
|
||||
- create mock parser in tail mode : start acquisition, sleep, check item count, tomb kill it, wait for it to return
|
||||
*/
|
||||
|
||||
type MockCat struct {
|
||||
configuration.DataSourceCommonCfg `yaml:",inline"`
|
||||
logger *log.Entry
|
||||
}
|
||||
|
||||
func (f *MockCat) Configure(cfg []byte, logger *log.Entry) error {
|
||||
f.logger = logger
|
||||
if f.Mode == "" {
|
||||
f.Mode = configuration.CAT_MODE
|
||||
}
|
||||
if f.Mode != configuration.CAT_MODE {
|
||||
return fmt.Errorf("mode %s is not supported", f.Mode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (f *MockCat) GetName() string { return "mock_cat" }
|
||||
func (f *MockCat) GetMode() string { return "cat" }
|
||||
func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error {
|
||||
for i := 0; i < 10; i++ {
|
||||
evt := types.Event{}
|
||||
evt.Line.Src = "test"
|
||||
out <- evt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error {
|
||||
return fmt.Errorf("can't run in tail")
|
||||
}
|
||||
func (f *MockCat) CanRun() error { return nil }
|
||||
func (f *MockCat) GetMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockCat) Dump() interface{} { return f }
|
||||
func (f *MockCat) ConfigureByDSN(string, string, *log.Entry) error { return fmt.Errorf("not supported") }
|
||||
|
||||
//----
|
||||
|
||||
type MockTail struct {
|
||||
configuration.DataSourceCommonCfg `yaml:",inline"`
|
||||
logger *log.Entry
|
||||
}
|
||||
|
||||
func (f *MockTail) Configure(cfg []byte, logger *log.Entry) error {
|
||||
f.logger = logger
|
||||
if f.Mode == "" {
|
||||
f.Mode = configuration.TAIL_MODE
|
||||
}
|
||||
if f.Mode != configuration.TAIL_MODE {
|
||||
return fmt.Errorf("mode %s is not supported", f.Mode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (f *MockTail) GetName() string { return "mock_tail" }
|
||||
func (f *MockTail) GetMode() string { return "tail" }
|
||||
func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error {
|
||||
return fmt.Errorf("can't run in cat mode")
|
||||
}
|
||||
func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
|
||||
for i := 0; i < 10; i++ {
|
||||
evt := types.Event{}
|
||||
evt.Line.Src = "test"
|
||||
out <- evt
|
||||
}
|
||||
select {
|
||||
case <-t.Dying():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func (f *MockTail) CanRun() error { return nil }
|
||||
func (f *MockTail) GetMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockTail) Dump() interface{} { return f }
|
||||
func (f *MockTail) ConfigureByDSN(string, string, *log.Entry) error {
|
||||
return fmt.Errorf("not supported")
|
||||
}
|
||||
|
||||
//func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error {
|
||||
|
||||
func TestStartAcquisitionCat(t *testing.T) {
|
||||
sources := []DataSource{
|
||||
&MockCat{},
|
||||
}
|
||||
out := make(chan types.Event)
|
||||
acquisTomb := tomb.Tomb{}
|
||||
|
||||
go func() {
|
||||
if err := StartAcquisition(sources, out, &acquisTomb); err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
}
|
||||
}()
|
||||
|
||||
count := 0
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
if count != 10 {
|
||||
t.Fatalf("expected 10 results, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartAcquisitionTail(t *testing.T) {
|
||||
sources := []DataSource{
|
||||
&MockTail{},
|
||||
}
|
||||
out := make(chan types.Event)
|
||||
acquisTomb := tomb.Tomb{}
|
||||
|
||||
go func() {
|
||||
if err := StartAcquisition(sources, out, &acquisTomb); err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
}
|
||||
}()
|
||||
|
||||
count := 0
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
if count != 10 {
|
||||
t.Fatalf("expected 10 results, got %d", count)
|
||||
}
|
||||
acquisTomb.Kill(nil)
|
||||
time.Sleep(1 * time.Second)
|
||||
if acquisTomb.Err() != nil {
|
||||
t.Fatalf("unexpected tomb error %s (should be dead)", acquisTomb.Err())
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
type MockTailError struct {
|
||||
MockTail
|
||||
}
|
||||
|
||||
func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
|
||||
for i := 0; i < 10; i++ {
|
||||
evt := types.Event{}
|
||||
evt.Line.Src = "test"
|
||||
out <- evt
|
||||
}
|
||||
t.Kill(fmt.Errorf("got error (tomb)"))
|
||||
return fmt.Errorf("got error")
|
||||
}
|
||||
|
||||
func TestStartAcquisitionTailError(t *testing.T) {
|
||||
sources := []DataSource{
|
||||
&MockTailError{},
|
||||
}
|
||||
out := make(chan types.Event)
|
||||
acquisTomb := tomb.Tomb{}
|
||||
|
||||
go func() {
|
||||
if err := StartAcquisition(sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" {
|
||||
t.Fatalf("expected error, got '%s'", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
count := 0
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
if count != 10 {
|
||||
t.Fatalf("expected 10 results, got %d", count)
|
||||
}
|
||||
//acquisTomb.Kill(nil)
|
||||
time.Sleep(1 * time.Second)
|
||||
if acquisTomb.Err().Error() != "got error (tomb)" {
|
||||
t.Fatalf("didn't got expected error, got '%s'", acquisTomb.Err().Error())
|
||||
}
|
||||
}
|
||||
|
||||
type MockSourceByDSN struct {
|
||||
configuration.DataSourceCommonCfg `yaml:",inline"`
|
||||
Toto string `yaml:"toto"`
|
||||
logger *log.Entry
|
||||
}
|
||||
|
||||
func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry) error { return nil }
|
||||
func (f *MockSourceByDSN) GetMode() string { return f.Mode }
|
||||
func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
|
||||
func (f *MockSourceByDSN) CanRun() error { return nil }
|
||||
func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil }
|
||||
func (f *MockSourceByDSN) Dump() interface{} { return f }
|
||||
func (f *MockSourceByDSN) GetName() string { return "mockdsn" }
|
||||
func (f *MockSourceByDSN) ConfigureByDSN(dsn string, logType string, logger *log.Entry) error {
|
||||
dsn = strings.TrimPrefix(dsn, "mockdsn://")
|
||||
if dsn != "test_expect" {
|
||||
return fmt.Errorf("unexpected value")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestConfigureByDSN(t *testing.T) {
|
||||
tests := []struct {
|
||||
dsn string
|
||||
ExpectedError string
|
||||
ExpectedResLen int
|
||||
}{
|
||||
{
|
||||
dsn: "baddsn",
|
||||
ExpectedError: "baddsn isn't valid dsn (no protocol)",
|
||||
},
|
||||
{
|
||||
dsn: "foobar://toto",
|
||||
ExpectedError: "no acquisition for protocol foobar://",
|
||||
},
|
||||
{
|
||||
dsn: "mockdsn://test_expect",
|
||||
ExpectedResLen: 1,
|
||||
},
|
||||
{
|
||||
dsn: "mockdsn://bad",
|
||||
ExpectedError: "unexpected value",
|
||||
},
|
||||
}
|
||||
|
||||
if GetDataSourceIface("mockdsn") == nil {
|
||||
mock := struct {
|
||||
name string
|
||||
iface func() DataSource
|
||||
}{
|
||||
name: "mockdsn",
|
||||
iface: func() DataSource { return &MockSourceByDSN{} },
|
||||
}
|
||||
AcquisitionSources = append(AcquisitionSources, mock)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
srcs, err := LoadAcquisitionFromDSN(test.dsn, "test_label")
|
||||
if err != nil && test.ExpectedError != "" {
|
||||
if !strings.Contains(err.Error(), test.ExpectedError) {
|
||||
t.Fatalf("expected '%s', got '%s'", test.ExpectedError, err.Error())
|
||||
}
|
||||
} else if err != nil && test.ExpectedError == "" {
|
||||
t.Fatalf("got unexpected error '%s'", err.Error())
|
||||
} else if err == nil && test.ExpectedError != "" {
|
||||
t.Fatalf("expected error '%s' got none", test.ExpectedError)
|
||||
}
|
||||
if len(srcs) != test.ExpectedResLen {
|
||||
t.Fatalf("expected %d results, got %d", test.ExpectedResLen, len(srcs))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
18
pkg/acquisition/configuration/configuration.go
Normal file
18
pkg/acquisition/configuration/configuration.go
Normal file
|
@ -0,0 +1,18 @@
|
|||
package configuration
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type DataSourceCommonCfg struct {
|
||||
Mode string `yaml:"mode,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
LogLevel *log.Level `yaml:"log_level,omitempty"`
|
||||
Source string `yaml:"source,omitempty"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Config map[string]interface{} `yaml:",inline"` //to keep the datasource-specific configuration directives
|
||||
}
|
||||
|
||||
var TAIL_MODE = "tail"
|
||||
var CAT_MODE = "cat"
|
||||
var SERVER_MODE = "server" // No difference with tail, just a bit more verbose
|
|
@ -1,227 +0,0 @@
|
|||
package acquisition
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/nxadm/tail"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
tomb "gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
type FileSource struct {
|
||||
Config DataSourceCfg
|
||||
tails []*tail.Tail
|
||||
Files []string
|
||||
}
|
||||
|
||||
func (f *FileSource) Configure(Config DataSourceCfg) error {
|
||||
f.Config = Config
|
||||
if len(Config.Filename) == 0 && len(Config.Filenames) == 0 {
|
||||
return fmt.Errorf("no filename or filenames")
|
||||
}
|
||||
|
||||
//let's deal with the array no matter what
|
||||
if len(Config.Filename) != 0 {
|
||||
Config.Filenames = append(Config.Filenames, Config.Filename)
|
||||
}
|
||||
|
||||
for _, fexpr := range Config.Filenames {
|
||||
files, err := filepath.Glob(fexpr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "while globbing %s", fexpr)
|
||||
}
|
||||
if len(files) == 0 {
|
||||
log.Warningf("[file datasource] no results for %s", fexpr)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
/*check that we can read said file*/
|
||||
if err := unix.Access(file, unix.R_OK); err != nil {
|
||||
return fmt.Errorf("unable to open %s : %s", file, err)
|
||||
}
|
||||
log.Infof("[file datasource] opening file '%s'", file)
|
||||
|
||||
if f.Config.Mode == TAIL_MODE {
|
||||
tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: 2}})
|
||||
if err != nil {
|
||||
log.Errorf("[file datasource] skipping %s : %v", file, err)
|
||||
continue
|
||||
}
|
||||
f.Files = append(f.Files, file)
|
||||
f.tails = append(f.tails, tail)
|
||||
} else if f.Config.Mode == CAT_MODE {
|
||||
//simply check that the file exists, it will be read differently
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return fmt.Errorf("can't open file %s : %s", file, err)
|
||||
}
|
||||
f.Files = append(f.Files, file)
|
||||
} else {
|
||||
return fmt.Errorf("unknown mode %s for file acquisition", f.Config.Mode)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
if len(f.Files) == 0 {
|
||||
return fmt.Errorf("no files to read for %+v", Config.Filenames)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FileSource) Mode() string {
|
||||
return f.Config.Mode
|
||||
}
|
||||
|
||||
func (f *FileSource) StartReading(out chan types.Event, t *tomb.Tomb) error {
|
||||
|
||||
if f.Config.Mode == CAT_MODE {
|
||||
return f.StartCat(out, t)
|
||||
} else if f.Config.Mode == TAIL_MODE {
|
||||
return f.StartTail(out, t)
|
||||
} else {
|
||||
return fmt.Errorf("unknown mode '%s' for file acquisition", f.Config.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
/*A tail-mode file reader (tail) */
|
||||
func (f *FileSource) StartTail(output chan types.Event, AcquisTomb *tomb.Tomb) error {
|
||||
log.Debugf("starting file tail with %d items", len(f.tails))
|
||||
for i := 0; i < len(f.tails); i++ {
|
||||
idx := i
|
||||
log.Debugf("starting %d", idx)
|
||||
AcquisTomb.Go(func() error {
|
||||
defer types.CatchPanic("crowdsec/acquis/tailfile")
|
||||
return f.TailOneFile(output, AcquisTomb, idx)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*A one shot file reader (cat) */
|
||||
func (f *FileSource) StartCat(output chan types.Event, AcquisTomb *tomb.Tomb) error {
|
||||
for i := 0; i < len(f.Files); i++ {
|
||||
idx := i
|
||||
log.Debugf("starting %d", idx)
|
||||
AcquisTomb.Go(func() error {
|
||||
defer types.CatchPanic("crowdsec/acquis/catfile")
|
||||
return f.CatOneFile(output, AcquisTomb, idx)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*A tail-mode file reader (tail) */
|
||||
func (f *FileSource) TailOneFile(output chan types.Event, AcquisTomb *tomb.Tomb, idx int) error {
|
||||
|
||||
file := f.Files[idx]
|
||||
tail := f.tails[idx]
|
||||
|
||||
clog := log.WithFields(log.Fields{
|
||||
"acquisition file": f.Files[idx],
|
||||
})
|
||||
clog.Debugf("starting")
|
||||
|
||||
timeout := time.Tick(1 * time.Second)
|
||||
|
||||
for {
|
||||
l := types.Line{}
|
||||
select {
|
||||
case <-AcquisTomb.Dying(): //we are being killed by main
|
||||
clog.Infof("file datasource %s stopping", file)
|
||||
if err := tail.Stop(); err != nil {
|
||||
clog.Errorf("error in stop : %s", err)
|
||||
}
|
||||
return nil
|
||||
case <-tail.Tomb.Dying(): //our tailer is dying
|
||||
clog.Warningf("File reader of %s died", file)
|
||||
AcquisTomb.Kill(fmt.Errorf("dead reader for %s", file))
|
||||
return fmt.Errorf("reader for %s is dead", file)
|
||||
case line := <-tail.Lines:
|
||||
if line == nil {
|
||||
clog.Debugf("Nil line")
|
||||
return fmt.Errorf("tail for %s is empty", file)
|
||||
}
|
||||
if line.Err != nil {
|
||||
log.Warningf("fetch error : %v", line.Err)
|
||||
return line.Err
|
||||
}
|
||||
if line.Text == "" { //skip empty lines
|
||||
continue
|
||||
}
|
||||
ReaderHits.With(prometheus.Labels{"source": file}).Inc()
|
||||
|
||||
l.Raw = line.Text
|
||||
l.Labels = f.Config.Labels
|
||||
l.Time = line.Time
|
||||
l.Src = file
|
||||
l.Process = true
|
||||
//we're tailing, it must be real time logs
|
||||
log.Debugf("pushing %+v", l)
|
||||
output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
|
||||
case <-timeout:
|
||||
//time out, shall we do stuff ?
|
||||
clog.Debugf("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*A one shot file reader (cat) */
|
||||
func (f *FileSource) CatOneFile(output chan types.Event, AcquisTomb *tomb.Tomb, idx int) error {
|
||||
var scanner *bufio.Scanner
|
||||
|
||||
log.Infof("reading %s at once", f.Files[idx])
|
||||
file := f.Files[idx]
|
||||
|
||||
clog := log.WithFields(log.Fields{
|
||||
"file": file,
|
||||
})
|
||||
fd, err := os.Open(file)
|
||||
defer fd.Close()
|
||||
if err != nil {
|
||||
clog.Errorf("Failed opening file: %s", err)
|
||||
return errors.Wrapf(err, "failed opening %s", f.Files[idx])
|
||||
}
|
||||
|
||||
if strings.HasSuffix(file, ".gz") {
|
||||
gz, err := gzip.NewReader(fd)
|
||||
if err != nil {
|
||||
clog.Errorf("Failed to read gz file: %s", err)
|
||||
return errors.Wrapf(err, "failed to read gz %s", f.Files[idx])
|
||||
}
|
||||
defer gz.Close()
|
||||
scanner = bufio.NewScanner(gz)
|
||||
|
||||
} else {
|
||||
scanner = bufio.NewScanner(fd)
|
||||
}
|
||||
scanner.Split(bufio.ScanLines)
|
||||
for scanner.Scan() {
|
||||
log.Tracef("line %s", scanner.Text())
|
||||
l := types.Line{}
|
||||
l.Raw = scanner.Text()
|
||||
l.Time = time.Now()
|
||||
l.Src = file
|
||||
l.Labels = f.Config.Labels
|
||||
l.Process = true
|
||||
ReaderHits.With(prometheus.Labels{"source": file}).Inc()
|
||||
//we're reading logs at once, it must be time-machine buckets
|
||||
output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE}
|
||||
}
|
||||
AcquisTomb.Kill(nil)
|
||||
return nil
|
||||
}
|
|
@ -1,383 +0,0 @@
|
|||
package acquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
tomb "gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
func TestAcquisCat(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
cfg DataSourceCfg
|
||||
//tombState
|
||||
config_error string
|
||||
read_error string
|
||||
tomb_error string
|
||||
lines int
|
||||
}{
|
||||
{ //missing filename(s)
|
||||
cfg: DataSourceCfg{
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
config_error: "no filename or filenames",
|
||||
},
|
||||
{ //forbiden file
|
||||
cfg: DataSourceCfg{
|
||||
Mode: CAT_MODE,
|
||||
Filename: "/etc/shadow",
|
||||
},
|
||||
config_error: "unable to open /etc/shadow : permission denied",
|
||||
},
|
||||
{ //bad regexp
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "[a-",
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
config_error: "while globbing [a-: syntax error in pattern",
|
||||
},
|
||||
{ //inexisting file
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "/does/not/exists",
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
config_error: "no files to read for [/does/not/exists]",
|
||||
},
|
||||
{ //ok file
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
lines: 1,
|
||||
},
|
||||
{ //invalid gz
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/badlog.gz",
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
lines: 0,
|
||||
tomb_error: "failed to read gz ./tests/badlog.gz: EOF",
|
||||
},
|
||||
{ //good gz
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log.gz",
|
||||
Mode: CAT_MODE,
|
||||
},
|
||||
lines: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for tidx, test := range tests {
|
||||
fileSrc := new(FileSource)
|
||||
err := fileSrc.Configure(test.cfg)
|
||||
if test.config_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.config_error)
|
||||
log.Infof("expected config error ok : %s", test.config_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tomb := tomb.Tomb{}
|
||||
count := 0
|
||||
|
||||
err = fileSrc.StartReading(out, &tomb)
|
||||
if test.read_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.read_error)
|
||||
log.Infof("expected read error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
|
||||
if count != test.lines {
|
||||
t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count)
|
||||
}
|
||||
|
||||
if test.tomb_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error)
|
||||
log.Infof("expected tomb error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if tomb.Err() != nil {
|
||||
t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestTailKill(t *testing.T) {
|
||||
cfg := DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
Mode: TAIL_MODE,
|
||||
}
|
||||
|
||||
fileSrc := new(FileSource)
|
||||
err := fileSrc.Configure(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected config error %s", err)
|
||||
}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tb := tomb.Tomb{}
|
||||
|
||||
err = fileSrc.StartReading(out, &tb)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected read error %s", err)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
if tb.Err() != tomb.ErrStillAlive {
|
||||
t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err())
|
||||
}
|
||||
//kill it :>
|
||||
tb.Kill(nil)
|
||||
time.Sleep(1 * time.Second)
|
||||
if tb.Err() != nil {
|
||||
t.Fatalf("unexpected tomb error %s (should be dead)", tb.Err())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestTailKillBis(t *testing.T) {
|
||||
cfg := DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
Mode: TAIL_MODE,
|
||||
}
|
||||
|
||||
fileSrc := new(FileSource)
|
||||
err := fileSrc.Configure(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected config error %s", err)
|
||||
}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tb := tomb.Tomb{}
|
||||
|
||||
err = fileSrc.StartReading(out, &tb)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected read error %s", err)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
if tb.Err() != tomb.ErrStillAlive {
|
||||
t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err())
|
||||
}
|
||||
//kill the underlying tomb of tailer
|
||||
fileSrc.tails[0].Kill(fmt.Errorf("ratata"))
|
||||
time.Sleep(1 * time.Second)
|
||||
//it can be two errors :
|
||||
if !strings.Contains(fmt.Sprintf("%s", tb.Err()), "dead reader for ./tests/test.log") &&
|
||||
!strings.Contains(fmt.Sprintf("%s", tb.Err()), "tail for ./tests/test.log is empty") {
|
||||
t.Fatalf("unexpected error : %s", tb.Err())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestTailRuntime exercises the happy path of TAIL_MODE : start tailing
// ./tests/test.log, append 5 lines, and check that exactly 5 events come out
// and that the tomb is still alive. The file is reset afterwards so other
// tests see a known one-line fixture.
func TestTailRuntime(t *testing.T) {
	//log.SetLevel(log.TraceLevel)

	cfg := DataSourceCfg{
		Filename: "./tests/test.log",
		Mode:     TAIL_MODE,
	}

	fileSrc := new(FileSource)
	err := fileSrc.Configure(cfg)
	if err != nil {
		t.Fatalf("unexpected config error %s", err)
	}

	out := make(chan types.Event)
	tb := tomb.Tomb{}
	count := 0

	err = fileSrc.StartReading(out, &tb)
	if err != nil {
		t.Fatalf("unexpected read error %s", err)
	}

	//let the tailer settle before writing, so it doesn't miss the appends
	time.Sleep(1 * time.Second)
	//write data
	f, err := os.OpenFile(cfg.Filename, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 5; i++ {
		_, err := f.WriteString(fmt.Sprintf("ratata%d\n", i))
		if err != nil {
			t.Fatal(err)
		}
	}
	f.Close()

	//drain events until the channel stays quiet for a second
READLOOP:
	for {
		select {
		case <-out:
			count++
		case <-time.After(1 * time.Second):
			break READLOOP
		}
	}

	if count != 5 {
		t.Fatalf("expected %d line read, got %d", 5, count)
	}

	if tb.Err() != tomb.ErrStillAlive {
		t.Fatalf("unexpected tomb error %s", tb.Err())
	}

	/*reset the file*/
	f, err = os.OpenFile(cfg.Filename, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.WriteString("one log line\n")
	if err != nil {
		t.Fatal(err)
	}
	f.Close()
}
|
||||
|
||||
func TestAcquisTail(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
cfg DataSourceCfg
|
||||
//tombState
|
||||
config_error string
|
||||
read_error string
|
||||
tomb_error string
|
||||
lines int
|
||||
}{
|
||||
{ //missing filename(s)
|
||||
cfg: DataSourceCfg{
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
config_error: "no filename or filenames",
|
||||
},
|
||||
{ //forbiden file
|
||||
cfg: DataSourceCfg{
|
||||
Mode: TAIL_MODE,
|
||||
Filename: "/etc/shadow",
|
||||
},
|
||||
config_error: "unable to open /etc/shadow : permission denied",
|
||||
},
|
||||
{ //bad regexp
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "[a-",
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
config_error: "while globbing [a-: syntax error in pattern",
|
||||
},
|
||||
{ //inexisting file
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "/does/not/exists",
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
config_error: "no files to read for [/does/not/exists]",
|
||||
},
|
||||
{ //ok file
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log",
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
lines: 0,
|
||||
tomb_error: "still alive",
|
||||
},
|
||||
{ //invalid gz
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/badlog.gz",
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
lines: 0,
|
||||
tomb_error: "still alive",
|
||||
},
|
||||
{ //good gz
|
||||
cfg: DataSourceCfg{
|
||||
Filename: "./tests/test.log.gz",
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
lines: 0,
|
||||
tomb_error: "still alive",
|
||||
},
|
||||
}
|
||||
|
||||
for tidx, test := range tests {
|
||||
fileSrc := new(FileSource)
|
||||
err := fileSrc.Configure(test.cfg)
|
||||
if test.config_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.config_error)
|
||||
log.Infof("expected config error ok : %s", test.config_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tomb := tomb.Tomb{}
|
||||
count := 0
|
||||
|
||||
err = fileSrc.StartReading(out, &tomb)
|
||||
if test.read_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.read_error)
|
||||
log.Infof("expected read error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
|
||||
if count != test.lines {
|
||||
t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count)
|
||||
}
|
||||
|
||||
if test.tomb_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error)
|
||||
log.Infof("expected tomb error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if tomb.Err() != nil {
|
||||
t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -1,174 +0,0 @@
|
|||
package acquisition
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
tomb "gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
/*
|
||||
journald/systemd support :
|
||||
|
||||
systemd has its own logging system, which stores files in non-text mode.
|
||||
To be able to read those, we're going to read the output of journalctl, see https://github.com/crowdsecurity/crowdsec/issues/423
|
||||
|
||||
|
||||
TBD :
|
||||
- handle journalctl errors
|
||||
*/
|
||||
|
||||
// JournaldSource reads logs from systemd's journal by wrapping the journalctl
// binary : its stdout is consumed line by line, and stderr lines are turned
// into errors (see readOutput).
type JournaldSource struct {
	Config  DataSourceCfg //user-supplied datasource configuration
	Cmd     *exec.Cmd     //the journalctl command built by Configure, started by readOutput
	Stdout  io.ReadCloser //journalctl's stdout pipe (the actual log lines)
	Stderr  io.ReadCloser //journalctl's stderr pipe (error messages)
	Decoder *json.Decoder //NOTE(review): never assigned in this file — possibly dead, confirm before removing
	SrcName string        //label used in logs & prometheus metrics ("journalctl-<filters>")
}
|
||||
|
||||
//JOURNALD_CMD is the binary invoked ; kept as a var (not const) so tests can
//substitute the test binary itself (see TestJournaldTail)
var JOURNALD_CMD = "journalctl"

//default extra arguments per acquisition mode (tests override these too)
var JOURNALD_DEFAULT_TAIL_ARGS = []string{"--follow"}
var JOURNALD_DEFAULT_CAT_ARGS = []string{}
|
||||
|
||||
func (j *JournaldSource) Configure(config DataSourceCfg) error {
|
||||
var journalArgs []string
|
||||
|
||||
j.Config = config
|
||||
if config.JournalctlFilters == nil {
|
||||
return fmt.Errorf("journalctl_filter shouldn't be empty")
|
||||
}
|
||||
|
||||
if j.Config.Mode == TAIL_MODE {
|
||||
journalArgs = JOURNALD_DEFAULT_TAIL_ARGS
|
||||
} else if j.Config.Mode == CAT_MODE {
|
||||
journalArgs = JOURNALD_DEFAULT_CAT_ARGS
|
||||
} else {
|
||||
return fmt.Errorf("unknown mode '%s' for journald source", j.Config.Mode)
|
||||
}
|
||||
journalArgs = append(journalArgs, config.JournalctlFilters...)
|
||||
|
||||
j.Cmd = exec.Command(JOURNALD_CMD, journalArgs...)
|
||||
j.Stderr, _ = j.Cmd.StderrPipe()
|
||||
j.Stdout, _ = j.Cmd.StdoutPipe()
|
||||
j.SrcName = fmt.Sprintf("journalctl-%s", strings.Join(config.JournalctlFilters, "."))
|
||||
log.Infof("[journald datasource] Configured with filters : %+v", journalArgs)
|
||||
log.Debugf("cmd path : %s", j.Cmd.Path)
|
||||
log.Debugf("cmd args : %+v", j.Cmd.Args)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mode returns the configured acquisition mode (TAIL_MODE or CAT_MODE).
func (j *JournaldSource) Mode() string {
	return j.Config.Mode
}
|
||||
|
||||
func (j *JournaldSource) readOutput(out chan types.Event, t *tomb.Tomb) error {
|
||||
|
||||
/*
|
||||
todo : handle the channel
|
||||
*/
|
||||
clog := log.WithFields(log.Fields{
|
||||
"acquisition file": j.SrcName,
|
||||
})
|
||||
if err := j.Cmd.Start(); err != nil {
|
||||
clog.Errorf("failed to start journalctl: %s", err)
|
||||
return errors.Wrapf(err, "starting journalctl (%s)", j.SrcName)
|
||||
}
|
||||
|
||||
readErr := make(chan error)
|
||||
|
||||
/*read stderr*/
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(j.Stderr)
|
||||
if scanner == nil {
|
||||
readErr <- fmt.Errorf("failed to create stderr scanner")
|
||||
return
|
||||
}
|
||||
for scanner.Scan() {
|
||||
txt := scanner.Text()
|
||||
clog.Warningf("got stderr message : %s", txt)
|
||||
readErr <- fmt.Errorf(txt)
|
||||
}
|
||||
}()
|
||||
/*read stdout*/
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(j.Stdout)
|
||||
if scanner == nil {
|
||||
readErr <- fmt.Errorf("failed to create stdout scanner")
|
||||
return
|
||||
}
|
||||
for scanner.Scan() {
|
||||
l := types.Line{}
|
||||
ReaderHits.With(prometheus.Labels{"source": j.SrcName}).Inc()
|
||||
l.Raw = scanner.Text()
|
||||
clog.Debugf("getting one line : %s", l.Raw)
|
||||
l.Labels = j.Config.Labels
|
||||
l.Time = time.Now()
|
||||
l.Src = j.SrcName
|
||||
l.Process = true
|
||||
evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
|
||||
out <- evt
|
||||
}
|
||||
clog.Debugf("finished reading from journalctl")
|
||||
if err := scanner.Err(); err != nil {
|
||||
clog.Debugf("got an error while reading %s : %s", j.SrcName, err)
|
||||
readErr <- err
|
||||
return
|
||||
}
|
||||
readErr <- nil
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-t.Dying():
|
||||
clog.Debugf("journalctl datasource %s stopping", j.SrcName)
|
||||
return nil
|
||||
case err := <-readErr:
|
||||
clog.Debugf("the subroutine returned, leave as well")
|
||||
if err != nil {
|
||||
clog.Warningf("journalctl reader error : %s", err)
|
||||
t.Kill(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (j *JournaldSource) StartReading(out chan types.Event, t *tomb.Tomb) error {
|
||||
|
||||
if j.Config.Mode == CAT_MODE {
|
||||
return j.StartCat(out, t)
|
||||
} else if j.Config.Mode == TAIL_MODE {
|
||||
return j.StartTail(out, t)
|
||||
} else {
|
||||
return fmt.Errorf("unknown mode '%s' for file acquisition", j.Config.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *JournaldSource) StartCat(out chan types.Event, t *tomb.Tomb) error {
|
||||
t.Go(func() error {
|
||||
defer types.CatchPanic("crowdsec/acquis/tailjournalctl")
|
||||
return j.readOutput(out, t)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *JournaldSource) StartTail(out chan types.Event, t *tomb.Tomb) error {
|
||||
t.Go(func() error {
|
||||
defer types.CatchPanic("crowdsec/acquis/catjournalctl")
|
||||
return j.readOutput(out, t)
|
||||
})
|
||||
return nil
|
||||
}
|
|
@ -1,238 +0,0 @@
|
|||
package acquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
tomb "gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
/*
|
||||
As we can't decently run journalctl in the CI but we still need to test the command execution aspect :
|
||||
- we create tests 'output only' (cf. TestSimJournalctlCat) that just produce outputs
|
||||
- we run ourselves (os.Args[0]) with specific args to call specific 'output only' tests
|
||||
- and this is how we test the behavior
|
||||
*/
|
||||
|
||||
//14 lines of sshd logs : one journal header line + 13 auth events, used as
//canned journalctl output by TestSimJournalctlCat (TestJournaldSimple asserts
//that exactly 14 events are produced from it)
var testjournalctl_output_1 string = `-- Logs begin at Fri 2019-07-26 17:13:13 CEST, end at Mon 2020-11-23 09:17:34 CET. --
Nov 22 11:22:19 zeroed sshd[1480]: Invalid user wqeqwe from 127.0.0.1 port 55818
Nov 22 11:22:23 zeroed sshd[1480]: Failed password for invalid user wqeqwe from 127.0.0.1 port 55818 ssh2
Nov 22 11:23:22 zeroed sshd[1769]: Invalid user wqeqwe1 from 127.0.0.1 port 55824
Nov 22 11:23:24 zeroed sshd[1769]: Disconnecting invalid user wqeqwe1 127.0.0.1 port 55824: Too many authentication failures [preauth]
Nov 22 11:23:24 zeroed sshd[1777]: Invalid user wqeqwe2 from 127.0.0.1 port 55826
Nov 22 11:23:25 zeroed sshd[1777]: Disconnecting invalid user wqeqwe2 127.0.0.1 port 55826: Too many authentication failures [preauth]
Nov 22 11:23:25 zeroed sshd[1780]: Invalid user wqeqwe3 from 127.0.0.1 port 55828
Nov 22 11:23:26 zeroed sshd[1780]: Disconnecting invalid user wqeqwe3 127.0.0.1 port 55828: Too many authentication failures [preauth]
Nov 22 11:23:26 zeroed sshd[1786]: Invalid user wqeqwe4 from 127.0.0.1 port 55830
Nov 22 11:23:27 zeroed sshd[1786]: Failed password for invalid user wqeqwe4 from 127.0.0.1 port 55830 ssh2
Nov 22 11:23:27 zeroed sshd[1786]: Disconnecting invalid user wqeqwe4 127.0.0.1 port 55830: Too many authentication failures [preauth]
Nov 22 11:23:27 zeroed sshd[1791]: Invalid user wqeqwe5 from 127.0.0.1 port 55834
Nov 22 11:23:27 zeroed sshd[1791]: Failed password for invalid user wqeqwe5 from 127.0.0.1 port 55834 ssh2
`
|
||||
|
||||
// TestSimJournalctlCat is not a real test : when the test binary is re-executed
// with GO_WANT_TEST_OUTPUT=1 (see TestJournaldSimple), it impersonates
// journalctl by printing the 14 canned sshd lines on stdout, then exits.
func TestSimJournalctlCat(t *testing.T) {
	if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" {
		return
	}
	defer os.Exit(0)
	fmt.Print(testjournalctl_output_1)
}
|
||||
|
||||
// TestSimJournalctlCatError is not a real test : under GO_WANT_TEST_OUTPUT=1
// it emits one stdout line plus a logrus warning (which presumably lands on
// stderr — confirm), so the stderr reading path of readOutput can be exercised.
func TestSimJournalctlCatError(t *testing.T) {
	if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" {
		return
	}
	defer os.Exit(0)
	fmt.Print("this is a single line being produced")
	log.Warningf("this is an error message")
}
|
||||
|
||||
// TestSimJournalctlCatOneLine is not a real test : under GO_WANT_TEST_OUTPUT=1
// it impersonates journalctl by printing a single stdout line, then exits.
func TestSimJournalctlCatOneLine(t *testing.T) {
	if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" {
		return
	}
	defer os.Exit(0)
	fmt.Print("this is a single line being produced")
}
|
||||
|
||||
func TestJournaldTail(t *testing.T) {
|
||||
tests := []struct {
|
||||
cfg DataSourceCfg
|
||||
config_error string
|
||||
read_error string
|
||||
tomb_error string
|
||||
lines int
|
||||
}{
|
||||
{ //missing filename(s)
|
||||
cfg: DataSourceCfg{
|
||||
Mode: TAIL_MODE,
|
||||
},
|
||||
config_error: "journalctl_filter shouldn't be empty",
|
||||
},
|
||||
{ //bad mode
|
||||
cfg: DataSourceCfg{
|
||||
Mode: "ratatata",
|
||||
JournalctlFilters: []string{"-test.run=DoesNotExist", "--"},
|
||||
},
|
||||
/*here would actually be the journalctl error message on bad args, but you get the point*/
|
||||
config_error: "unknown mode 'ratatata' for journald source",
|
||||
},
|
||||
{ //wrong arguments
|
||||
cfg: DataSourceCfg{
|
||||
Mode: TAIL_MODE,
|
||||
JournalctlFilters: []string{"--this-is-bad-option", "--"},
|
||||
},
|
||||
/*here would actually be the journalctl error message on bad args, but you get the point*/
|
||||
tomb_error: "flag provided but not defined: -this-is-bad-option",
|
||||
},
|
||||
}
|
||||
|
||||
//we're actually using tests to do this, hold my beer and watch this
|
||||
JOURNALD_CMD = os.Args[0]
|
||||
JOURNALD_DEFAULT_TAIL_ARGS = []string{}
|
||||
|
||||
for tidx, test := range tests {
|
||||
journalSrc := new(JournaldSource)
|
||||
err := journalSrc.Configure(test.cfg)
|
||||
if test.config_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.config_error)
|
||||
log.Infof("expected config error ok : %s", test.config_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, journalSrc.Mode(), test.cfg.Mode)
|
||||
|
||||
//this tells our fake tests to produce data
|
||||
journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"}
|
||||
|
||||
out := make(chan types.Event)
|
||||
tomb := tomb.Tomb{}
|
||||
count := 0
|
||||
|
||||
//start consuming the data before we start the prog, so that chan isn't full
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
count++
|
||||
case <-time.After(1 * time.Second):
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = journalSrc.StartReading(out, &tomb)
|
||||
if test.read_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", err), test.read_error)
|
||||
log.Infof("expected read error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err)
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
log.Printf("now let's check number of lines & errors")
|
||||
if count != test.lines {
|
||||
t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count)
|
||||
}
|
||||
|
||||
if test.tomb_error != "" {
|
||||
assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error)
|
||||
log.Infof("expected tomb error ok : %s", test.read_error)
|
||||
continue
|
||||
} else {
|
||||
if tomb.Err() != nil {
|
||||
t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// TestJournaldSimple runs the source in CAT_MODE against the fake journalctl
// (the test binary re-running TestSimJournalctlCat) and checks that all 14
// canned lines are turned into events.
func TestJournaldSimple(t *testing.T) {
	JOURNALD_CMD = os.Args[0]
	JOURNALD_DEFAULT_TAIL_ARGS = []string{}
	jBaseCfg := DataSourceCfg{
		JournalctlFilters: []string{"-test.run=TestSimJournalctlCat", "--"},
		Mode:              CAT_MODE,
	}

	journalSrc := new(JournaldSource)
	err := journalSrc.Configure(jBaseCfg)
	if err != nil {
		t.Fatalf("configuring journalctl : %s", err)
	}
	//this tells our fake journalctl to produce output
	journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"}

	out := make(chan types.Event)
	tomb := tomb.Tomb{}
	count := 0

	//start the reading : it doesn't give hand back before it's done
	err = journalSrc.StartReading(out, &tomb)
	if err != nil {
		t.Fatalf("unexpected read error %s", err)
	}

	//drain events until the channel stays quiet for a second
RLOOP:
	for {
		select {
		case <-out:
			count++
		case <-time.After(1 * time.Second):
			break RLOOP
		}
	}
	//we expect 14 lines to be read
	assert.Equal(t, 14, count)

}
|
||||
|
||||
// TestJournalctlKill checks the shutdown path : start the source against the
// one-line fake journalctl, verify the tomb is alive, kill it, and verify the
// tomb dies cleanly (nil error).
func TestJournalctlKill(t *testing.T) {
	cfg := DataSourceCfg{
		Mode:              CAT_MODE,
		JournalctlFilters: []string{"-test.run=TestSimJournalctlCatOneLine", "--"},
	}
	//we're actually using tests to do this, hold my beer and watch this
	JOURNALD_CMD = os.Args[0]
	JOURNALD_DEFAULT_TAIL_ARGS = []string{}

	log.SetLevel(log.TraceLevel)
	journalSrc := new(JournaldSource)
	err := journalSrc.Configure(cfg)
	if err != nil {
		t.Fatalf("unexpected config error %s", err)
	}
	//this tells our fake journalctl to produce output
	journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"}

	out := make(chan types.Event)
	tb := tomb.Tomb{}

	err = journalSrc.StartReading(out, &tb)
	if err != nil {
		t.Fatalf("unexpected read error %s", err)
	}
	time.Sleep(1 * time.Second)
	if tb.Err() != tomb.ErrStillAlive {
		t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err())
	}
	//kill it :>
	tb.Kill(nil)
	time.Sleep(1 * time.Second)
	if tb.Err() != nil {
		t.Fatalf("unexpected tomb error %s (should be dead)", tb.Err())
	}

}
|
681
pkg/acquisition/modules/cloudwatch/cloudwatch.go
Normal file
681
pkg/acquisition/modules/cloudwatch/cloudwatch.go
Normal file
|
@ -0,0 +1,681 @@
|
|||
package cloudwatchacquisition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/parser"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/tomb.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
||||
)
|
||||
|
||||
//openedStreams tracks how many streams are currently being monitored, per group
//(incremented/decremented by LogStreamManager)
var openedStreams = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "cs_cloudwatch_openstreams_total",
		Help: "Number of opened stream within group.",
	},
	[]string{"group"},
)

//linesRead counts events read, per group & stream
var linesRead = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_cloudwatch_stream_hits_total",
		Help: "Number of event read from stream.",
	},
	[]string{"group", "stream"},
)
|
||||
|
||||
//CloudwatchSource is the runtime instance keeping track of N streams within 1 cloudwatch group
type CloudwatchSource struct {
	Config CloudwatchSourceConfiguration //user configuration, filled by Configure
	/*runtime stuff*/
	logger           *log.Entry
	t                *tomb.Tomb                     //tomb of the whole datasource, set by StreamingAcquisition
	cwClient         *cloudwatchlogs.CloudWatchLogs //aws sdk client, built by newClient
	monitoredStreams []*LogStreamTailConfig         //streams currently tailed, managed by LogStreamManager
	streamIndexes    map[string]string              //NOTE(review): initialized in Configure but not used in this chunk — confirm purpose (likely per-stream pagination tokens)
}
|
||||
|
||||
//CloudwatchSourceConfiguration allows user to define one or more streams to monitor within a cloudwatch log group.
//Optional fields are pointers so that Configure can distinguish "unset" from zero and apply the def_* defaults.
type CloudwatchSourceConfiguration struct {
	configuration.DataSourceCommonCfg `yaml:",inline"`
	GroupName                         string         `yaml:"group_name"`              //the group name to be monitored
	StreamRegexp                      *string        `yaml:"stream_regexp,omitempty"` //allow to filter specific streams
	StreamName                        *string        `yaml:"stream_name,omitempty"`   //monitor a single stream by exact name
	StartTime, EndTime                *time.Time     `yaml:"-"`                       //not user-settable via yaml
	DescribeLogStreamsLimit           *int64         `yaml:"describelogstreams_limit,omitempty"` //batch size for DescribeLogStreamsPagesWithContext
	GetLogEventsPagesLimit            *int64         `yaml:"getlogeventspages_limit,omitempty"`  //batch size handed down to each stream tail
	PollNewStreamInterval             *time.Duration `yaml:"poll_new_stream_interval,omitempty"` //frequency at which we poll for new streams within the log group
	MaxStreamAge                      *time.Duration `yaml:"max_stream_age,omitempty"`           //monitor only streams that have been updated within $duration
	PollStreamInterval                *time.Duration `yaml:"poll_stream_interval,omitempty"`     //frequency at which we poll each stream
	StreamReadTimeout                 *time.Duration `yaml:"stream_read_timeout,omitempty"`      //stop monitoring streams that haven't been updated within $duration, might be reopened later tho
	AwsApiCallTimeout                 *time.Duration `yaml:"aws_api_timeout,omitempty"`
	AwsProfile                        *string        `yaml:"aws_profile,omitempty"` //aws shared-config profile to use for the session
	PrependCloudwatchTimestamp        *bool          `yaml:"prepend_cloudwatch_timestamp,omitempty"`
	AwsConfigDir                      *string        `yaml:"aws_config_dir,omitempty"` //where to find aws config & credentials (defaults to /root/.aws/)
}
|
||||
|
||||
//LogStreamTailConfig is the configuration for one given stream within one group
type LogStreamTailConfig struct {
	GroupName                  string //group the stream belongs to
	StreamName                 string
	GetLogEventsPagesLimit     int64         //batch limit, copied from the group configuration
	PollStreamInterval         time.Duration //how often the stream is polled for new events
	StreamReadTimeout          time.Duration //idle duration after which monitoring stops (stream may be reopened later)
	PrependCloudwatchTimestamp *bool
	Labels                     map[string]string //labels attached to every emitted event
	logger                     *log.Entry
	ExpectMode                 int       //leaky bucket mode for emitted events (leaky.LIVE when streaming)
	t                          tomb.Tomb //per-stream tomb, started/killed by LogStreamManager
	StartTime, EndTime         time.Time //only used for CatMode
}
|
||||
|
||||
//default values applied by Configure for every option the user left unset
var (
	def_DescribeLogStreamsLimit = int64(50)
	def_PollNewStreamInterval   = 10 * time.Second
	def_MaxStreamAge            = 5 * time.Minute
	def_PollStreamInterval      = 10 * time.Second
	def_AwsApiCallTimeout       = 10 * time.Second
	def_StreamReadTimeout       = 10 * time.Minute
	def_PollDeadStreamInterval  = 10 * time.Second //how often LogStreamManager sweeps dead streams (not user-configurable)
	def_GetLogEventsPagesLimit  = int64(1000)
	def_AwsConfigDir            = "/root/.aws/"
)
|
||||
|
||||
// Configure parses the yaml configuration, fills every unset option with its
// def_* default, validates the group/stream selection, and builds the aws
// client. Note : it mutates the process environment (AWS_* variables) so the
// aws sdk resolves config/credentials from the configured aws_config_dir.
func (cw *CloudwatchSource) Configure(cfg []byte, logger *log.Entry) error {
	cwConfig := CloudwatchSourceConfiguration{}
	targetStream := "*" //only used for the final informational log line
	if err := yaml.UnmarshalStrict(cfg, &cwConfig); err != nil {
		return errors.Wrap(err, "Cannot parse CloudwatchSource configuration")
	}
	cw.Config = cwConfig
	if len(cw.Config.GroupName) == 0 {
		return fmt.Errorf("group_name is mandatory for CloudwatchSource")
	}
	cw.logger = logger.WithField("group", cw.Config.GroupName)
	if cw.Config.Mode == "" {
		cw.Config.Mode = configuration.TAIL_MODE
	}
	logger.Debugf("Starting configuration for Cloudwatch group %s", cw.Config.GroupName)

	//apply defaults for every optional knob (pointer == unset)
	if cw.Config.DescribeLogStreamsLimit == nil {
		cw.Config.DescribeLogStreamsLimit = &def_DescribeLogStreamsLimit
	}
	logger.Tracef("describelogstreams_limit set to %d", *cw.Config.DescribeLogStreamsLimit)
	if cw.Config.PollNewStreamInterval == nil {
		cw.Config.PollNewStreamInterval = &def_PollNewStreamInterval
	}
	logger.Tracef("poll_new_stream_interval set to %v", *cw.Config.PollNewStreamInterval)
	if cw.Config.MaxStreamAge == nil {
		cw.Config.MaxStreamAge = &def_MaxStreamAge
	}
	logger.Tracef("max_stream_age set to %v", *cw.Config.MaxStreamAge)
	if cw.Config.PollStreamInterval == nil {
		cw.Config.PollStreamInterval = &def_PollStreamInterval
	}
	logger.Tracef("poll_stream_interval set to %v", *cw.Config.PollStreamInterval)
	if cw.Config.StreamReadTimeout == nil {
		cw.Config.StreamReadTimeout = &def_StreamReadTimeout
	}
	logger.Tracef("stream_read_timeout set to %v", *cw.Config.StreamReadTimeout)
	if cw.Config.GetLogEventsPagesLimit == nil {
		cw.Config.GetLogEventsPagesLimit = &def_GetLogEventsPagesLimit
	}
	logger.Tracef("getlogeventspages_limit set to %v", *cw.Config.GetLogEventsPagesLimit)
	if cw.Config.AwsApiCallTimeout == nil {
		cw.Config.AwsApiCallTimeout = &def_AwsApiCallTimeout
	}
	logger.Tracef("aws_api_timeout set to %v", *cw.Config.AwsApiCallTimeout)
	if *cw.Config.MaxStreamAge > *cw.Config.StreamReadTimeout {
		logger.Warningf("max_stream_age > stream_read_timeout, stream might keep being opened/closed")
	}
	if cw.Config.AwsConfigDir == nil {
		cw.Config.AwsConfigDir = &def_AwsConfigDir
	}
	logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir)
	_, err := os.Stat(*cw.Config.AwsConfigDir)
	if os.IsNotExist(err) {
		logger.Errorf("aws_config_dir '%s' : directory does not exists", *cw.Config.AwsConfigDir)
		return fmt.Errorf("aws_config_dir %s does not exist", *cw.Config.AwsConfigDir)
	}
	os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
	//as aws sdk relies on $HOME, let's allow the user to override it :)
	os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir))
	os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir))
	if err := cw.newClient(); err != nil {
		return err
	}
	cw.streamIndexes = make(map[string]string)

	//validate the regexp now, so a bad pattern fails at configure time
	//(the compiled form is discarded here)
	if cw.Config.StreamRegexp != nil {
		if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil {
			return errors.Wrapf(err, "error while compiling regexp '%s'", *cw.Config.StreamRegexp)
		}
		targetStream = *cw.Config.StreamRegexp
	} else if cw.Config.StreamName != nil {
		targetStream = *cw.Config.StreamName
	}

	logger.Infof("Adding cloudwatch group '%s' (stream:%s) to datasources", cw.Config.GroupName, targetStream)
	return nil
}
|
||||
|
||||
func (cw *CloudwatchSource) newClient() error {
|
||||
var sess *session.Session
|
||||
|
||||
if cw.Config.AwsProfile != nil {
|
||||
sess = session.Must(session.NewSessionWithOptions(session.Options{
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
Profile: *cw.Config.AwsProfile,
|
||||
}))
|
||||
} else {
|
||||
sess = session.Must(session.NewSessionWithOptions(session.Options{
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
}))
|
||||
}
|
||||
|
||||
if sess == nil {
|
||||
return fmt.Errorf("failed to create aws session")
|
||||
}
|
||||
if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" {
|
||||
cw.logger.Debugf("[testing] overloading endpoint with %s", v)
|
||||
cw.cwClient = cloudwatchlogs.New(sess, aws.NewConfig().WithEndpoint(v))
|
||||
} else {
|
||||
cw.cwClient = cloudwatchlogs.New(sess)
|
||||
}
|
||||
if cw.cwClient == nil {
|
||||
return fmt.Errorf("failed to create cloudwatch client")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StreamingAcquisition starts the cloudwatch source : LogStreamManager runs in
// its own tomb goroutine consuming discovered streams and emitting events on
// `out`, while this goroutine blocks in WatchLogGroupForStreams polling the
// group for fresh streams.
func (cw *CloudwatchSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
	cw.t = t
	monitChan := make(chan LogStreamTailConfig)
	t.Go(func() error {
		return cw.LogStreamManager(monitChan, out)
	})
	return cw.WatchLogGroupForStreams(monitChan)
}
|
||||
|
||||
// GetMetrics returns the prometheus collectors specific to this datasource.
func (cw *CloudwatchSource) GetMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead, openedStreams}
}
|
||||
|
||||
// GetAggregMetrics returns the collectors for the aggregated metrics level
// (the same set as GetMetrics for this source).
func (cw *CloudwatchSource) GetAggregMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead, openedStreams}
}
|
||||
|
||||
// GetMode returns the configured acquisition mode (tail by default, see Configure).
func (cw *CloudwatchSource) GetMode() string {
	return cw.Config.Mode
}
|
||||
|
||||
// GetName returns the datasource type identifier.
func (cw *CloudwatchSource) GetName() string {
	return "cloudwatch"
}
|
||||
|
||||
// CanRun always allows the cloudwatch datasource (no platform prerequisite).
func (cw *CloudwatchSource) CanRun() error {
	return nil
}
|
||||
|
||||
// Dump returns the source itself so its runtime state can be introspected.
func (cw *CloudwatchSource) Dump() interface{} {
	return cw
}
|
||||
|
||||
func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error {
|
||||
cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval)
|
||||
ticker := time.NewTicker(*cw.Config.PollNewStreamInterval)
|
||||
var startFrom *string
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-cw.t.Dying():
|
||||
cw.logger.Infof("stopping group watch")
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
hasMoreStreams := true
|
||||
startFrom = nil
|
||||
for hasMoreStreams {
|
||||
cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext")
|
||||
|
||||
ctx := context.Background()
|
||||
//there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime
|
||||
err := cw.cwClient.DescribeLogStreamsPagesWithContext(
|
||||
ctx,
|
||||
&cloudwatchlogs.DescribeLogStreamsInput{
|
||||
LogGroupName: aws.String(cw.Config.GroupName),
|
||||
Descending: aws.Bool(true),
|
||||
NextToken: startFrom,
|
||||
OrderBy: aws.String(cloudwatchlogs.OrderByLastEventTime),
|
||||
Limit: cw.Config.DescribeLogStreamsLimit,
|
||||
},
|
||||
func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool {
|
||||
cw.logger.Tracef("in helper of of DescribeLogStreamsPagesWithContext")
|
||||
for _, event := range page.LogStreams {
|
||||
startFrom = page.NextToken
|
||||
//we check if the stream has been written to recently enough to be monitored
|
||||
if event.LastIngestionTime != nil {
|
||||
//aws uses millisecond since the epoch
|
||||
oldest := time.Now().UTC().Add(-*cw.Config.MaxStreamAge)
|
||||
//TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit.
|
||||
LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond))
|
||||
if LastIngestionTime.Before(oldest) {
|
||||
cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().Add(-*cw.Config.MaxStreamAge))
|
||||
hasMoreStreams = false
|
||||
return false
|
||||
}
|
||||
cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName)
|
||||
//the stream has been update recently, check if we should monitor it
|
||||
monitorStream := LogStreamTailConfig{
|
||||
GroupName: cw.Config.GroupName,
|
||||
StreamName: *event.LogStreamName,
|
||||
GetLogEventsPagesLimit: *cw.Config.GetLogEventsPagesLimit,
|
||||
PollStreamInterval: *cw.Config.PollStreamInterval,
|
||||
StreamReadTimeout: *cw.Config.StreamReadTimeout,
|
||||
PrependCloudwatchTimestamp: cw.Config.PrependCloudwatchTimestamp,
|
||||
ExpectMode: leaky.LIVE,
|
||||
Labels: cw.Config.Labels,
|
||||
}
|
||||
out <- monitorStream
|
||||
}
|
||||
}
|
||||
if lastPage {
|
||||
cw.logger.Tracef("reached last page")
|
||||
hasMoreStreams = false
|
||||
}
|
||||
return true
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
newerr := errors.Wrapf(err, "while describing group %s", cw.Config.GroupName)
|
||||
return newerr
|
||||
}
|
||||
cw.logger.Tracef("after DescribeLogStreamsPagesWithContext")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//LogStreamManager receives the potential streams to monitor, and start a go routine when needed
|
||||
func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error {
|
||||
|
||||
cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName)
|
||||
pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval)
|
||||
|
||||
for {
|
||||
select {
|
||||
case newStream := <-in:
|
||||
shouldCreate := true
|
||||
cw.logger.Tracef("received new streams to monitor : %s/%s", newStream.GroupName, newStream.StreamName)
|
||||
|
||||
if cw.Config.StreamName != nil && newStream.StreamName != *cw.Config.StreamName {
|
||||
cw.logger.Tracef("stream %s != %s", newStream.StreamName, *cw.Config.StreamName)
|
||||
continue
|
||||
}
|
||||
|
||||
if cw.Config.StreamRegexp != nil {
|
||||
match, err := regexp.Match(newStream.StreamName, []byte(*cw.Config.StreamRegexp))
|
||||
if err != nil {
|
||||
cw.logger.Warningf("invalid regexp : %s", err)
|
||||
} else {
|
||||
if !match {
|
||||
cw.logger.Tracef("stream %s doesn't match %s", newStream.StreamName, *cw.Config.StreamRegexp)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for idx, stream := range cw.monitoredStreams {
|
||||
if newStream.GroupName == stream.GroupName && newStream.StreamName == stream.StreamName {
|
||||
//stream exists, but is dead, remove it from list
|
||||
if !stream.t.Alive() {
|
||||
cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName)
|
||||
cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
|
||||
openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec()
|
||||
break
|
||||
}
|
||||
shouldCreate = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
//let's start watching this stream
|
||||
if shouldCreate {
|
||||
openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc()
|
||||
newStream.t = tomb.Tomb{}
|
||||
newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName})
|
||||
cw.logger.Debugf("starting tail of stream %s", newStream.StreamName)
|
||||
newStream.t.Go(func() error {
|
||||
return cw.TailLogStream(&newStream, outChan)
|
||||
})
|
||||
cw.monitoredStreams = append(cw.monitoredStreams, &newStream)
|
||||
}
|
||||
case <-pollDeadStreamInterval.C:
|
||||
for idx, stream := range cw.monitoredStreams {
|
||||
if !cw.monitoredStreams[idx].t.Alive() {
|
||||
cw.logger.Debugf("remove dead stream %s", stream.StreamName)
|
||||
openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec()
|
||||
cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
case <-cw.t.Dying():
|
||||
cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams))
|
||||
for idx, stream := range cw.monitoredStreams {
|
||||
if cw.monitoredStreams[idx].t.Alive() {
|
||||
cw.logger.Debugf("killing stream %s", stream.StreamName)
|
||||
cw.monitoredStreams[idx].t.Kill(nil)
|
||||
if err := cw.monitoredStreams[idx].t.Wait(); err != nil {
|
||||
cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err)
|
||||
}
|
||||
} else {
|
||||
cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
|
||||
}
|
||||
}
|
||||
cw.logger.Debugf("routine cleanup done, return")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error {
|
||||
var startFrom *string
|
||||
var lastReadMessage time.Time = time.Now()
|
||||
startup := true
|
||||
|
||||
ticker := time.NewTicker(cfg.PollStreamInterval)
|
||||
//resume at existing index if we already had
|
||||
if v, ok := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName]; ok && v != "" {
|
||||
cfg.logger.Debugf("restarting on index %s", v)
|
||||
startFrom = &v
|
||||
startup = false
|
||||
}
|
||||
/*during first run, we want to avoid reading any message, but just get a token.
|
||||
if we don't, we might end up sending the same item several times. hence the 'startup' hack */
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
cfg.logger.Tracef("entering loop")
|
||||
hasMorePages := true
|
||||
for hasMorePages {
|
||||
/*for the first call, we only consume the last item*/
|
||||
limit := cfg.GetLogEventsPagesLimit
|
||||
if startup {
|
||||
limit = 1
|
||||
}
|
||||
cfg.logger.Tracef("calling GetLogEventsPagesWithContext")
|
||||
ctx := context.Background()
|
||||
err := cw.cwClient.GetLogEventsPagesWithContext(ctx,
|
||||
&cloudwatchlogs.GetLogEventsInput{
|
||||
Limit: aws.Int64(limit),
|
||||
LogGroupName: aws.String(cfg.GroupName),
|
||||
LogStreamName: aws.String(cfg.StreamName),
|
||||
NextToken: startFrom,
|
||||
},
|
||||
func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {
|
||||
cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage)
|
||||
startFrom = page.NextForwardToken
|
||||
if page.NextForwardToken != nil {
|
||||
cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken
|
||||
}
|
||||
if startup { //we grab the NextForwardToken and we return on first iteration
|
||||
return false
|
||||
}
|
||||
if lastPage { /*wait another ticker to check on new log availability*/
|
||||
cfg.logger.Tracef("last page")
|
||||
hasMorePages = false
|
||||
}
|
||||
if len(page.Events) > 0 {
|
||||
lastReadMessage = time.Now()
|
||||
}
|
||||
for _, event := range page.Events {
|
||||
evt, err := cwLogToEvent(event, cfg)
|
||||
if err != nil {
|
||||
cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err)
|
||||
} else {
|
||||
cfg.logger.Debugf("pushing message : %s", evt.Line.Raw)
|
||||
linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc()
|
||||
outChan <- evt
|
||||
|
||||
}
|
||||
}
|
||||
return true
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
newerr := errors.Wrapf(err, "while reading %s/%s", cfg.GroupName, cfg.StreamName)
|
||||
cfg.logger.Warningf("err : %s", newerr)
|
||||
return newerr
|
||||
}
|
||||
if startup {
|
||||
startup = false
|
||||
}
|
||||
cfg.logger.Tracef("done reading GetLogEventsPagesWithContext")
|
||||
|
||||
if time.Since(lastReadMessage) > cfg.StreamReadTimeout {
|
||||
cfg.logger.Infof("%s/%s reached timeout (%s) (last message was %s)", cfg.GroupName, cfg.StreamName, time.Since(lastReadMessage),
|
||||
lastReadMessage)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
case <-cfg.t.Dying():
|
||||
cfg.logger.Infof("logstream tail stopping")
|
||||
return fmt.Errorf("killed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cw *CloudwatchSource) ConfigureByDSN(dsn string, logtype string, logger *log.Entry) error {
|
||||
cw.logger = logger
|
||||
|
||||
dsn = strings.TrimPrefix(dsn, cw.GetName()+"://")
|
||||
args := strings.Split(dsn, "?")
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("query is mandatory (at least start_date and end_date or backlog)")
|
||||
}
|
||||
frags := strings.Split(args[0], ":")
|
||||
if len(frags) != 2 {
|
||||
return fmt.Errorf("cloudwatch path must contain group and stream : /my/group/name:stream/name")
|
||||
}
|
||||
cw.Config.GroupName = frags[0]
|
||||
cw.Config.StreamName = &frags[1]
|
||||
cw.Config.Labels = make(map[string]string)
|
||||
cw.Config.Labels["type"] = logtype
|
||||
|
||||
u, err := url.ParseQuery(args[1])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "while parsing %s", dsn)
|
||||
}
|
||||
|
||||
for k, v := range u {
|
||||
switch k {
|
||||
case "log_level":
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("expected zero or one value for 'log_level'")
|
||||
}
|
||||
lvl, err := log.ParseLevel(v[0])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unknown level %s", v[0])
|
||||
}
|
||||
cw.logger.Logger.SetLevel(lvl)
|
||||
|
||||
case "profile":
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("expected zero or one value for 'profile'")
|
||||
}
|
||||
awsprof := v[0]
|
||||
cw.Config.AwsProfile = &awsprof
|
||||
cw.logger.Debugf("profile set to '%s'", *cw.Config.AwsProfile)
|
||||
case "start_date":
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("expected zero or one argument for 'start_date'")
|
||||
}
|
||||
//let's reuse our parser helper so that a ton of date formats are supported
|
||||
strdate, startDate := parser.GenDateParse(v[0])
|
||||
cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate)
|
||||
cw.Config.StartTime = &startDate
|
||||
case "end_date":
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("expected zero or one argument for 'end_date'")
|
||||
}
|
||||
//let's reuse our parser helper so that a ton of date formats are supported
|
||||
strdate, endDate := parser.GenDateParse(v[0])
|
||||
cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate)
|
||||
cw.Config.EndTime = &endDate
|
||||
case "backlog":
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("expected zero or one argument for 'backlog'")
|
||||
}
|
||||
//let's reuse our parser helper so that a ton of date formats are supported
|
||||
duration, err := time.ParseDuration(v[0])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to parse '%s' as duration", v[0])
|
||||
}
|
||||
cw.logger.Debugf("parsed '%s' as '%s'", v[0], duration)
|
||||
start := time.Now().UTC().Add(-duration)
|
||||
cw.Config.StartTime = &start
|
||||
end := time.Now().UTC()
|
||||
cw.Config.EndTime = &end
|
||||
default:
|
||||
return fmt.Errorf("unexpected argument %s", k)
|
||||
}
|
||||
}
|
||||
cw.logger.Tracef("host=%s", cw.Config.GroupName)
|
||||
cw.logger.Tracef("stream=%s", *cw.Config.StreamName)
|
||||
cw.Config.GetLogEventsPagesLimit = &def_GetLogEventsPagesLimit
|
||||
|
||||
if err := cw.newClient(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cw.Config.StreamName == nil || cw.Config.GroupName == "" {
|
||||
return fmt.Errorf("missing stream or group name")
|
||||
}
|
||||
if cw.Config.StartTime == nil || cw.Config.EndTime == nil {
|
||||
return fmt.Errorf("start_date and end_date or backlog are mandatory in one-shot mode")
|
||||
}
|
||||
|
||||
cw.Config.Mode = configuration.CAT_MODE
|
||||
cw.streamIndexes = make(map[string]string)
|
||||
cw.t = &tomb.Tomb{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
|
||||
//StreamName string, Start time.Time, End time.Time
|
||||
config := LogStreamTailConfig{
|
||||
GroupName: cw.Config.GroupName,
|
||||
StreamName: *cw.Config.StreamName,
|
||||
StartTime: *cw.Config.StartTime,
|
||||
EndTime: *cw.Config.EndTime,
|
||||
GetLogEventsPagesLimit: *cw.Config.GetLogEventsPagesLimit,
|
||||
logger: cw.logger.WithFields(log.Fields{
|
||||
"group": cw.Config.GroupName,
|
||||
"stream": *cw.Config.StreamName,
|
||||
}),
|
||||
Labels: cw.Config.Labels,
|
||||
ExpectMode: leaky.TIMEMACHINE,
|
||||
}
|
||||
return cw.CatLogStream(&config, out)
|
||||
}
|
||||
|
||||
func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error {
|
||||
var startFrom *string
|
||||
var head = true
|
||||
/*convert the times*/
|
||||
startTime := cfg.StartTime.UTC().Unix() * 1000
|
||||
endTime := cfg.EndTime.UTC().Unix() * 1000
|
||||
hasMoreEvents := true
|
||||
for hasMoreEvents {
|
||||
select {
|
||||
default:
|
||||
cfg.logger.Tracef("Calling GetLogEventsPagesWithContext(%s, %s), startTime:%d / endTime:%d",
|
||||
cfg.GroupName, cfg.StreamName, startTime, endTime)
|
||||
cfg.logger.Tracef("startTime:%s / endTime:%s", cfg.StartTime, cfg.EndTime)
|
||||
if startFrom != nil {
|
||||
cfg.logger.Tracef("next_token: %s", *startFrom)
|
||||
}
|
||||
ctx := context.Background()
|
||||
err := cw.cwClient.GetLogEventsPagesWithContext(ctx,
|
||||
&cloudwatchlogs.GetLogEventsInput{
|
||||
Limit: aws.Int64(10),
|
||||
LogGroupName: aws.String(cfg.GroupName),
|
||||
LogStreamName: aws.String(cfg.StreamName),
|
||||
StartTime: aws.Int64(startTime),
|
||||
EndTime: aws.Int64(endTime),
|
||||
StartFromHead: &head,
|
||||
NextToken: startFrom,
|
||||
},
|
||||
func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {
|
||||
cfg.logger.Tracef("in GetLogEventsPagesWithContext handker (%d events) (last:%t)", len(page.Events), lastPage)
|
||||
for _, event := range page.Events {
|
||||
evt, err := cwLogToEvent(event, cfg)
|
||||
if err != nil {
|
||||
cfg.logger.Warningf("discard event : %s", err)
|
||||
}
|
||||
cfg.logger.Debugf("pushing message : %s", evt.Line.Raw)
|
||||
outChan <- evt
|
||||
}
|
||||
if startFrom != nil && *page.NextForwardToken == *startFrom {
|
||||
cfg.logger.Debugf("reached end of available events")
|
||||
hasMoreEvents = false
|
||||
return false
|
||||
}
|
||||
startFrom = page.NextForwardToken
|
||||
return true
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "while reading logs from %s/%s", cfg.GroupName, cfg.StreamName)
|
||||
}
|
||||
cfg.logger.Tracef("after GetLogEventsPagesWithContext")
|
||||
case <-cw.t.Dying():
|
||||
cfg.logger.Warningf("cat stream killed")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
cfg.logger.Tracef("CatLogStream out")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) (types.Event, error) {
|
||||
l := types.Line{}
|
||||
evt := types.Event{}
|
||||
if log.Message == nil {
|
||||
return evt, fmt.Errorf("nil message")
|
||||
}
|
||||
msg := *log.Message
|
||||
if cfg.PrependCloudwatchTimestamp != nil && *cfg.PrependCloudwatchTimestamp {
|
||||
eventTimestamp := time.Unix(0, *log.Timestamp*int64(time.Millisecond))
|
||||
msg = eventTimestamp.String() + " " + msg
|
||||
}
|
||||
|
||||
l.Raw = msg
|
||||
l.Labels = cfg.Labels
|
||||
l.Time = time.Now()
|
||||
l.Src = fmt.Sprintf("%s/%s", cfg.GroupName, cfg.StreamName)
|
||||
l.Process = true
|
||||
l.Module = "cloudwatch"
|
||||
evt.Line = l
|
||||
evt.Process = true
|
||||
evt.Type = types.LOG
|
||||
evt.ExpectMode = cfg.ExpectMode
|
||||
cfg.logger.Debugf("returned event labels : %+v", evt.Line.Labels)
|
||||
return evt, nil
|
||||
}
|
862
pkg/acquisition/modules/cloudwatch/cloudwatch_test.go
Normal file
862
pkg/acquisition/modules/cloudwatch/cloudwatch_test.go
Normal file
|
@ -0,0 +1,862 @@
|
|||
package cloudwatchacquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
/*
|
||||
test plan :
|
||||
- start on bad group/bad stream
|
||||
- start on good settings (oneshot) -> check expected messages
|
||||
- start on good settings (stream) -> check expected messages within given time
|
||||
- check shutdown/restart
|
||||
*/
|
||||
|
||||
//checkForLocalStackAvailability verifies that the localstack aws endpoint pointed to
//by AWS_ENDPOINT_FORCE is reachable before running the test suite. Returns an error
//if the variable is unset or the endpoint cannot be dialed.
func checkForLocalStackAvailability() error {
	v := os.Getenv("AWS_ENDPOINT_FORCE")
	if v == "" {
		return fmt.Errorf("missing aws endpoint for tests : AWS_ENDPOINT_FORCE")
	}
	v = strings.TrimPrefix(v, "http://")
	conn, err := net.Dial("tcp", v)
	if err != nil {
		return fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err)
	}
	//fix: the probe connection was leaked; close it once reachability is confirmed
	conn.Close()
	return nil
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := checkForLocalStackAvailability(); err != nil {
|
||||
log.Fatalf("local stack error : %s", err)
|
||||
}
|
||||
def_PollNewStreamInterval = 1 * time.Second
|
||||
def_PollStreamInterval = 1 * time.Second
|
||||
def_StreamReadTimeout = 10 * time.Second
|
||||
def_MaxStreamAge = 5 * time.Second
|
||||
def_PollDeadStreamInterval = 5 * time.Second
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
//TestWatchLogGroupForStreams is a table-driven integration test against localstack.
//Each case configures a CloudwatchSource, optionally creates groups/streams/events
//(pre), runs StreamingAcquisition while a collector goroutine drains the output
//channel, optionally injects more events (run), then checks the collected events
//against expectedResLen/expectedResMessages and cleans up (post).
//NOTE(review): the test is timing-sensitive — it relies on the shortened def_* poll
//intervals set in TestMain plus fixed sleeps.
func TestWatchLogGroupForStreams(t *testing.T) {
	var err error
	log.SetLevel(log.DebugLevel)
	tests := []struct {
		config              []byte                  //yaml datasource configuration under test
		expectedCfgErr      string                  //substring expected from Configure(), empty = no error
		expectedStartErr    string                  //substring expected from StreamingAcquisition(), empty = no error
		name                string
		pre                 func(*CloudwatchSource) //setup: create groups/streams/seed events
		run                 func(*CloudwatchSource) //actions performed while the source is running
		post                func(*CloudwatchSource) //cleanup: delete groups/streams
		expectedResLen      int
		expectedResMessages []string
	}{
		//require a group name that doesn't exist
		{
			name: "group_does_not_exists",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: b
stream_name: test_stream`),
			expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_group_not_used_1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)
				}
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_group_not_used_1"),
				}); err != nil {
					t.Fatalf("failed to delete log group : %s", err)
				}
			},
		},
		//test stream mismatch
		{
			name: "group_exists_bad_stream_name",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_group1
stream_name: test_stream_bad`),
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_group1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)
				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to create log stream : %s", err)
				}
				//have a message before we start - won't be popped, but will trigger stream monitoring
				//NOTE(review): log.Fatalf here exits the whole process; every sibling case uses t.Fatalf
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					log.Fatalf("failed to put logs")
				}
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_group1"),
				}); err != nil {
					t.Fatalf("failed to delete log group : %s", err)
				}
			},
			expectedResLen: 0,
		},
		//test stream mismatch
		{
			name: "group_exists_bad_stream_regexp",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_group1
stream_regexp: test_bad[0-9]+`),
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_group1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)
				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to create log stream : %s", err)

				}
				//have a message before we start - won't be popped, but will trigger stream monitoring
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs")
				}
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_group1"),
				}); err != nil {
					t.Fatalf("failed to delete log group : %s", err)

				}
			},
			expectedResLen: 0,
		},
		//require a group name that does exist and contains a stream in which we gonna put events
		{
			name: "group_exists_stream_exists_has_events",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_log_group1
log_level: trace
stream_name: test_stream`),
			//expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)

				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to create log stream : %s", err)

				}
				//have a message before we start - won't be popped, but will trigger stream monitoring
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs")
				}
			},
			run: func(cw *CloudwatchSource) {
				//wait for new stream pickup + stream poll interval
				time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
				time.Sleep(def_PollStreamInterval + (1 * time.Second))
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_4"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
						//and add an event in the future that will be popped
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_5"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs : %s", err)
				}
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to delete log stream : %s", err)

				}
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to delete log group : %s", err)

				}
			},
			expectedResLen:      2,
			expectedResMessages: []string{"test_message_4", "test_message_5"},
		},
		//have a stream generate events, reach time-out and gets polled again
		{
			name: "group_exists_stream_exists_has_events+timeout",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_log_group1
log_level: trace
stream_name: test_stream`),
			//expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)

				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to create log stream : %s", err)
				}
				//have a message before we start - won't be popped, but will trigger stream monitoring
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs")
				}
			},
			run: func(cw *CloudwatchSource) {
				//wait for new stream pickup + stream poll interval
				time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
				time.Sleep(def_PollStreamInterval + (1 * time.Second))
				//send some events
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_41"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs : %s", err)
				}
				//wait for the stream to time-out
				time.Sleep(def_StreamReadTimeout + (1 * time.Second))
				//and send events again
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_51"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs : %s", err)
				}
				//wait for new stream pickup + stream poll interval
				time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
				time.Sleep(def_PollStreamInterval + (1 * time.Second))
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to delete log stream : %s", err)

				}
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to delete log group : %s", err)

				}
			},
			expectedResLen:      2,
			expectedResMessages: []string{"test_message_41", "test_message_51"},
		},
		//have a stream generate events, reach time-out and dead body collection
		{
			name: "group_exists_stream_exists_has_events+timeout+GC",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_log_group1
log_level: trace
stream_name: test_stream`),
			//expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to create log group : %s", err)
				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to create log stream : %s", err)
				}
				//have a message before we start - won't be popped, but will trigger stream monitoring
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					t.Fatalf("failed to put logs")
				}
			},
			run: func(cw *CloudwatchSource) {
				//wait for new stream pickup + stream poll interval
				time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
				time.Sleep(def_PollStreamInterval + (1 * time.Second))
				time.Sleep(def_PollDeadStreamInterval + (1 * time.Second))
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("failed to delete log stream : %s", err)

				}
				//NOTE(review): message says "log stream" but this deletes the log group
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to delete log stream : %s", err)

				}
			},
			expectedResLen: 0,
		},
	}

	for _, test := range tests {
		dbgLogger := log.New().WithField("test", test.name)
		dbgLogger.Logger.SetLevel(log.DebugLevel)
		dbgLogger.Infof("starting test")
		cw := CloudwatchSource{}
		err = cw.Configure(test.config, dbgLogger)
		//three-way check of the Configure() outcome against expectedCfgErr
		if err != nil && test.expectedCfgErr != "" {
			if !strings.Contains(err.Error(), test.expectedCfgErr) {
				t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())
			}
			log.Debugf("got expected error : %s", err)
			continue
		} else if err != nil && test.expectedCfgErr == "" {
			t.Fatalf("%s unexpected error : %s", test.name, err)
			continue
		} else if test.expectedCfgErr != "" && err == nil {
			t.Fatalf("%s expected error '%s', got none", test.name, test.expectedCfgErr)
			continue
		}
		dbgLogger.Infof("config done test")
		//run pre-routine : tests use it to set group & streams etc.
		if test.pre != nil {
			test.pre(&cw)
		}
		out := make(chan types.Event)
		tmb := tomb.Tomb{}
		var rcvd_evts []types.Event

		//run the datasource in its own tomb; errors are matched against expectedStartErr
		//NOTE(review): t.Fatalf inside this goroutine is called from a non-test goroutine,
		//which the testing package documents as invalid — confirm and consider t.Errorf
		dbgLogger.Infof("running StreamingAcquisition")
		actmb := tomb.Tomb{}
		actmb.Go(func() error {
			err := cw.StreamingAcquisition(out, &actmb)
			dbgLogger.Infof("acquis done")

			if err != nil && test.expectedStartErr != "" && !strings.Contains(err.Error(), test.expectedStartErr) {
				t.Fatalf("%s expected error '%s' got '%s'", test.name, test.expectedStartErr, err.Error())
			} else if err != nil && test.expectedStartErr == "" {
				t.Fatalf("%s unexpected error '%s'", test.name, err)
			} else if err == nil && test.expectedStartErr != "" {
				t.Fatalf("%s expected error '%s' got none", test.name, err)
			}
			return nil
		})

		//let's empty output chan
		tmb.Go(func() error {
			for {
				select {
				case in := <-out:
					log.Debugf("received event %+v", in)
					rcvd_evts = append(rcvd_evts, in)
				case <-tmb.Dying():
					log.Debugf("pumper died")
					return nil
				}
			}
		})

		if test.run != nil {
			test.run(&cw)
		} else {
			dbgLogger.Warning("no run code")
		}

		//grace period for in-flight events, then tear down collector and datasource
		time.Sleep(5 * time.Second)
		dbgLogger.Infof("killing collector")
		tmb.Kill(nil)
		<-tmb.Dead()
		dbgLogger.Infof("killing datasource")
		actmb.Kill(nil)
		<-actmb.Dead()
		//dbgLogger.Infof("collected events : %d -> %+v", len(rcvd_evts), rcvd_evts)
		//check results
		if test.expectedResLen != -1 {
			if test.expectedResLen != len(rcvd_evts) {
				t.Fatalf("%s : expected %d results got %d -> %v", test.name, test.expectedResLen, len(rcvd_evts), rcvd_evts)
			} else {
				dbgLogger.Debugf("got %d expected messages", len(rcvd_evts))
			}
		}
		//compare collected messages against the expected ones, in order
		if len(test.expectedResMessages) != 0 {
			res := test.expectedResMessages
			for idx, v := range rcvd_evts {
				if len(res) == 0 {
					t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvd_evts), v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
				}
				if res[0] != v.Line.Raw {
					t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvd_evts), res[0], v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
				} else {
					dbgLogger.Debugf("got message '%s'", res[0])
				}
				res = res[1:]
			}
			if len(res) != 0 {
				t.Fatalf("leftover unmatched results : %v", res)
			}

		}
		if test.post != nil {
			test.post(&cw)
		}
	}
}
|
||||
|
||||
// TestConfiguration exercises CloudwatchSource.Configure() and the start of
// acquisition against localstack (see the CI workflow env): unknown YAML keys,
// missing mandatory fields and a non-existing log group must each surface
// the expected error.
func TestConfiguration(t *testing.T) {
	var err error
	log.SetLevel(log.DebugLevel)
	tests := []struct {
		config           []byte
		expectedCfgErr   string // substring expected from Configure()
		expectedStartErr string // substring expected when starting acquisition
		name             string
	}{
		{
			name: "group_does_not_exists",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
group_name: test_group
stream_name: test_stream`),
			expectedStartErr: "The specified log group does not exist",
		},
		{
			config: []byte(`
xxx: cloudwatch
labels:
  type: test_source
group_name: test_group
stream_name: test_stream`),
			expectedCfgErr: "field xxx not found in type",
		},
		{
			name: "missing_group_name",
			config: []byte(`
source: cloudwatch
labels:
  type: test_source
stream_name: test_stream`),
			expectedCfgErr: "group_name is mandatory for CloudwatchSource",
		},
	}

	for idx, test := range tests {
		dbgLogger := log.New().WithField("test", test.name)
		dbgLogger.Logger.SetLevel(log.DebugLevel)
		log.Printf("%d/%d", idx, len(tests))
		cw := CloudwatchSource{}
		err = cw.Configure(test.config, dbgLogger)
		//configuration-time errors end the test case here
		if err != nil && test.expectedCfgErr != "" {
			if !strings.Contains(err.Error(), test.expectedCfgErr) {
				t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())
			}
			log.Debugf("got expected error : %s", err)
			continue
		} else if err != nil && test.expectedCfgErr == "" {
			t.Fatalf("%s unexpected error : %s", test.name, err)
			continue
		} else if test.expectedCfgErr != "" && err == nil {
			t.Fatalf("%s expected error '%s', got none", test.name, test.expectedCfgErr)
			continue
		}
		out := make(chan types.Event)
		tmb := tomb.Tomb{}

		//start acquisition in the configured mode; start-time errors
		//(e.g. missing log group) are expected to show up here
		switch cw.GetMode() {
		case "tail":
			err = cw.StreamingAcquisition(out, &tmb)
		case "cat":
			err = cw.OneShotAcquisition(out, &tmb)
		}
		if err != nil && test.expectedStartErr != "" && !strings.Contains(err.Error(), test.expectedStartErr) {
			t.Fatalf("%s expected error '%s' got '%s'", test.name, test.expectedStartErr, err.Error())
		} else if err != nil && test.expectedStartErr == "" {
			t.Fatalf("%s unexpected error '%s'", test.name, err)
		} else if err == nil && test.expectedStartErr != "" {
			t.Fatalf("%s expected error '%s' got none", test.name, err)
		}

		log.Debugf("killing ...")
		tmb.Kill(nil)
		<-tmb.Dead()
		log.Debugf("dead :)")

	}
}
|
||||
|
||||
// TestConfigureByDSN validates DSN parsing for the cloudwatch datasource:
// a time window (backlog or start_date/end_date) is mandatory, and log_level
// must be a valid logrus level when provided.
func TestConfigureByDSN(t *testing.T) {
	var err error
	log.SetLevel(log.DebugLevel)
	tests := []struct {
		dsn, logtype   string
		expectedCfgErr string
		name           string
	}{
		{
			name:           "missing_query",
			dsn:            "cloudwatch://bad_log_group:bad_stream_name",
			expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)",
		},
		{
			name: "backlog",
			dsn:  "cloudwatch://bad_log_group:bad_stream_name?backlog=30m&log_level=info&profile=test",
			//expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)",
		},
		{
			name: "start_date/end_date",
			dsn:  "cloudwatch://bad_log_group:bad_stream_name?start_date=2021/05/15 14:04&end_date=2021/05/15 15:04",
			//expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)",
		},
		{
			name:           "bad_log_level",
			dsn:            "cloudwatch://bad_log_group:bad_stream_name?backlog=4h&log_level=",
			expectedCfgErr: "unknown level : not a valid logrus Level: ",
		},
	}

	for idx, test := range tests {
		dbgLogger := log.New().WithField("test", test.name)
		dbgLogger.Logger.SetLevel(log.DebugLevel)
		log.Printf("%d/%d", idx, len(tests))
		cw := CloudwatchSource{}
		err = cw.ConfigureByDSN(test.dsn, test.logtype, dbgLogger)
		//only configuration is tested here : no acquisition is started
		if err != nil && test.expectedCfgErr != "" {
			if !strings.Contains(err.Error(), test.expectedCfgErr) {
				t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())
			}
			log.Debugf("got expected error : %s", err)
			continue
		} else if err != nil && test.expectedCfgErr == "" {
			t.Fatalf("%s unexpected error : %s", test.name, err)
			continue
		} else if test.expectedCfgErr != "" && err == nil {
			t.Fatalf("%s expected error '%s', got none", test.name, test.expectedCfgErr)
			continue
		}
	}
}
|
||||
|
||||
// TestOneShotAcquisition runs the cloudwatch datasource in one-shot ("cat")
// mode against localstack : pre() creates log groups/streams and injects
// events, then only the events falling inside the DSN's backlog window must
// be collected. post() cleans up the created resources.
func TestOneShotAcquisition(t *testing.T) {
	var err error
	log.SetLevel(log.DebugLevel)
	tests := []struct {
		dsn                 string
		expectedCfgErr      string
		expectedStartErr    string
		name                string
		pre                 func(*CloudwatchSource) // setup: create groups/streams, inject events
		run                 func(*CloudwatchSource) // optional code to run while acquisition is live
		post                func(*CloudwatchSource) // teardown: delete created resources
		expectedResLen      int                     // -1 disables the length check
		expectedResMessages []string                // exact expected payloads, in order
	}{
		//stream with no data
		{
			name: "empty_stream",
			dsn:  "cloudwatch://test_log_group1:test_stream?backlog=1h",
			//expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				})
				cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				})
			},
			post: func(cw *CloudwatchSource) {
				cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				})
			},
			expectedResLen: 0,
		},
		//stream with one event
		{
			name: "get_one_event",
			dsn:  "cloudwatch://test_log_group1:test_stream?backlog=1h",
			//expectedStartErr: "The specified log group does not exist",
			pre: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("error while CreateLogGroup")
				}
				if _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
				}); err != nil {
					t.Fatalf("error while CreateLogStream")

				}
				//this one is too much in the back
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_1"),
							Timestamp: aws.Int64(time.Now().Add(-(2 * time.Hour)).UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					log.Fatalf("failed to put logs")
				}

				//this one can be read
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_2"),
							Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					log.Fatalf("failed to put logs")
				}

				//this one is in the past
				if _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
					LogGroupName:  aws.String("test_log_group1"),
					LogStreamName: aws.String("test_stream"),
					LogEvents: []*cloudwatchlogs.InputLogEvent{
						&cloudwatchlogs.InputLogEvent{
							Message:   aws.String("test_message_3"),
							Timestamp: aws.Int64(time.Now().Add(-(3 * time.Hour)).UTC().Unix() * 1000),
						},
					},
				}); err != nil {
					log.Fatalf("failed to put logs")
				}
			},
			post: func(cw *CloudwatchSource) {
				if _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
					LogGroupName: aws.String("test_log_group1"),
				}); err != nil {
					t.Fatalf("failed to delete")
				}
			},
			expectedResLen:      1,
			expectedResMessages: []string{"test_message_2"},
		},
	}

	for _, test := range tests {
		dbgLogger := log.New().WithField("test", test.name)
		dbgLogger.Logger.SetLevel(log.DebugLevel)
		dbgLogger.Infof("starting test")
		cw := CloudwatchSource{}
		err = cw.ConfigureByDSN(test.dsn, "test", dbgLogger)
		if err != nil && test.expectedCfgErr != "" {
			if !strings.Contains(err.Error(), test.expectedCfgErr) {
				t.Fatalf("%s expected error '%s' got error '%s'", test.name, test.expectedCfgErr, err.Error())
			}
			log.Debugf("got expected error : %s", err)
			continue
		} else if err != nil && test.expectedCfgErr == "" {
			t.Fatalf("%s unexpected error : %s", test.name, err)
			continue
		} else if test.expectedCfgErr != "" && err == nil {
			t.Fatalf("%s expected error '%s', got none", test.name, test.expectedCfgErr)
			continue
		}
		dbgLogger.Infof("config done test")
		//run pre-routine : tests use it to set group & streams etc.
		if test.pre != nil {
			test.pre(&cw)
		}
		out := make(chan types.Event)
		tmb := tomb.Tomb{}
		var rcvd_evts []types.Event

		dbgLogger.Infof("running StreamingAcquisition")
		actmb := tomb.Tomb{}
		actmb.Go(func() error {
			err := cw.OneShotAcquisition(out, &actmb)
			dbgLogger.Infof("acquis done")

			if err != nil && test.expectedStartErr != "" && !strings.Contains(err.Error(), test.expectedStartErr) {
				t.Fatalf("%s expected error '%s' got '%s'", test.name, test.expectedStartErr, err.Error())
			} else if err != nil && test.expectedStartErr == "" {
				t.Fatalf("%s unexpected error '%s'", test.name, err)
			} else if err == nil && test.expectedStartErr != "" {
				t.Fatalf("%s expected error '%s' got none", test.name, err)
			}
			return nil
		})

		//let's empty output chan
		tmb.Go(func() error {
			for {
				select {
				case in := <-out:
					log.Debugf("received event %+v", in)
					rcvd_evts = append(rcvd_evts, in)
				case <-tmb.Dying():
					log.Debugf("pumper died")
					return nil
				}
			}
		})

		if test.run != nil {
			test.run(&cw)
		} else {
			dbgLogger.Warning("no run code")
		}

		//give the datasource time to fetch & push events before collecting results
		time.Sleep(5 * time.Second)
		dbgLogger.Infof("killing collector")
		tmb.Kill(nil)
		<-tmb.Dead()
		dbgLogger.Infof("killing datasource")
		actmb.Kill(nil)
		dbgLogger.Infof("waiting datasource death")
		<-actmb.Dead()
		//check results
		if test.expectedResLen != -1 {
			if test.expectedResLen != len(rcvd_evts) {
				t.Fatalf("%s : expected %d results got %d -> %v", test.name, test.expectedResLen, len(rcvd_evts), rcvd_evts)
			} else {
				dbgLogger.Debugf("got %d expected messages", len(rcvd_evts))
			}
		}
		if len(test.expectedResMessages) != 0 {
			res := test.expectedResMessages
			for idx, v := range rcvd_evts {
				if len(res) == 0 {
					t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvd_evts), v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
				}
				if res[0] != v.Line.Raw {
					t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvd_evts), res[0], v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
				} else {
					dbgLogger.Debugf("got message '%s'", res[0])
				}
				res = res[1:]
			}
			if len(res) != 0 {
				t.Fatalf("leftover unmatched results : %v", res)
			}

		}
		if test.post != nil {
			test.post(&cw)
		}
	}

}
|
417
pkg/acquisition/modules/file/file.go
Normal file
417
pkg/acquisition/modules/file/file.go
Normal file
|
@ -0,0 +1,417 @@
|
|||
package fileacquisition
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/nxadm/tail"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
"gopkg.in/tomb.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// linesRead counts, per source file, the number of log lines read by the
// file datasource (exposed as the cs_filesource_hits_total metric).
var linesRead = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_filesource_hits_total",
		Help: "Total lines that were read.",
	},
	[]string{"source"})
|
||||
|
||||
// FileConfiguration is the YAML configuration of the file datasource.
type FileConfiguration struct {
	Filenames []string // list of files or glob patterns to acquire
	Filename  string   // single-file shorthand, merged into Filenames at Configure time
	// ForceInotify makes Configure watch the pattern's directory even
	// when the pattern is not a glob.
	ForceInotify                      bool `yaml:"force_inotify"`
	configuration.DataSourceCommonCfg `yaml:",inline"`
}
|
||||
|
||||
// FileSource reads logs from a set of files, either at once ("cat" mode)
// or by tailing them ("tail" mode).
type FileSource struct {
	config             FileConfiguration
	watcher            *fsnotify.Watcher // directory watcher used to pick up newly created files
	watchedDirectories map[string]bool   // set of directories already added to the watcher
	tails              map[string]bool   // set of files that already have a tail running
	logger             *log.Entry
	files              []string // resolved list of files to read/tail
}
|
||||
|
||||
// Configure parses the YAML configuration, defaults the mode to tail,
// creates the fsnotify watcher and resolves the configured glob patterns
// into the list of files to acquire. In tail mode, the parent directory of
// each glob match (or of every pattern when force_inotify is set) is
// watched so files created later are picked up by monitorNewFiles.
func (f *FileSource) Configure(Config []byte, logger *log.Entry) error {
	fileConfig := FileConfiguration{}
	f.logger = logger
	f.watchedDirectories = make(map[string]bool)
	f.tails = make(map[string]bool)
	err := yaml.UnmarshalStrict(Config, &fileConfig)
	if err != nil {
		return errors.Wrap(err, "Cannot parse FileAcquisition configuration")
	}
	f.logger.Tracef("FileAcquisition configuration: %+v", fileConfig)
	//`filename:` is a shorthand for a one-element `filenames:`
	if len(fileConfig.Filename) != 0 {
		fileConfig.Filenames = append(fileConfig.Filenames, fileConfig.Filename)
	}
	if len(fileConfig.Filenames) == 0 {
		return fmt.Errorf("no filename or filenames configuration provided")
	}
	f.config = fileConfig
	if f.config.Mode == "" {
		f.config.Mode = configuration.TAIL_MODE
	}
	if f.config.Mode != configuration.CAT_MODE && f.config.Mode != configuration.TAIL_MODE {
		return fmt.Errorf("unsupported mode %s for file source", f.config.Mode)
	}
	f.watcher, err = fsnotify.NewWatcher()
	if err != nil {
		return errors.Wrapf(err, "Could not create fsnotify watcher")
	}
	f.logger.Tracef("Actual FileAcquisition Configuration %+v", f.config)
	for _, pattern := range f.config.Filenames {
		//force_inotify : watch the directory even if the pattern matched nothing yet
		if f.config.ForceInotify {
			directory := path.Dir(pattern)
			f.logger.Infof("Force add watch on %s", directory)
			if !f.watchedDirectories[directory] {
				err = f.watcher.Add(directory)
				if err != nil {
					f.logger.Errorf("Could not create watch on directory %s : %s", directory, err)
					continue
				}
				f.watchedDirectories[directory] = true
			}
		}
		files, err := filepath.Glob(pattern)
		if err != nil {
			return errors.Wrap(err, "Glob failure")
		}
		if len(files) == 0 {
			f.logger.Warnf("No matching files for pattern %s", pattern)
			continue
		}
		for _, file := range files {
			if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { //we have a glob pattern
				directory := path.Dir(file)
				if !f.watchedDirectories[directory] {

					err = f.watcher.Add(directory)
					if err != nil {
						f.logger.Errorf("Could not create watch on directory %s : %s", directory, err)
						continue
					}
					f.watchedDirectories[directory] = true
				}
			}
			f.logger.Infof("Adding file %s to datasources", file)
			f.files = append(f.files, file)
		}
	}
	return nil
}
|
||||
|
||||
// ConfigureByDSN configures the file source from a "file://" DSN (one-shot
// cli usage). The only supported query parameter is log_level; the resulting
// source is always in cat mode and at least one file must match the pattern.
func (f *FileSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
	if !strings.HasPrefix(dsn, "file://") {
		return fmt.Errorf("invalid DSN %s for file source, must start with file://", dsn)
	}

	f.logger = logger

	dsn = strings.TrimPrefix(dsn, "file://")

	//split "pattern?query" : args[0] is the file pattern, args[1] the query string
	args := strings.Split(dsn, "?")

	if len(args[0]) == 0 {
		return fmt.Errorf("empty file:// DSN")
	}

	if len(args) == 2 && len(args[1]) != 0 {
		params, err := url.ParseQuery(args[1])
		if err != nil {
			return fmt.Errorf("could not parse file args : %s", err)
		}
		for key, value := range params {
			if key != "log_level" {
				return fmt.Errorf("unsupported key %s in file DSN", key)
			}
			if len(value) != 1 {
				//NOTE(review): exactly one value is required here, but the
				//message says "zero or one" -- confirm intended wording
				return fmt.Errorf("expected zero or one value for 'log_level'")
			}
			lvl, err := log.ParseLevel(value[0])
			if err != nil {
				return errors.Wrapf(err, "unknown level %s", value[0])
			}
			f.logger.Logger.SetLevel(lvl)
		}
	}

	f.config = FileConfiguration{}
	f.config.Labels = map[string]string{"type": labelType}
	f.config.Mode = configuration.CAT_MODE

	f.logger.Debugf("Will try pattern %s", args[0])
	files, err := filepath.Glob(args[0])
	if err != nil {
		return errors.Wrap(err, "Glob failure")
	}

	if len(files) == 0 {
		return fmt.Errorf("no matching files for pattern %s", args[0])
	}

	if len(files) > 1 {
		f.logger.Infof("Will read %d files", len(files))
	}

	for _, file := range files {
		f.logger.Infof("Adding file %s to filelist", file)
		f.files = append(f.files, file)
	}
	return nil
}
|
||||
|
||||
// GetMode returns the configured acquisition mode (tail or cat).
func (f *FileSource) GetMode() string {
	return f.config.Mode
}
|
||||
|
||||
//SupportedModes returns the supported modes by the acquisition module
func (f *FileSource) SupportedModes() []string {
	return []string{configuration.TAIL_MODE, configuration.CAT_MODE}
}
|
||||
|
||||
//OneShotAcquisition reads a set of file and returns when done
|
||||
func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
|
||||
f.logger.Debug("In oneshot")
|
||||
for _, file := range f.files {
|
||||
fi, err := os.Stat(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not stat file %s : %w", file, err)
|
||||
}
|
||||
if fi.IsDir() {
|
||||
f.logger.Warnf("%s is a directory, ignoring it.", file)
|
||||
continue
|
||||
}
|
||||
f.logger.Infof("reading %s at once", file)
|
||||
err = f.readFile(file, out, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMetrics returns the prometheus collectors exposed by this datasource.
func (f *FileSource) GetMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead}
}
|
||||
|
||||
// GetAggregMetrics returns the collectors used in aggregated-metrics mode;
// the file source exposes the same counter in both cases.
func (f *FileSource) GetAggregMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead}
}
|
||||
|
||||
// GetName returns the datasource name ("file").
func (f *FileSource) GetName() string {
	return "file"
}
|
||||
|
||||
// CanRun always succeeds: the file datasource has no special runtime requirement.
func (f *FileSource) CanRun() error {
	return nil
}
|
||||
|
||||
// StreamingAcquisition starts a tail on every configured file and launches
// monitorNewFiles so that files created later (matching a glob pattern) get
// tailed too. Unreadable files and directories are skipped with a log
// message instead of aborting the whole acquisition.
func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
	f.logger.Debug("Starting live acquisition")
	t.Go(func() error {
		return f.monitorNewFiles(out, t)
	})
	for _, file := range f.files {
		//check read permission up-front for a clear error message
		err := unix.Access(file, unix.R_OK)
		if err != nil {
			f.logger.Errorf("unable to read %s : %s", file, err)
			continue
		}
		fi, err := os.Stat(file)
		if err != nil {
			return fmt.Errorf("could not stat file %s : %w", file, err)
		}
		if fi.IsDir() {
			f.logger.Warnf("%s is a directory, ignoring it.", file)
			continue
		}
		//SeekEnd : for live acquisition only new lines are of interest
		tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}})
		if err != nil {
			f.logger.Errorf("Could not start tailing file %s : %s", file, err)
			continue
		}
		f.tails[file] = true
		t.Go(func() error {
			defer types.CatchPanic("crowdsec/acquis/file/live/fsnotify")
			return f.tailFile(out, t, tail)
		})
	}
	return nil
}
|
||||
|
||||
// Dump exposes the datasource's internal state (debugging/introspection).
func (f *FileSource) Dump() interface{} {
	return f
}
|
||||
|
||||
// monitorNewFiles consumes fsnotify events for the watched directories and
// starts a tail for any newly created file that matches one of the
// configured patterns. It returns when the watcher channels are closed or
// when the tomb dies (in which case it closes the watcher).
func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error {
	logger := f.logger.WithField("goroutine", "inotify")
	for {
		select {
		case event, ok := <-f.watcher.Events:
			if !ok { //channel closed : watcher was shut down
				return nil
			}

			if event.Op&fsnotify.Create == fsnotify.Create {
				fi, err := os.Stat(event.Name)
				if err != nil {
					logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err)
					continue
				}
				if fi.IsDir() {
					continue
				}
				logger.Debugf("Detected new file %s", event.Name)
				//only tail the new file if it matches one of the configured patterns
				matched := false
				for _, pattern := range f.config.Filenames {
					logger.Debugf("Matching %s with %s", pattern, event.Name)
					matched, err = path.Match(pattern, event.Name)
					if err != nil {
						logger.Errorf("Could not match pattern : %s", err)
						continue
					}
					if matched {
						break
					}
				}
				if !matched {
					continue
				}
				if f.tails[event.Name] {
					//we already have a tail on it, do not start a new one
					logger.Debugf("Already tailing file %s, not creating a new tail", event.Name)
					break //note: this break leaves the select, not the for loop
				}
				err = unix.Access(event.Name, unix.R_OK)
				if err != nil {
					logger.Errorf("unable to read %s : %s", event.Name, err)
					continue
				}
				//Slightly different parameters for Location, as we want to read the first lines of the newly created file
				tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}})
				if err != nil {
					logger.Errorf("Could not start tailing file %s : %s", event.Name, err)
					break
				}
				f.tails[event.Name] = true
				t.Go(func() error {
					defer types.CatchPanic("crowdsec/acquis/tailfile")
					return f.tailFile(out, t, tail)
				})
			}
		case err, ok := <-f.watcher.Errors:
			if !ok {
				return nil
			}
			logger.Errorf("Error while monitoring folder: %s", err)
		case <-t.Dying():
			//datasource is shutting down : release all inotify watches
			err := f.watcher.Close()
			if err != nil {
				return errors.Wrapf(err, "could not remove all inotify watches")
			}
			return nil
		}
	}
}
|
||||
|
||||
// tailFile forwards the lines produced by `tail` on the out channel until
// either the datasource tomb or the tailer itself dies. A dead tailer is
// treated as fatal for the whole datasource (the tomb is killed).
func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error {
	logger := f.logger.WithField("tail", tail.Filename)
	logger.Debugf("-> Starting tail of %s", tail.Filename)
	for {
		l := types.Line{}
		select {
		case <-t.Dying():
			logger.Infof("File datasource %s stopping", tail.Filename)
			if err := tail.Stop(); err != nil {
				f.logger.Errorf("error in stop : %s", err)
				return err
			}
			return nil
		case <-tail.Tomb.Dying(): //our tailer is dying
			logger.Warningf("File reader of %s died", tail.Filename)
			t.Kill(fmt.Errorf("dead reader for %s", tail.Filename))
			return fmt.Errorf("reader for %s is dead", tail.Filename)
		case line := <-tail.Lines:
			if line == nil {
				logger.Debugf("Nil line")
				return fmt.Errorf("tail for %s is empty", tail.Filename)
			}
			if line.Err != nil {
				logger.Warningf("fetch error : %v", line.Err)
				return line.Err
			}
			if line.Text == "" { //skip empty lines
				continue
			}
			linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc()
			l.Raw = line.Text
			l.Labels = f.config.Labels
			l.Time = line.Time
			l.Src = tail.Filename
			l.Process = true
			l.Module = f.GetName()
			//we're tailing, it must be real time logs
			logger.Debugf("pushing %+v", l)
			out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
		}
	}
}
|
||||
|
||||
func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tomb) error {
|
||||
var scanner *bufio.Scanner
|
||||
logger := f.logger.WithField("oneshot", filename)
|
||||
fd, err := os.Open(filename)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed opening %s", filename)
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
if strings.HasSuffix(filename, ".gz") {
|
||||
gz, err := gzip.NewReader(fd)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to read gz file: %s", err)
|
||||
return errors.Wrapf(err, "failed to read gz %s", filename)
|
||||
}
|
||||
defer gz.Close()
|
||||
scanner = bufio.NewScanner(gz)
|
||||
|
||||
} else {
|
||||
scanner = bufio.NewScanner(fd)
|
||||
}
|
||||
scanner.Split(bufio.ScanLines)
|
||||
for scanner.Scan() {
|
||||
logger.Debugf("line %s", scanner.Text())
|
||||
l := types.Line{}
|
||||
l.Raw = scanner.Text()
|
||||
l.Time = time.Now()
|
||||
l.Src = filename
|
||||
l.Labels = f.config.Labels
|
||||
l.Process = true
|
||||
l.Module = f.GetName()
|
||||
linesRead.With(prometheus.Labels{"source": filename}).Inc()
|
||||
|
||||
//we're reading logs at once, it must be time-machine buckets
|
||||
out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE}
|
||||
}
|
||||
t.Kill(nil)
|
||||
return nil
|
||||
}
|
403
pkg/acquisition/modules/file/file_test.go
Normal file
403
pkg/acquisition/modules/file/file_test.go
Normal file
|
@ -0,0 +1,403 @@
|
|||
package fileacquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
// TestBadConfiguration checks that invalid YAML configurations are rejected
// by FileSource.Configure with meaningful error messages.
func TestBadConfiguration(t *testing.T) {
	tests := []struct {
		config      string
		expectedErr string
	}{
		{
			config:      `foobar: asd.log`,
			expectedErr: "line 1: field foobar not found in type fileacquisition.FileConfiguration",
		},
		{
			config:      `mode: tail`,
			expectedErr: "no filename or filenames configuration provided",
		},
		{
			config:      `filename: "[asd-.log"`,
			expectedErr: "Glob failure: syntax error in pattern",
		},
	}

	subLogger := log.WithFields(log.Fields{
		"type": "file",
	})
	for _, test := range tests {
		f := FileSource{}
		//every case is expected to fail to configure
		err := f.Configure([]byte(test.config), subLogger)
		assert.Contains(t, err.Error(), test.expectedErr)
	}
}
|
||||
|
||||
func TestConfigureDSN(t *testing.T) {
|
||||
tests := []struct {
|
||||
dsn string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
dsn: "asd://",
|
||||
expectedErr: "invalid DSN asd:// for file source, must start with file://",
|
||||
},
|
||||
{
|
||||
dsn: "file://",
|
||||
expectedErr: "empty file:// DSN",
|
||||
},
|
||||
{
|
||||
dsn: "file:///etc/passwd?log_level=warn",
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
dsn: "file:///etc/passwd?log_level=foobar",
|
||||
expectedErr: "unknown level foobar: not a valid logrus Level:",
|
||||
},
|
||||
}
|
||||
subLogger := log.WithFields(log.Fields{
|
||||
"type": "file",
|
||||
})
|
||||
for _, test := range tests {
|
||||
f := FileSource{}
|
||||
err := f.ConfigureByDSN(test.dsn, "testtype", subLogger)
|
||||
if test.expectedErr != "" {
|
||||
assert.Contains(t, err.Error(), test.expectedErr)
|
||||
} else {
|
||||
assert.Equal(t, err, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestOneShot runs OneShotAcquisition against fixture files and verifies
// both the error paths (permissions, bad globs, bad gzip, vanished file)
// and the number of lines read. It relies on the test_files/ fixtures and
// on not running as root (for the /etc/shadow permission-denied case).
func TestOneShot(t *testing.T) {
	tests := []struct {
		config         string
		expectedErr    string    // substring expected from Configure/OneShotAcquisition
		expectedOutput string    // substring expected in the last log entry
		expectedLines  int       // number of events expected on the out channel
		logLevel       log.Level // capture level for the null logger
		setup          func()    // runs before Configure
		afterConfigure func()    // runs between Configure and acquisition
		teardown       func()    // runs after the test case
	}{
		{
			config: `
mode: cat
filename: /etc/shadow`,
			expectedErr:    "failed opening /etc/shadow: open /etc/shadow: permission denied",
			expectedOutput: "",
			logLevel:       log.WarnLevel,
			expectedLines:  0,
		},
		{
			config: `
mode: cat
filename: /`,
			expectedErr:    "",
			expectedOutput: "/ is a directory, ignoring it",
			logLevel:       log.WarnLevel,
			expectedLines:  0,
		},
		{
			config: `
mode: cat
filename: "[*-.log"`,
			expectedErr:    "Glob failure: syntax error in pattern",
			expectedOutput: "",
			logLevel:       log.WarnLevel,
			expectedLines:  0,
		},
		{
			config: `
mode: cat
filename: /do/not/exist`,
			expectedErr:    "",
			expectedOutput: "No matching files for pattern /do/not/exist",
			logLevel:       log.WarnLevel,
			expectedLines:  0,
		},
		{
			config: `
mode: cat
filename: test_files/test.log`,
			expectedErr:    "",
			expectedOutput: "",
			expectedLines:  5,
			logLevel:       log.WarnLevel,
		},
		{
			config: `
mode: cat
filename: test_files/test.log.gz`,
			expectedErr:    "",
			expectedOutput: "",
			expectedLines:  5,
			logLevel:       log.WarnLevel,
		},
		{
			config: `
mode: cat
filename: test_files/bad.gz`,
			expectedErr:    "failed to read gz test_files/bad.gz: unexpected EOF",
			expectedOutput: "",
			expectedLines:  0,
			logLevel:       log.WarnLevel,
		},
		{
			config: `
mode: cat
filename: test_files/test_delete.log`,
			setup: func() {
				os.Create("test_files/test_delete.log")
			},
			afterConfigure: func() {
				os.Remove("test_files/test_delete.log")
			},
			expectedErr: "could not stat file test_files/test_delete.log : stat test_files/test_delete.log: no such file or directory",
		},
	}

	for _, ts := range tests {
		logger, hook := test.NewNullLogger()
		logger.SetLevel(ts.logLevel)
		subLogger := logger.WithFields(log.Fields{
			"type": "file",
		})
		tomb := tomb.Tomb{}
		out := make(chan types.Event)
		f := FileSource{}
		if ts.setup != nil {
			ts.setup()
		}
		err := f.Configure([]byte(ts.config), subLogger)
		if err != nil && ts.expectedErr != "" {
			assert.Contains(t, err.Error(), ts.expectedErr)
			continue
		} else if err != nil && ts.expectedErr == "" {
			t.Fatalf("Unexpected error : %s", err)
		}
		if ts.afterConfigure != nil {
			ts.afterConfigure()
		}
		actualLines := 0
		//NOTE(review): actualLines is incremented in this goroutine and read
		//below without synchronization -- likely flagged by -race; an atomic
		//counter or a done channel would make this deterministic.
		if ts.expectedLines != 0 {
			go func() {
			READLOOP:
				for {
					select {
					case <-out:
						actualLines++
					case <-time.After(1 * time.Second):
						break READLOOP
					}
				}
			}()
		}
		err = f.OneShotAcquisition(out, &tomb)
		if ts.expectedLines != 0 {
			assert.Equal(t, actualLines, ts.expectedLines)
		}
		if ts.expectedErr != "" {
			if err == nil {
				t.Fatalf("Expected error but got nothing ! %+v", ts)
			}
			assert.Contains(t, err.Error(), ts.expectedErr)
		}
		if ts.expectedOutput != "" {
			assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
			hook.Reset()
		}
		if ts.teardown != nil {
			ts.teardown()
		}
	}
}
|
||||
|
||||
func TestLiveAcquisition(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
expectedErr string
|
||||
expectedOutput string
|
||||
expectedLines int
|
||||
logLevel log.Level
|
||||
setup func()
|
||||
afterConfigure func()
|
||||
teardown func()
|
||||
}{
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filename: /etc/shadow`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "unable to read /etc/shadow : permission denied",
|
||||
logLevel: log.InfoLevel,
|
||||
expectedLines: 0,
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filename: /`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "/ is a directory, ignoring it",
|
||||
logLevel: log.WarnLevel,
|
||||
expectedLines: 0,
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filename: /do/not/exist`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "No matching files for pattern /do/not/exist",
|
||||
logLevel: log.WarnLevel,
|
||||
expectedLines: 0,
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filenames:
|
||||
- test_files/*.log
|
||||
force_inotify: true`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "",
|
||||
expectedLines: 5,
|
||||
logLevel: log.DebugLevel,
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filenames:
|
||||
- test_files/*.log
|
||||
force_inotify: true`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "",
|
||||
expectedLines: 0,
|
||||
logLevel: log.DebugLevel,
|
||||
afterConfigure: func() {
|
||||
os.Create("test_files/a.log")
|
||||
os.Remove("test_files/a.log")
|
||||
},
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filenames:
|
||||
- test_files/*.log
|
||||
force_inotify: true`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "",
|
||||
expectedLines: 5,
|
||||
logLevel: log.DebugLevel,
|
||||
afterConfigure: func() {
|
||||
os.Create("test_files/a.log")
|
||||
time.Sleep(1 * time.Second)
|
||||
os.Chmod("test_files/a.log", 0000)
|
||||
},
|
||||
teardown: func() {
|
||||
os.Chmod("test_files/a.log", 0644)
|
||||
os.Remove("test_files/a.log")
|
||||
},
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
filenames:
|
||||
- test_files/*.log
|
||||
force_inotify: true`,
|
||||
expectedErr: "",
|
||||
expectedOutput: "",
|
||||
expectedLines: 5,
|
||||
logLevel: log.DebugLevel,
|
||||
afterConfigure: func() {
|
||||
os.Mkdir("test_files/pouet/", 0700)
|
||||
},
|
||||
teardown: func() {
|
||||
os.Remove("test_files/pouet/")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, ts := range tests {
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.SetLevel(ts.logLevel)
|
||||
subLogger := logger.WithFields(log.Fields{
|
||||
"type": "file",
|
||||
})
|
||||
tomb := tomb.Tomb{}
|
||||
out := make(chan types.Event)
|
||||
f := FileSource{}
|
||||
if ts.setup != nil {
|
||||
ts.setup()
|
||||
}
|
||||
err := f.Configure([]byte(ts.config), subLogger)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error : %s", err)
|
||||
}
|
||||
if ts.afterConfigure != nil {
|
||||
ts.afterConfigure()
|
||||
}
|
||||
actualLines := 0
|
||||
if ts.expectedLines != 0 {
|
||||
go func() {
|
||||
READLOOP:
|
||||
for {
|
||||
select {
|
||||
case <-out:
|
||||
actualLines++
|
||||
case <-time.After(2 * time.Second):
|
||||
break READLOOP
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
err = f.StreamingAcquisition(out, &tomb)
|
||||
|
||||
if ts.expectedErr != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error but got nothing ! %+v", ts)
|
||||
}
|
||||
assert.Contains(t, err.Error(), ts.expectedErr)
|
||||
}
|
||||
|
||||
if ts.expectedLines != 0 {
|
||||
fd, err := os.Create("test_files/stream.log")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create test file : %s", err)
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = fd.WriteString(fmt.Sprintf("%d\n", i))
|
||||
if err != nil {
|
||||
t.Fatalf("could not write test file : %s", err)
|
||||
os.Remove("test_files/stream.log")
|
||||
}
|
||||
}
|
||||
fd.Close()
|
||||
//we sleep to make sure we detect the new file
|
||||
time.Sleep(1 * time.Second)
|
||||
os.Remove("test_files/stream.log")
|
||||
assert.Equal(t, actualLines, ts.expectedLines)
|
||||
}
|
||||
|
||||
if ts.expectedOutput != "" {
|
||||
if hook.LastEntry() == nil {
|
||||
t.Fatalf("expected output %s, but got nothing", ts.expectedOutput)
|
||||
}
|
||||
assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
if ts.teardown != nil {
|
||||
ts.teardown()
|
||||
}
|
||||
|
||||
tomb.Kill(nil)
|
||||
}
|
||||
}
|
1
pkg/acquisition/modules/file/test_files/bad.gz
Normal file
1
pkg/acquisition/modules/file/test_files/bad.gz
Normal file
|
@ -0,0 +1 @@
|
|||
42
|
5
pkg/acquisition/modules/file/test_files/test.log
Normal file
5
pkg/acquisition/modules/file/test_files/test.log
Normal file
|
@ -0,0 +1,5 @@
|
|||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
BIN
pkg/acquisition/modules/file/test_files/test.log.gz
Normal file
BIN
pkg/acquisition/modules/file/test_files/test.log.gz
Normal file
Binary file not shown.
256
pkg/acquisition/modules/journalctl/journalctl.go
Normal file
256
pkg/acquisition/modules/journalctl/journalctl.go
Normal file
|
@ -0,0 +1,256 @@
|
|||
package journalctlacquisition
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"gopkg.in/tomb.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// JournalCtlConfiguration is the YAML configuration of the journalctl
// datasource; Filters holds raw journalctl match arguments passed verbatim
// to the journalctl command line.
type JournalCtlConfiguration struct {
	configuration.DataSourceCommonCfg `yaml:",inline"`
	Filters                           []string `yaml:"journalctl_filter"`
}

// JournalCtlSource acquires log lines by spawning a journalctl subprocess
// and reading its stdout.
type JournalCtlSource struct {
	config JournalCtlConfiguration // parsed configuration
	logger *log.Entry
	src    string   // label identifying this instance in logs and metrics
	args   []string // full journalctl argument list (mode args + filters)
}
|
||||
|
||||
// journalctlCmd is resolved via PATH (see CanRun), which also lets tests
// substitute a fake implementation.
const journalctlCmd string = "journalctl"

var (
	// one-shot (cat) mode: no extra flags, read the journal then exit
	journalctlArgsOneShot = []string{}
	// streaming mode: follow the journal, starting from its end ("-n 0")
	journalctlArgstreaming = []string{"--follow", "-n", "0"}
)

// linesRead counts lines emitted by this datasource, labeled per source.
var linesRead = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_journalctlsource_hits_total",
		Help: "Total lines that were read.",
	},
	[]string{"source"})
|
||||
|
||||
func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error {
|
||||
for scanner.Scan() {
|
||||
txt := scanner.Text()
|
||||
out <- txt
|
||||
}
|
||||
if errChan != nil && scanner.Err() != nil {
|
||||
errChan <- scanner.Err()
|
||||
close(errChan)
|
||||
return scanner.Err()
|
||||
}
|
||||
if errChan != nil {
|
||||
close(errChan)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runJournalCtl spawns the journalctl subprocess with j.args, turns each
// stdout line into a types.Event on out, surfaces stderr lines as warnings,
// and kills the tomb (hence itself) when journalctl errors or its output
// ends. Returns nil when stopped via the tomb; the subprocess is cancelled
// and reaped on the way out.
func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) error {
	// the context lets us terminate journalctl from any exit path
	ctx, cancel := context.WithCancel(context.Background())

	cmd := exec.CommandContext(ctx, journalctlCmd, j.args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		cancel()
		return fmt.Errorf("could not get journalctl stdout: %s", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		cancel()
		return fmt.Errorf("could not get journalctl stderr: %s", err)
	}

	stderrChan := make(chan string)
	stdoutChan := make(chan string)
	// errChan doubles as the end-of-stdout signal: readLine closes it
	errChan := make(chan error)

	logger := j.logger.WithField("src", j.src)

	logger.Infof("Running journalctl command: %s %s", cmd.Path, cmd.Args)
	err = cmd.Start()
	if err != nil {
		cancel()
		logger.Errorf("could not start journalctl command : %s", err)
		return err
	}

	stdoutscanner := bufio.NewScanner(stdout)

	if stdoutscanner == nil {
		cancel()
		cmd.Wait()
		return fmt.Errorf("failed to create stdout scanner")
	}

	stderrScanner := bufio.NewScanner(stderr)

	if stderrScanner == nil {
		cancel()
		cmd.Wait()
		return fmt.Errorf("failed to create stderr scanner")
	}
	t.Go(func() error {
		return readLine(stdoutscanner, stdoutChan, errChan)
	})
	t.Go(func() error {
		//looks like journalctl closes stderr quite early, so ignore its status (but not its output)
		return readLine(stderrScanner, stderrChan, nil)
	})

	for {
		select {
		case <-t.Dying():
			logger.Infof("journalctl datasource %s stopping", j.src)
			cancel()
			cmd.Wait() //avoid zombie process
			return nil
		case stdoutLine := <-stdoutChan:
			// wrap the raw journalctl line into a crowdsec event
			l := types.Line{}
			l.Raw = stdoutLine
			logger.Debugf("getting one line : %s", l.Raw)
			l.Labels = j.config.Labels
			l.Time = time.Now()
			l.Src = j.src
			l.Process = true
			l.Module = j.GetName()
			linesRead.With(prometheus.Labels{"source": j.src}).Inc()
			evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
			out <- evt
		case stderrLine := <-stderrChan:
			// any stderr output is treated as fatal for this datasource
			logger.Warnf("Got stderr message : %s", stderrLine)
			err := fmt.Errorf("journalctl error : %s", stderrLine)
			t.Kill(err)
		case errScanner, ok := <-errChan:
			// !ok means stdout is exhausted (readLine closed the channel);
			// killing the tomb makes the t.Dying() case fire next and clean up
			if !ok {
				logger.Debugf("errChan is closed, quitting")
				t.Kill(nil)
			}
			if errScanner != nil {
				t.Kill(errScanner)
			}
		}
	}
}
|
||||
|
||||
// GetMetrics returns the prometheus collectors exposed by this datasource.
func (j *JournalCtlSource) GetMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead}
}

// GetAggregMetrics returns the collectors used in aggregated metrics mode
// (identical to GetMetrics for this datasource).
func (j *JournalCtlSource) GetAggregMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesRead}
}
|
||||
|
||||
func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry) error {
|
||||
config := JournalCtlConfiguration{}
|
||||
j.logger = logger
|
||||
err := yaml.UnmarshalStrict(yamlConfig, &config)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot parse JournalCtlSource configuration")
|
||||
}
|
||||
if config.Mode == "" {
|
||||
config.Mode = configuration.TAIL_MODE
|
||||
}
|
||||
var args []string
|
||||
if config.Mode == configuration.TAIL_MODE {
|
||||
args = journalctlArgstreaming
|
||||
} else {
|
||||
args = journalctlArgsOneShot
|
||||
}
|
||||
if len(config.Filters) == 0 {
|
||||
return fmt.Errorf("journalctl_filter is required")
|
||||
}
|
||||
j.args = append(args, config.Filters...)
|
||||
j.src = fmt.Sprintf("journalctl-%s", strings.Join(config.Filters, "."))
|
||||
j.config = config
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *JournalCtlSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
|
||||
j.logger = logger
|
||||
j.config = JournalCtlConfiguration{}
|
||||
j.config.Mode = configuration.CAT_MODE
|
||||
j.config.Labels = map[string]string{"type": labelType}
|
||||
|
||||
//format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2
|
||||
if !strings.HasPrefix(dsn, "journalctl://") {
|
||||
return fmt.Errorf("invalid DSN %s for journalctl source, must start with journalctl://", dsn)
|
||||
}
|
||||
|
||||
qs := strings.TrimPrefix(dsn, "journalctl://")
|
||||
if len(qs) == 0 {
|
||||
return fmt.Errorf("empty journalctl:// DSN")
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(qs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse journalctl DSN : %s", err)
|
||||
}
|
||||
for key, value := range params {
|
||||
switch key {
|
||||
case "filters":
|
||||
j.config.Filters = append(j.config.Filters, value...)
|
||||
case "log_level":
|
||||
if len(value) != 1 {
|
||||
return fmt.Errorf("expected zero or one value for 'log_level'")
|
||||
}
|
||||
lvl, err := log.ParseLevel(value[0])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unknown level %s", value[0])
|
||||
}
|
||||
j.logger.Logger.SetLevel(lvl)
|
||||
default:
|
||||
return fmt.Errorf("unsupported key %s in journalctl DSN", key)
|
||||
}
|
||||
}
|
||||
j.args = append(j.args, j.config.Filters...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMode returns the configured acquisition mode (tail or cat).
func (j *JournalCtlSource) GetMode() string {
	return j.config.Mode
}

// GetName returns the datasource type identifier.
func (j *JournalCtlSource) GetName() string {
	return "journalctl"
}
|
||||
|
||||
// OneShotAcquisition runs journalctl once (cat mode), pushing every resulting
// event to out, and returns when the command has finished.
func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
	defer types.CatchPanic("crowdsec/acquis/journalctl/oneshot")
	err := j.runJournalCtl(out, t)
	j.logger.Debug("Oneshot journalctl acquisition is done")
	return err

}

// StreamingAcquisition launches journalctl in follow mode inside the tomb and
// returns immediately; events flow to out until the tomb dies.
func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
	t.Go(func() error {
		defer types.CatchPanic("crowdsec/acquis/journalctl/streaming")
		return j.runJournalCtl(out, t)
	})
	return nil
}
|
||||
// CanRun reports whether the journalctl binary can be found in PATH.
func (j *JournalCtlSource) CanRun() error {
	//TODO: add a more precise check on version or something ?
	_, err := exec.LookPath(journalctlCmd)
	return err
}

// Dump returns the datasource state for debugging/inspection purposes.
func (j *JournalCtlSource) Dump() interface{} {
	return j
}
|
283
pkg/acquisition/modules/journalctl/journalctl_test.go
Normal file
283
pkg/acquisition/modules/journalctl/journalctl_test.go
Normal file
|
@ -0,0 +1,283 @@
|
|||
package journalctlacquisition
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
func TestBadConfiguration(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
config: `foobar: asd.log`,
|
||||
expectedErr: "line 1: field foobar not found in type journalctlacquisition.JournalCtlConfiguration",
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: tail
|
||||
source: journalctl`,
|
||||
expectedErr: "journalctl_filter is required",
|
||||
},
|
||||
{
|
||||
config: `
|
||||
mode: cat
|
||||
source: journalctl
|
||||
journalctl_filter:
|
||||
- _UID=42`,
|
||||
expectedErr: "",
|
||||
},
|
||||
}
|
||||
|
||||
subLogger := log.WithFields(log.Fields{
|
||||
"type": "journalctl",
|
||||
})
|
||||
for _, test := range tests {
|
||||
f := JournalCtlSource{}
|
||||
err := f.Configure([]byte(test.config), subLogger)
|
||||
if test.expectedErr != "" && err == nil {
|
||||
t.Fatalf("Expected err %s but got nil !", test.expectedErr)
|
||||
}
|
||||
if test.expectedErr != "" {
|
||||
assert.Contains(t, err.Error(), test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigureDSN(t *testing.T) {
|
||||
tests := []struct {
|
||||
dsn string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
dsn: "asd://",
|
||||
expectedErr: "invalid DSN asd:// for journalctl source, must start with journalctl://",
|
||||
},
|
||||
{
|
||||
dsn: "journalctl://",
|
||||
expectedErr: "empty journalctl:// DSN",
|
||||
},
|
||||
{
|
||||
dsn: "journalctl://foobar=42",
|
||||
expectedErr: "unsupported key foobar in journalctl DSN",
|
||||
},
|
||||
{
|
||||
dsn: "journalctl://filters=%ZZ",
|
||||
expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"",
|
||||
},
|
||||
{
|
||||
dsn: "journalctl://filters=_UID=42?log_level=warn",
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
dsn: "journalctl://filters=_UID=1000&log_level=foobar",
|
||||
expectedErr: "unknown level foobar: not a valid logrus Level:",
|
||||
},
|
||||
}
|
||||
subLogger := log.WithFields(log.Fields{
|
||||
"type": "journalctl",
|
||||
})
|
||||
for _, test := range tests {
|
||||
f := JournalCtlSource{}
|
||||
err := f.ConfigureByDSN(test.dsn, "testtype", subLogger)
|
||||
if test.expectedErr != "" {
|
||||
assert.Contains(t, err.Error(), test.expectedErr)
|
||||
} else {
|
||||
assert.Equal(t, err, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestOneShot runs OneShotAcquisition against the fake journalctl shim in
// test_files/ (see TestMain) and checks line counts and logged warnings.
func TestOneShot(t *testing.T) {
	tests := []struct {
		config         string
		expectedErr    string
		expectedOutput string
		expectedLines  int
		logLevel       log.Level
	}{
		{
			// "-_UID=42" is rejected by the shim's argparse, mimicking
			// journalctl's "invalid option" stderr output
			config: `
source: journalctl
mode: cat
journalctl_filter:
 - "-_UID=42"`,
			expectedErr:    "",
			expectedOutput: "journalctl: invalid option",
			logLevel:       log.WarnLevel,
			expectedLines:  0,
		},
		{
			// the shim emits exactly 14 lines (header + 13 log entries)
			config: `
source: journalctl
mode: cat
journalctl_filter:
 - _SYSTEMD_UNIT=ssh.service`,
			expectedErr:    "",
			expectedOutput: "",
			logLevel:       log.WarnLevel,
			expectedLines:  14,
		},
	}
	for _, ts := range tests {
		var logger *log.Logger
		var subLogger *log.Entry
		var hook *test.Hook
		// only install a capturing null logger when we assert on log output
		if ts.expectedOutput != "" {
			logger, hook = test.NewNullLogger()
			logger.SetLevel(ts.logLevel)
			subLogger = logger.WithFields(log.Fields{
				"type": "journalctl",
			})
		} else {
			subLogger = log.WithFields(log.Fields{
				"type": "journalctl",
			})
		}
		tomb := tomb.Tomb{}
		out := make(chan types.Event)
		j := JournalCtlSource{}
		err := j.Configure([]byte(ts.config), subLogger)
		if err != nil {
			t.Fatalf("Unexpected error : %s", err)
		}
		// NOTE(review): actualLines is incremented in the goroutine below and
		// read by the main goroutine without synchronization — a data race
		// under -race; an atomic counter would be safer. TODO confirm.
		actualLines := 0
		if ts.expectedLines != 0 {
			go func() {
			READLOOP:
				for {
					select {
					case <-out:
						actualLines++
					case <-time.After(1 * time.Second):
						break READLOOP
					}
				}
			}()
		}

		err = j.OneShotAcquisition(out, &tomb)
		if ts.expectedErr == "" && err != nil {
			t.Fatalf("Unexpected error : %s", err)
		} else if ts.expectedErr != "" && err != nil {
			assert.Contains(t, err.Error(), ts.expectedErr)
			continue
		} else if ts.expectedErr != "" && err == nil {
			t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
		}
		if ts.expectedLines != 0 {
			assert.Equal(t, ts.expectedLines, actualLines)
		}

		if ts.expectedOutput != "" {
			if hook.LastEntry() == nil {
				t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput)
			}
			assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
			hook.Reset()
		}
	}
}
|
||||
|
||||
// TestStreaming runs StreamingAcquisition against the fake journalctl shim,
// verifies the expected line count, and checks that no journalctl process
// survives once the tomb is killed (no zombie/leaked subprocess).
func TestStreaming(t *testing.T) {
	tests := []struct {
		config         string
		expectedErr    string
		expectedOutput string
		expectedLines  int
		logLevel       log.Level
	}{
		{
			config: `
source: journalctl
mode: cat
journalctl_filter:
 - _SYSTEMD_UNIT=ssh.service`,
			expectedErr:    "",
			expectedOutput: "",
			logLevel:       log.WarnLevel,
			expectedLines:  14,
		},
	}
	for _, ts := range tests {
		var logger *log.Logger
		var subLogger *log.Entry
		var hook *test.Hook
		// only install a capturing null logger when we assert on log output
		if ts.expectedOutput != "" {
			logger, hook = test.NewNullLogger()
			logger.SetLevel(ts.logLevel)
			subLogger = logger.WithFields(log.Fields{
				"type": "journalctl",
			})
		} else {
			subLogger = log.WithFields(log.Fields{
				"type": "journalctl",
			})
		}
		tomb := tomb.Tomb{}
		out := make(chan types.Event)
		j := JournalCtlSource{}
		err := j.Configure([]byte(ts.config), subLogger)
		if err != nil {
			t.Fatalf("Unexpected error : %s", err)
		}
		// NOTE(review): actualLines is shared between this goroutine and the
		// main goroutine without synchronization — a data race under -race.
		actualLines := 0
		if ts.expectedLines != 0 {
			go func() {
			READLOOP:
				for {
					select {
					case <-out:
						actualLines++
					case <-time.After(1 * time.Second):
						break READLOOP
					}
				}
			}()
		}

		err = j.StreamingAcquisition(out, &tomb)
		if ts.expectedErr == "" && err != nil {
			t.Fatalf("Unexpected error : %s", err)
		} else if ts.expectedErr != "" && err != nil {
			assert.Contains(t, err.Error(), ts.expectedErr)
			continue
		} else if ts.expectedErr != "" && err == nil {
			t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
		}

		if ts.expectedLines != 0 {
			// give the streaming goroutine time to drain all shim output
			time.Sleep(1 * time.Second)
			assert.Equal(t, ts.expectedLines, actualLines)
		}
		tomb.Kill(nil)
		tomb.Wait()
		// the subprocess must have been reaped once the tomb is dead
		output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput()
		if string(output) != "" {
			t.Fatalf("Found a journalctl process after killing the tomb !")
		}
		if ts.expectedOutput != "" {
			if hook.LastEntry() == nil {
				t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput)
			}
			assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
			hook.Reset()
		}
	}
}
|
||||
|
||||
// TestMain prepends test_files/ to PATH so the fake journalctl shim is used,
// unless USE_SYSTEM_JOURNALCTL is set — keeping test output deterministic.
func TestMain(m *testing.M) {
	if os.Getenv("USE_SYSTEM_JOURNALCTL") == "" {
		os.Setenv("PATH", "./test_files"+":"+os.Getenv("PATH"))
	}
	os.Exit(m.Run())
}
|
45
pkg/acquisition/modules/journalctl/test_files/journalctl
Executable file
45
pkg/acquisition/modules/journalctl/test_files/journalctl
Executable file
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env python3
"""Fake journalctl used by the Go test suite.

Prints a fixed, deterministic set of sshd log lines so tests can assert on
exact line counts, and mimics journalctl's error output for invalid options.
With --follow it blocks like a real streaming journalctl until killed.
"""

import argparse
import time
import sys


class CustomParser(argparse.ArgumentParser):
    # small hack to make argparse errors the same as journalctl
    def error(self, message):
        if 'unrecognized arguments:' in message:
            sys.stderr.write("journalctl: invalid option -- '_'\n")
            sys.stderr.flush()
            # sys.exit instead of the site-module exit() builtin, which is
            # not guaranteed to exist in all interpreter setups
            sys.exit(1)
        else:
            sys.stderr.write(message)
            sys.stderr.flush()
            sys.exit(1)


# 1 header line + 13 entries = 14 lines, matching expectedLines in the tests
LOGS = """-- Logs begin at Fri 2019-07-26 17:13:13 CEST, end at Mon 2020-11-23 09:17:34 CET. --
Nov 22 11:22:19 zeroed sshd[1480]: Invalid user wqeqwe from 127.0.0.1 port 55818
Nov 22 11:22:23 zeroed sshd[1480]: Failed password for invalid user wqeqwe from 127.0.0.1 port 55818 ssh2
Nov 22 11:23:22 zeroed sshd[1769]: Invalid user wqeqwe1 from 127.0.0.1 port 55824
Nov 22 11:23:24 zeroed sshd[1769]: Disconnecting invalid user wqeqwe1 127.0.0.1 port 55824: Too many authentication failures [preauth]
Nov 22 11:23:24 zeroed sshd[1777]: Invalid user wqeqwe2 from 127.0.0.1 port 55826
Nov 22 11:23:25 zeroed sshd[1777]: Disconnecting invalid user wqeqwe2 127.0.0.1 port 55826: Too many authentication failures [preauth]
Nov 22 11:23:25 zeroed sshd[1780]: Invalid user wqeqwe3 from 127.0.0.1 port 55828
Nov 22 11:23:26 zeroed sshd[1780]: Disconnecting invalid user wqeqwe3 127.0.0.1 port 55828: Too many authentication failures [preauth]
Nov 22 11:23:26 zeroed sshd[1786]: Invalid user wqeqwe4 from 127.0.0.1 port 55830
Nov 22 11:23:27 zeroed sshd[1786]: Failed password for invalid user wqeqwe4 from 127.0.0.1 port 55830 ssh2
Nov 22 11:23:27 zeroed sshd[1786]: Disconnecting invalid user wqeqwe4 127.0.0.1 port 55830: Too many authentication failures [preauth]
Nov 22 11:23:27 zeroed sshd[1791]: Invalid user wqeqwe5 from 127.0.0.1 port 55834
Nov 22 11:23:27 zeroed sshd[1791]: Failed password for invalid user wqeqwe5 from 127.0.0.1 port 55834 ssh2"""

# accept the subset of journalctl flags the Go code passes: an optional
# positional filter, -n <count>, and --follow
parser = CustomParser()
parser.add_argument('filter', metavar='FILTER', type=str, nargs='?')
parser.add_argument('-n', dest='n', type=int)
parser.add_argument('--follow', dest='follow', action='store_true', default=False)

args = parser.parse_args()

for line in LOGS.split('\n'):
    print(line)

if args.follow:
    # emulate streaming mode: keep running until the caller kills us
    time.sleep(9999)
|
95
pkg/acquisition/modules/syslog/internal/syslogserver.go
Normal file
95
pkg/acquisition/modules/syslog/internal/syslogserver.go
Normal file
|
@ -0,0 +1,95 @@
|
|||
package syslogserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/tomb.v2"
|
||||
)
|
||||
|
||||
// SyslogServer is a minimal UDP syslog listener that hands raw datagrams to
// a consumer channel.
type SyslogServer struct {
	listenAddr    string
	port          int
	channel       chan SyslogMessage // set via SetChannel before StartServer
	udpConn       *net.UDPConn
	Logger        *log.Entry
	MaxMessageLen int // per-datagram read buffer size, in bytes
}

// SyslogMessage is one raw UDP datagram plus the sender's IP address.
type SyslogMessage struct {
	Message []byte
	Client  string
}
||||
|
||||
func (s *SyslogServer) Listen(listenAddr string, port int) error {
|
||||
|
||||
s.listenAddr = listenAddr
|
||||
s.port = port
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", s.listenAddr, s.port))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not resolve addr %s", s.listenAddr)
|
||||
}
|
||||
udpConn, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not listen on port %d", s.port)
|
||||
}
|
||||
s.udpConn = udpConn
|
||||
err = s.udpConn.SetReadBuffer(s.MaxMessageLen) // FIXME probably
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not set readbuffer on UDP socket")
|
||||
}
|
||||
err = s.udpConn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not set read deadline on UDP socket")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetChannel registers the channel that received datagrams are delivered to.
// Must be called before StartServer.
func (s *SyslogServer) SetChannel(c chan SyslogMessage) {
	s.channel = c
}
|
||||
|
||||
// StartServer spawns a goroutine that reads UDP datagrams and forwards them
// to the configured channel until the returned tomb is killed.
func (s *SyslogServer) StartServer() *tomb.Tomb {
	t := tomb.Tomb{}

	t.Go(func() error {
		for {
			select {
			case <-t.Dying():
				s.Logger.Info("syslog server tomb is dying")
				err := s.KillServer()
				return err
			default:
				//RFC3164 says 1024 bytes max
				//RFC5424 says 480 bytes minimum, and should support up to 2048 bytes
				// a fresh buffer per datagram is required: b[:n] is sent over
				// the channel and must not be overwritten by the next read
				b := make([]byte, s.MaxMessageLen)
				n, addr, err := s.udpConn.ReadFrom(b)
				// reads expire every 100ms (deadline re-armed below) so the
				// t.Dying() case gets a chance to run; timeouts are expected
				if err != nil && !strings.Contains(err.Error(), "i/o timeout") {
					s.Logger.Errorf("error while reading from socket : %s", err)
					s.udpConn.Close()
					return err
				}
				if err == nil {
					// NOTE(review): splitting on ":" truncates IPv6 source
					// addresses — net.SplitHostPort may be safer; confirm
					s.channel <- SyslogMessage{Message: b[:n], Client: strings.Split(addr.String(), ":")[0]}
				}
				// re-arm the read deadline for the next iteration
				err = s.udpConn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
				if err != nil {
					return err
				}
			}
		}
	})
	return &t
}
|
||||
|
||||
func (s *SyslogServer) KillServer() error {
|
||||
err := s.udpConn.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not close UDP connection")
|
||||
}
|
||||
return nil
|
||||
}
|
237
pkg/acquisition/modules/syslog/syslog.go
Normal file
237
pkg/acquisition/modules/syslog/syslog.go
Normal file
|
@ -0,0 +1,237 @@
|
|||
package syslogacquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
|
||||
syslogserver "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal"
|
||||
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
"github.com/influxdata/go-syslog/v3/rfc3164"
|
||||
"github.com/influxdata/go-syslog/v3/rfc5424"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"gopkg.in/tomb.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// SyslogConfiguration is the YAML configuration of the syslog datasource.
// Defaults (applied in Configure): Addr 127.0.0.1, Port 514,
// MaxMessageLen 2048.
type SyslogConfiguration struct {
	Proto                             string `yaml:"protocol,omitempty"`
	Port                              int    `yaml:"listen_port,omitempty"`
	Addr                              string `yaml:"listen_addr,omitempty"`
	MaxMessageLen                     int    `yaml:"max_message_len,omitempty"`
	configuration.DataSourceCommonCfg `yaml:",inline"`
}

// SyslogSource acquires events from a built-in UDP syslog server.
type SyslogSource struct {
	config     SyslogConfiguration
	logger     *log.Entry
	server     *syslogserver.SyslogServer // underlying UDP listener
	serverTomb *tomb.Tomb                 // lifecycle of the listener goroutine
}
|
||||
|
||||
// linesReceived counts raw syslog datagrams received, labeled by client IP.
var linesReceived = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_syslogsource_hits_total",
		Help: "Total lines that were received.",
	},
	[]string{"source"})

// linesParsed counts messages successfully parsed, labeled by client IP and
// syslog flavor.
var linesParsed = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_syslogsource_parsed_total",
		Help: "Total lines that were successfully parsed",
	},
	[]string{"source", "type"})
|
||||
|
||||
// GetName returns the datasource type identifier.
func (s *SyslogSource) GetName() string {
	return "syslog"
}

// GetMode returns the configured acquisition mode (always tail for syslog).
func (s *SyslogSource) GetMode() string {
	return s.config.Mode
}

// Dump returns the datasource state for debugging/inspection purposes.
func (s *SyslogSource) Dump() interface{} {
	return s
}

// CanRun always succeeds: the syslog server has no external dependency.
func (s *SyslogSource) CanRun() error {
	return nil
}

// GetMetrics returns the prometheus collectors exposed by this datasource.
func (s *SyslogSource) GetMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesReceived, linesParsed}
}

// GetAggregMetrics returns the collectors used in aggregated metrics mode
// (identical to GetMetrics for this datasource).
func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector {
	return []prometheus.Collector{linesReceived, linesParsed}
}
|
||||
|
||||
// ConfigureByDSN always fails: the syslog datasource requires a long-running
// listener and therefore cannot be used in one-shot (DSN) mode.
func (s *SyslogSource) ConfigureByDSN(dsn string, labelType string, logger *log.Entry) error {
	return fmt.Errorf("syslog datasource does not support one shot acquisition")
}

// OneShotAcquisition always fails, for the same reason as ConfigureByDSN.
func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
	return fmt.Errorf("syslog datasource does not support one shot acquisition")
}
|
||||
|
||||
// validatePort reports whether port is a usable port number (1-65535).
func validatePort(port int) bool {
	if port <= 0 {
		return false
	}
	return port <= 65535
}
|
||||
|
||||
// validateAddr reports whether addr is a literal IPv4 or IPv6 address
// (hostnames are rejected).
func validateAddr(addr string) bool {
	ip := net.ParseIP(addr)
	return ip != nil
}
|
||||
|
||||
func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry) error {
|
||||
s.logger = logger
|
||||
s.logger.Infof("Starting syslog datasource configuration")
|
||||
syslogConfig := SyslogConfiguration{}
|
||||
syslogConfig.Mode = configuration.TAIL_MODE
|
||||
err := yaml.UnmarshalStrict(yamlConfig, &syslogConfig)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot parse syslog configuration")
|
||||
}
|
||||
if syslogConfig.Addr == "" {
|
||||
syslogConfig.Addr = "127.0.0.1" //do we want a usable or secure default ?
|
||||
}
|
||||
if syslogConfig.Port == 0 {
|
||||
syslogConfig.Port = 514
|
||||
}
|
||||
if syslogConfig.MaxMessageLen == 0 {
|
||||
syslogConfig.MaxMessageLen = 2048
|
||||
}
|
||||
if !validatePort(syslogConfig.Port) {
|
||||
return fmt.Errorf("invalid port %d", syslogConfig.Port)
|
||||
}
|
||||
if !validateAddr(syslogConfig.Addr) {
|
||||
return fmt.Errorf("invalid listen IP %s", syslogConfig.Addr)
|
||||
}
|
||||
s.config = syslogConfig
|
||||
return nil
|
||||
}
|
||||
|
||||
// StreamingAcquisition starts the internal UDP syslog server and a goroutine
// that converts received syslog messages into events pushed to out. Returns
// immediately once the server is listening.
func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
	c := make(chan syslogserver.SyslogMessage)
	s.server = &syslogserver.SyslogServer{Logger: s.logger.WithField("syslog", "internal"), MaxMessageLen: s.config.MaxMessageLen}
	s.server.SetChannel(c)
	err := s.server.Listen(s.config.Addr, s.config.Port)
	if err != nil {
		return errors.Wrap(err, "could not start syslog server")
	}
	// the server runs under its own tomb so handleSyslogMsg can observe both
	// the caller's lifecycle (t) and the server's (s.serverTomb)
	s.serverTomb = s.server.StartServer()
	t.Go(func() error {
		defer types.CatchPanic("crowdsec/acquis/syslog/live")
		return s.handleSyslogMsg(out, t, c)
	})
	return nil
}
|
||||
|
||||
func (s *SyslogSource) buildLogFromSyslog(ts *time.Time, hostname *string,
|
||||
appname *string, pid *string, msg *string) (string, error) {
|
||||
ret := ""
|
||||
if ts != nil {
|
||||
ret += ts.Format("Jan 2 15:04:05")
|
||||
} else {
|
||||
ret += time.Now().Format("Jan 2 15:04:05")
|
||||
}
|
||||
if hostname != nil {
|
||||
ret += " " + *hostname
|
||||
} else {
|
||||
ret += " unknownhost"
|
||||
}
|
||||
if appname != nil {
|
||||
ret += " " + *appname
|
||||
} else {
|
||||
return "", errors.Errorf("missing appname field in syslog message")
|
||||
}
|
||||
if pid != nil {
|
||||
/*
|
||||
!!! ugly hack !!!
|
||||
Due to a bug in the syslog parser we use (https://github.com/influxdata/go-syslog/issues/31),
|
||||
the ProcID field will contain garbage if the message as a ] anywhere in it.
|
||||
Assume that a correctly formated ProcID only contains number, and if this is not the case, set it to an arbitrary value
|
||||
*/
|
||||
_, err := strconv.Atoi(*pid)
|
||||
if err != nil {
|
||||
ret += "[1]: "
|
||||
} else {
|
||||
ret += "[" + *pid + "]: "
|
||||
}
|
||||
} else {
|
||||
ret += ": "
|
||||
}
|
||||
if msg != nil {
|
||||
ret += *msg
|
||||
} else {
|
||||
return "", errors.Errorf("missing message field in syslog message")
|
||||
}
|
||||
return ret, nil
|
||||
|
||||
}
|
||||
|
||||
func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error {
|
||||
for {
|
||||
select {
|
||||
case <-t.Dying():
|
||||
s.logger.Info("Syslog datasource is dying")
|
||||
s.serverTomb.Kill(nil)
|
||||
return s.serverTomb.Wait()
|
||||
case <-s.serverTomb.Dying():
|
||||
s.logger.Info("Syslog server is dying, exiting")
|
||||
return nil
|
||||
case <-s.serverTomb.Dead():
|
||||
s.logger.Info("Syslog server has exited")
|
||||
return nil
|
||||
case syslogLine := <-c:
|
||||
var line string
|
||||
var ts time.Time
|
||||
|
||||
logger := s.logger.WithField("client", syslogLine.Client)
|
||||
linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc()
|
||||
p := rfc5424.NewParser()
|
||||
m, err := p.Parse(syslogLine.Message)
|
||||
if err != nil {
|
||||
logger.Debugf("could not parse message as RFC5424, falling back to RFC3164 : %s", err)
|
||||
p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{}))
|
||||
m, err = p.Parse(syslogLine.Message)
|
||||
if err != nil {
|
||||
logger.Errorf("could not parse message: %s", err)
|
||||
continue
|
||||
}
|
||||
msg := m.(*rfc3164.SyslogMessage)
|
||||
line, err = s.buildLogFromSyslog(msg.Timestamp, msg.Hostname, msg.Appname, msg.ProcID, msg.Message)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
linesParsed.With(prometheus.Labels{"source": syslogLine.Client,
|
||||
"type": "RFC3164"}).Inc()
|
||||
} else {
|
||||
msg := m.(*rfc5424.SyslogMessage)
|
||||
line, err = s.buildLogFromSyslog(msg.Timestamp, msg.Hostname, msg.Appname, msg.ProcID, msg.Message)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
linesParsed.With(prometheus.Labels{"source": syslogLine.Client,
|
||||
"type": "RFC5424"}).Inc()
|
||||
|
||||
}
|
||||
l := types.Line{}
|
||||
l.Raw = line
|
||||
l.Module = s.GetName()
|
||||
l.Labels = s.config.Labels
|
||||
l.Time = ts
|
||||
l.Src = syslogLine.Client
|
||||
l.Process = true
|
||||
out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
|
||||
}
|
||||
}
|
||||
}
|
149
pkg/acquisition/modules/syslog/syslog_test.go
Normal file
149
pkg/acquisition/modules/syslog/syslog_test.go
Normal file
|
@ -0,0 +1,149 @@
|
|||
package syslogacquisition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/tomb.v2"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConfigure(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
config: `
|
||||
foobar: bla
|
||||
source: syslog`,
|
||||
expectedErr: "line 2: field foobar not found in type syslogacquisition.SyslogConfiguration",
|
||||
},
|
||||
{
|
||||
config: `source: syslog`,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
config: `
|
||||
source: syslog
|
||||
listen_port: asd`,
|
||||
expectedErr: "cannot unmarshal !!str `asd` into int",
|
||||
},
|
||||
{
|
||||
config: `
|
||||
source: syslog
|
||||
listen_port: 424242`,
|
||||
expectedErr: "invalid port 424242",
|
||||
},
|
||||
{
|
||||
config: `
|
||||
source: syslog
|
||||
listen_addr: 10.0.0`,
|
||||
expectedErr: "invalid listen IP 10.0.0",
|
||||
},
|
||||
}
|
||||
|
||||
subLogger := log.WithFields(log.Fields{
|
||||
"type": "syslog",
|
||||
})
|
||||
for _, test := range tests {
|
||||
s := SyslogSource{}
|
||||
err := s.Configure([]byte(test.config), subLogger)
|
||||
if test.expectedErr != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error but got nothing : %+v", test)
|
||||
}
|
||||
assert.Contains(t, err.Error(), test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// writeToSyslog sends each entry of logs as one UDP datagram to the
// syslog server the tests listen with on 127.0.0.1:4242. Failures are
// printed rather than fatal, since the receiving side's assertions
// catch missing lines.
func writeToSyslog(logs []string) {
	conn, err := net.Dial("udp", "127.0.0.1:4242")
	if err != nil {
		fmt.Printf("could not establish connection to syslog server : %s", err)
		return
	}
	// FIX: the connection was never closed, leaking a socket per call.
	defer conn.Close()
	for _, log := range logs {
		fmt.Fprint(conn, log)
	}
}
|
||||
|
||||
// TestStreamingAcquisition starts the syslog datasource on UDP
// 127.0.0.1:4242, pushes raw datagrams at it via writeToSyslog and
// counts the events that come out of the pipeline channel.
//
// NOTE(review): the first case expects "bind: permission denied" on
// port 514 — presumably this fails if the tests run as root; confirm
// in CI.
func TestStreamingAcquisition(t *testing.T) {
	tests := []struct {
		config        string
		expectedErr   string
		logs          []string
		expectedLines int
	}{
		{
			// Default config binds the privileged port 514.
			config:      `source: syslog`,
			expectedErr: "could not start syslog server: could not listen on port 514: listen udp 127.0.0.1:514: bind: permission denied",
		},
		{
			// Lines that are neither RFC5424 nor RFC3164: nothing must be
			// emitted (expectedLines stays 0).
			config: `
source: syslog
listen_port: 4242
listen_addr: 127.0.0.1`,
			logs: []string{"foobar", "bla", "pouet"},
		},
		{
			// Two well-formed RFC5424 messages.
			config: `
source: syslog
listen_port: 4242
listen_addr: 127.0.0.1`,
			expectedLines: 2,
			logs: []string{`<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`,
				`<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`},
		},
		{
			// Four RFC3164 messages; the last one carries no message part,
			// so presumably only 3 events are expected downstream.
			config: `
source: syslog
listen_port: 4242
listen_addr: 127.0.0.1`,
			expectedLines: 3,
			logs: []string{`<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`,
				`<13>May 18 12:37:56 mantis sshd[49340]: blabla2`,
				`<13>May 18 12:37:56 mantis sshd: blabla2`,
				`<13>May 18 12:37:56 mantis sshd`},
		},
	}

	for _, ts := range tests {
		subLogger := log.WithFields(log.Fields{
			"type": "syslog",
		})
		s := SyslogSource{}
		// Configure errors surface through StreamingAcquisition below.
		_ = s.Configure([]byte(ts.config), subLogger)
		tomb := tomb.Tomb{}
		out := make(chan types.Event)
		err := s.StreamingAcquisition(out, &tomb)
		if ts.expectedErr != "" && err == nil {
			t.Fatalf("expected error but got nothing : %+v", ts)
		} else if ts.expectedErr == "" && err != nil {
			t.Fatalf("unexpected error : %s", err)
		} else if ts.expectedErr != "" && err != nil {
			assert.Contains(t, err.Error(), ts.expectedErr)
			continue
		}
		actualLines := 0
		go writeToSyslog(ts.logs)
		// Drain events until 2s elapse with nothing received, then
		// compare the count against the table's expectation.
	READLOOP:
		for {
			select {
			case <-out:
				actualLines++
			case <-time.After(2 * time.Second):
				break READLOOP
			}
		}
		assert.Equal(t, ts.expectedLines, actualLines)
		tomb.Kill(nil)
		tomb.Wait()
	}
}
|
15
pkg/acquisition/test_files/backward_compat.yaml
Normal file
15
pkg/acquisition/test_files/backward_compat.yaml
Normal file
|
@ -0,0 +1,15 @@
|
|||
filename: /tmp/test.log
|
||||
labels:
|
||||
type: syslog
|
||||
---
|
||||
filenames:
|
||||
- /tmp/test*.log
|
||||
labels:
|
||||
type: syslog
|
||||
---
|
||||
# to be uncommented when we re-implement journalctl support
|
||||
# journalctl_filter:
|
||||
# - "_SYSTEMD_UNIT=ssh.service"
|
||||
# labels:
|
||||
# type: syslog
|
||||
---
|
5
pkg/acquisition/test_files/bad_filetype.yaml
Normal file
5
pkg/acquisition/test_files/bad_filetype.yaml
Normal file
|
@ -0,0 +1,5 @@
|
|||
type: file
|
||||
filenames: /tmp/tltlt.log #it should be an array
|
||||
labels:
|
||||
type: syslog
|
||||
|
4
pkg/acquisition/test_files/bad_source.yaml
Normal file
4
pkg/acquisition/test_files/bad_source.yaml
Normal file
|
@ -0,0 +1,4 @@
|
|||
source: does_not_exist
|
||||
labels:
|
||||
type: syslog
|
||||
foobar: toto
|
1
pkg/acquisition/test_files/badyaml.yaml
Normal file
1
pkg/acquisition/test_files/badyaml.yaml
Normal file
|
@ -0,0 +1 @@
|
|||
<aaaa
|
11
pkg/acquisition/test_files/basic_filemode.yaml
Normal file
11
pkg/acquisition/test_files/basic_filemode.yaml
Normal file
|
@ -0,0 +1,11 @@
|
|||
#type: file
|
||||
filename: /tmp/test.log
|
||||
labels:
|
||||
type: syslog
|
||||
---
|
||||
#type: file
|
||||
filenames:
|
||||
- /tmp/test*.log
|
||||
labels:
|
||||
type: syslog
|
||||
---
|
1
pkg/acquisition/test_files/emptyitem.yaml
Normal file
1
pkg/acquisition/test_files/emptyitem.yaml
Normal file
|
@ -0,0 +1 @@
|
|||
|
2
pkg/acquisition/test_files/missing_labels.yaml
Normal file
2
pkg/acquisition/test_files/missing_labels.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
type: file
|
||||
filename: /tmp/test.log
|
|
@ -1,5 +0,0 @@
|
|||
filenames:
|
||||
- ./tests/test.log
|
||||
mode: tail
|
||||
labels:
|
||||
type: my_test_log
|
|
@ -1 +0,0 @@
|
|||
one log line
|
Binary file not shown.
|
@ -77,7 +77,7 @@ var BucketsPour = prometheus.NewCounterVec(
|
|||
Name: "cs_bucket_poured_total",
|
||||
Help: "Total events were poured in bucket.",
|
||||
},
|
||||
[]string{"source", "name"},
|
||||
[]string{"source", "type", "name"},
|
||||
)
|
||||
|
||||
var BucketsOverflow = prometheus.NewCounterVec(
|
||||
|
@ -226,7 +226,7 @@ func LeakRoutine(leaky *Leaky) error {
|
|||
if leaky.logger.Level >= log.TraceLevel {
|
||||
leaky.logger.Tracef("Pour event: %s", spew.Sdump(msg))
|
||||
}
|
||||
BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src}).Inc()
|
||||
BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()
|
||||
|
||||
leaky.Pour(leaky, msg) // glue for now
|
||||
//Clear cache on behalf of pour
|
||||
|
|
|
@ -147,7 +147,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
|||
}
|
||||
|
||||
if n.Name != "" {
|
||||
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
|
||||
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
|
||||
}
|
||||
isWhitelisted := false
|
||||
hasWhitelist := false
|
||||
|
@ -308,14 +308,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
|||
//grok or leafs failed, don't process statics
|
||||
if !NodeState {
|
||||
if n.Name != "" {
|
||||
NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
|
||||
NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
|
||||
}
|
||||
clog.Debugf("Event leaving node : ko")
|
||||
return NodeState, nil
|
||||
}
|
||||
|
||||
if n.Name != "" {
|
||||
NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
|
||||
NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
|
||||
}
|
||||
/*
|
||||
Please kill me. this is to apply statics when the node *has* whitelists that successfully matched the node.
|
||||
|
|
|
@ -195,7 +195,7 @@ var NodesHits = prometheus.NewCounterVec(
|
|||
Name: "cs_node_hits_total",
|
||||
Help: "Total events entered node.",
|
||||
},
|
||||
[]string{"source", "name"},
|
||||
[]string{"source", "type", "name"},
|
||||
)
|
||||
|
||||
var NodesHitsOk = prometheus.NewCounterVec(
|
||||
|
@ -203,7 +203,7 @@ var NodesHitsOk = prometheus.NewCounterVec(
|
|||
Name: "cs_node_hits_ok_total",
|
||||
Help: "Total events successfuly exited node.",
|
||||
},
|
||||
[]string{"source", "name"},
|
||||
[]string{"source", "type", "name"},
|
||||
)
|
||||
|
||||
var NodesHitsKo = prometheus.NewCounterVec(
|
||||
|
@ -211,7 +211,7 @@ var NodesHitsKo = prometheus.NewCounterVec(
|
|||
Name: "cs_node_hits_ko_total",
|
||||
Help: "Total events unsuccessfuly exited node.",
|
||||
},
|
||||
[]string{"source", "name"},
|
||||
[]string{"source", "type", "name"},
|
||||
)
|
||||
|
||||
func stageidx(stage string, stages []string) int {
|
||||
|
|
|
@ -8,4 +8,5 @@ type Line struct {
|
|||
Time time.Time //acquis time
|
||||
Labels map[string]string `yaml:"Labels,omitempty"`
|
||||
Process bool
|
||||
Module string `yaml:"Module,omitempty"`
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ for i in `seq 1 10` ; do
|
|||
echo `date '+%b %d %H:%M:%S '`'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424' >> ssh-bf.log
|
||||
done;
|
||||
|
||||
${CROWDSEC} -file ./ssh-bf.log -type syslog -no-api
|
||||
${CROWDSEC} -dsn "file://./ssh-bf.log" -type syslog -no-api
|
||||
|
||||
${CSCLI} decisions list -o=json | ${JQ} '. | length == 1' || fail "expected exactly one decision"
|
||||
${CSCLI} decisions list -o=json | ${JQ} '.[].decisions[0].value == "1.1.1.172"' || fail "(exact) expected ban on 1.1.1.172"
|
||||
|
|
|
@ -20,7 +20,7 @@ for i in `seq 1 10` ; do
|
|||
echo `date '+%b %d %H:%M:%S '`'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.174 port 35424' >> ssh-bf.log
|
||||
done;
|
||||
|
||||
${CROWDSEC} -file ./ssh-bf.log -type syslog -no-api
|
||||
${CROWDSEC} -dsn file://./ssh-bf.log -type syslog -no-api
|
||||
|
||||
${CSCLI} decisions list -o=json | ${JQ} '. | length == 1' || fail "expected exactly one decision"
|
||||
${CSCLI} decisions list -o=json | ${JQ} '.[].decisions[0].value == "1.1.1.174"' || fail "(exact) expected ban on 1.1.1.174"
|
||||
|
@ -32,7 +32,7 @@ ${CSCLI} decisions list -o=json | ${JQ} '.[].decisions[0].simulated == false' |
|
|||
${CSCLI} decisions delete --all
|
||||
${CSCLI} simulation enable $SCENARIO
|
||||
|
||||
${CROWDSEC} -file ./ssh-bf.log -type syslog -no-api
|
||||
${CROWDSEC} -dsn file://./ssh-bf.log -type syslog -no-api
|
||||
|
||||
${CSCLI} decisions list --no-simu -o=json | ${JQ} '. == null' || fail "expected no decision (listing only non-simulated decisions)"
|
||||
|
||||
|
@ -42,6 +42,6 @@ ${CSCLI} decisions delete --all
|
|||
${CSCLI} simulation disable $SCENARIO
|
||||
${CSCLI} simulation enable --global
|
||||
|
||||
${CROWDSEC} -file ./ssh-bf.log -type syslog -no-api
|
||||
${CROWDSEC} -dsn file://./ssh-bf.log -type syslog -no-api
|
||||
|
||||
${CSCLI} decisions list --no-simu -o=json | ${JQ} '. == null' || fail "expected no decision (listing only non-simulated decisions)"
|
Loading…
Add table
Reference in a new issue