2020-05-15 09:39:16 +00:00
|
|
|
package parser
|
|
|
|
|
|
|
|
import (
|
2023-06-13 11:16:13 +00:00
|
|
|
"errors"
|
2020-05-15 09:39:16 +00:00
|
|
|
"fmt"
|
|
|
|
"net"
|
2020-08-20 13:07:50 +00:00
|
|
|
"strings"
|
2023-01-11 14:01:02 +00:00
|
|
|
"time"
|
2020-05-15 09:39:16 +00:00
|
|
|
|
|
|
|
"github.com/antonmedv/expr"
|
2023-06-13 11:16:13 +00:00
|
|
|
"github.com/antonmedv/expr/vm"
|
|
|
|
"github.com/davecgh/go-spew/spew"
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
|
|
|
log "github.com/sirupsen/logrus"
|
2021-03-10 17:27:21 +00:00
|
|
|
yaml "gopkg.in/yaml.v2"
|
2020-08-03 10:21:15 +00:00
|
|
|
|
2023-06-13 11:16:13 +00:00
|
|
|
"github.com/crowdsecurity/grokky"
|
|
|
|
|
2023-01-11 14:01:02 +00:00
|
|
|
"github.com/crowdsecurity/crowdsec/pkg/cache"
|
2020-05-15 09:39:16 +00:00
|
|
|
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
|
|
|
|
"github.com/crowdsecurity/crowdsec/pkg/types"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Node is one node of a parser tree, loaded from a YAML stage file.
// A node can act as a pure filter/router (with child nodes), as a leaf
// holding a grok pattern + statics, or as a whitelist holder.
type Node struct {
	FormatVersion string `yaml:"format"`
	//Enable config + runtime debug of node via config
	Debug bool `yaml:"debug,omitempty"`
	//If enabled, the node (and its child) will report their own statistics
	Profiling bool `yaml:"profiling,omitempty"`
	//Name, author, description and reference(s) for parser pattern
	Name        string   `yaml:"name,omitempty"`
	Author      string   `yaml:"author,omitempty"`
	Description string   `yaml:"description,omitempty"`
	References  []string `yaml:"references,omitempty"`
	//if debug is present in the node, keep its specific Logger in runtime structure
	Logger *log.Entry `yaml:"-"`
	//This is mostly a hack to make writing less repetitive.
	//relying on stage, we know which field to parse, and we
	//can also promote log to next stage on success
	Stage string `yaml:"stage,omitempty"`
	//OnSuccess allows to tag a node to be able to move log to next stage on success
	OnSuccess string `yaml:"onsuccess,omitempty"`
	rn        string //this is only for us in debug, a random generated name for each node
	//Filter is executed at runtime (with current log line as context)
	//and must succeed or node is exited
	Filter        string                    `yaml:"filter,omitempty"`
	RunTimeFilter *vm.Program               `yaml:"-" json:"-"` //the actual compiled filter
	ExprDebugger  *exprhelpers.ExprDebugger `yaml:"-" json:"-"` //used to debug expression by printing the content of each variable of the expression
	//If node has leafs, execute all of them until one asks for a 'break'
	LeavesNodes []Node `yaml:"nodes,omitempty"`
	//EnrichFunctions holds the enrichment plugins available to this node's statics
	EnrichFunctions EnricherCtx

	/* If the node is actually a leaf, it can have : grok, enrich, statics */
	//pattern_syntax are named grok patterns that are re-utilized over several grok patterns
	SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"`

	//Holds a grok pattern
	Grok GrokPattern `yaml:"grok,omitempty"`
	//Statics can be present in any type of node and is executed last
	Statics []ExtraField `yaml:"statics,omitempty"`
	//Stash allows to capture data from the log line and store it in an accessible cache
	Stash []DataCapture `yaml:"stash,omitempty"`
	//Whitelists : ip/cidr/expression based whitelisting of events
	Whitelist Whitelist           `yaml:"whitelist,omitempty"`
	Data      []*types.DataSource `yaml:"data,omitempty"`
}
|
|
|
|
|
2021-09-09 14:27:30 +00:00
|
|
|
func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
|
2020-05-15 09:39:16 +00:00
|
|
|
|
|
|
|
//stage is being set automagically
|
|
|
|
if n.Stage == "" {
|
2020-05-19 19:31:06 +00:00
|
|
|
return fmt.Errorf("stage needs to be an existing stage")
|
2020-05-15 09:39:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* "" behaves like continue */
|
|
|
|
if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" {
|
|
|
|
return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess)
|
|
|
|
}
|
|
|
|
if n.Filter != "" && n.RunTimeFilter == nil {
|
|
|
|
return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter)
|
|
|
|
}
|
|
|
|
|
|
|
|
if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" {
|
2021-06-21 07:07:33 +00:00
|
|
|
if n.Grok.TargetField == "" && n.Grok.ExpValue == "" {
|
|
|
|
return fmt.Errorf("grok requires 'expression' or 'apply_on'")
|
2020-05-15 09:39:16 +00:00
|
|
|
}
|
|
|
|
if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" {
|
|
|
|
return fmt.Errorf("grok needs 'pattern' or 'name'")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for idx, static := range n.Statics {
|
|
|
|
if static.Method != "" {
|
|
|
|
if static.ExpValue == "" {
|
|
|
|
return fmt.Errorf("static %d : when method is set, expression must be present", idx)
|
|
|
|
}
|
2021-09-09 14:27:30 +00:00
|
|
|
if _, ok := ectx.Registered[static.Method]; !ok {
|
|
|
|
log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method)
|
2020-05-15 09:39:16 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" {
|
|
|
|
return fmt.Errorf("static %d : at least one of meta/event/target must be set", idx)
|
|
|
|
}
|
|
|
|
if static.Value == "" && static.RunTimeValue == nil {
|
|
|
|
return fmt.Errorf("static %d value or expression must be set", idx)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-01-11 14:01:02 +00:00
|
|
|
|
|
|
|
for idx, stash := range n.Stash {
|
|
|
|
if stash.Name == "" {
|
|
|
|
return fmt.Errorf("stash %d : name must be set", idx)
|
|
|
|
}
|
|
|
|
if stash.Value == "" {
|
|
|
|
return fmt.Errorf("stash %s : value expression must be set", stash.Name)
|
|
|
|
}
|
|
|
|
if stash.Key == "" {
|
|
|
|
return fmt.Errorf("stash %s : key expression must be set", stash.Name)
|
|
|
|
}
|
|
|
|
if stash.TTL == "" {
|
|
|
|
return fmt.Errorf("stash %s : ttl must be set", stash.Name)
|
|
|
|
}
|
2023-02-06 14:42:55 +00:00
|
|
|
if stash.Strategy == "" {
|
|
|
|
stash.Strategy = "LRU"
|
|
|
|
}
|
2023-01-11 14:01:02 +00:00
|
|
|
//should be configurable
|
|
|
|
if stash.MaxMapSize == 0 {
|
|
|
|
stash.MaxMapSize = 100
|
|
|
|
}
|
|
|
|
}
|
2020-05-15 09:39:16 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-05-17 08:50:37 +00:00
|
|
|
// process runs the node against the event p and returns whether the node
// succeeded. The overall sequence is : filter -> metrics -> whitelists ->
// grok -> stash -> child nodes -> statics -> stage promotion.
//
// Returns (false, nil) when the node simply doesn't apply to the event, and
// a non-nil error only on hard failures (statics processing, child error).
// On success with OnSuccess == "next_stage", p.Stage is advanced in place.
func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) {
	var NodeState bool
	var NodeHasOKGrok bool
	clog := n.Logger

	// same env map is reused for every expr evaluation in this node and its children
	cachedExprEnv := expressionEnv

	clog.Tracef("Event entering node")
	if n.RunTimeFilter != nil {
		//Evaluate node's filter
		output, err := expr.Run(n.RunTimeFilter, cachedExprEnv)
		if err != nil {
			// a filter runtime error is treated as "node doesn't match", not a hard error
			clog.Warningf("failed to run filter : %v", err)
			clog.Debugf("Event leaving node : ko")
			return false, nil
		}

		switch out := output.(type) {
		case bool:
			if n.Debug {
				n.ExprDebugger.Run(clog, out, cachedExprEnv)
			}
			if !out {
				clog.Debugf("Event leaving node : ko (failed filter)")
				return false, nil
			}
		default:
			// filters must be boolean expressions
			clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output)
			clog.Debugf("Event leaving node : ko")
			return false, nil
		}
		NodeState = true
	} else {
		clog.Tracef("Node has not filter, enter")
		NodeState = true
	}

	// only named nodes report prometheus metrics
	if n.Name != "" {
		NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
	}
	isWhitelisted := false
	hasWhitelist := false
	var srcs []net.IP
	/*overflow and log don't hold the source ip in the same field, should be changed */
	/* perform whitelist checks for ips, cidr accordingly */
	/* TODO move whitelist elsewhere */
	if p.Type == types.LOG {
		if _, ok := p.Meta["source_ip"]; ok {
			srcs = append(srcs, net.ParseIP(p.Meta["source_ip"]))
		}
	} else if p.Type == types.OVFLW {
		for k := range p.Overflow.Sources {
			srcs = append(srcs, net.ParseIP(k))
		}
	}
	// IP / CIDR based whitelists : first matching source wins
	for _, src := range srcs {
		if isWhitelisted {
			break
		}
		for _, v := range n.Whitelist.B_Ips {
			if v.Equal(src) {
				clog.Debugf("Event from [%s] is whitelisted by IP (%s), reason [%s]", src, v, n.Whitelist.Reason)
				isWhitelisted = true
			} else {
				clog.Tracef("whitelist: %s is not eq [%s]", src, v)
			}
			hasWhitelist = true
		}
		for _, v := range n.Whitelist.B_Cidrs {
			if v.Contains(src) {
				clog.Debugf("Event from [%s] is whitelisted by CIDR (%s), reason [%s]", src, v, n.Whitelist.Reason)
				isWhitelisted = true
			} else {
				clog.Tracef("whitelist: %s not in [%s]", src, v)
			}
			hasWhitelist = true
		}
	}

	/* run whitelist expression tests anyway */
	for eidx, e := range n.Whitelist.B_Exprs {
		output, err := expr.Run(e.Filter, cachedExprEnv)
		if err != nil {
			clog.Warningf("failed to run whitelist expr : %v", err)
			clog.Debug("Event leaving node : ko")
			return false, nil
		}
		switch out := output.(type) {
		case bool:
			if n.Debug {
				e.ExprDebugger.Run(clog, out, cachedExprEnv)
			}
			if out {
				clog.Debugf("Event is whitelisted by expr, reason [%s]", n.Whitelist.Reason)
				isWhitelisted = true
			}
			hasWhitelist = true
		default:
			log.Errorf("unexpected type %t (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx])
		}
	}
	// mark the event as whitelisted only once (first node that whitelists it wins the reason)
	if isWhitelisted && !p.Whitelisted {
		p.Whitelisted = true
		p.WhitelistReason = n.Whitelist.Reason
		/*huglily wipe the ban order if the event is whitelisted and it's an overflow */
		if p.Type == types.OVFLW { /*don't do this at home kids */
			ips := []string{}
			for _, src := range srcs {
				ips = append(ips, src.String())
			}
			clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason)
			p.Overflow.Whitelisted = true
		}
	}

	//Process grok if present, should be exclusive with nodes :)
	gstr := ""
	if n.Grok.RunTimeRegexp != nil {
		clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp)
		//for unparsed, parsed etc. set sensible defaults to reduce user hassle
		if n.Grok.TargetField != "" {
			//it's a hack to avoid using real reflect
			if n.Grok.TargetField == "Line.Raw" {
				gstr = p.Line.Raw
			} else if val, ok := p.Parsed[n.Grok.TargetField]; ok {
				gstr = val
			} else {
				clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed)
				NodeState = false
			}
		} else if n.Grok.RunTimeValue != nil {
			// grok source is a compiled expression instead of a field
			output, err := expr.Run(n.Grok.RunTimeValue, cachedExprEnv)
			if err != nil {
				clog.Warningf("failed to run RunTimeValue : %v", err)
				NodeState = false
			}
			switch out := output.(type) {
			case string:
				gstr = out
			default:
				clog.Errorf("unexpected return type for RunTimeValue : %T", output)
			}
		}

		// groklabel is only used for logging : named pattern or truncated inline pattern
		var groklabel string
		if n.Grok.RegexpName == "" {
			groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue)
		} else {
			groklabel = n.Grok.RegexpName
		}
		grok := n.Grok.RunTimeRegexp.Parse(gstr)
		if len(grok) > 0 {
			/*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/
			NodeHasOKGrok = true
			clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok))
			//We managed to grok stuff, merged into parse
			for k, v := range grok {
				clog.Debugf("\t.Parsed['%s'] = '%s'", k, v)
				p.Parsed[k] = v
			}
			// if the grok succeed, process associated statics
			err := n.ProcessStatics(n.Grok.Statics, p)
			if err != nil {
				clog.Errorf("(%s) Failed to process statics : %v", n.rn, err)
				return false, err
			}
		} else {
			//grok failed, node failed
			clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr)
			NodeState = false
		}

	} else {
		clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
	}

	//Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
	if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil {
		for idx, stash := range n.Stash {
			var value string
			var key string
			if stash.ValueExpression == nil {
				clog.Warningf("Stash %d has no value expression, skipping", idx)
				continue
			}
			if stash.KeyExpression == nil {
				clog.Warningf("Stash %d has no key expression, skipping", idx)
				continue
			}
			//collect the data
			output, err := expr.Run(stash.ValueExpression, cachedExprEnv)
			if err != nil {
				clog.Warningf("Error while running stash val expression : %v", err)
			}
			//can we expect anything else than a string ?
			switch output := output.(type) {
			case string:
				value = output
			default:
				clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Value)
				continue
			}

			//collect the key
			output, err = expr.Run(stash.KeyExpression, cachedExprEnv)
			if err != nil {
				clog.Warningf("Error while running stash key expression : %v", err)
			}
			//can we expect anything else than a string ?
			switch output := output.(type) {
			case string:
				key = output
			default:
				clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key)
				continue
			}
			cache.SetKey(stash.Name, key, value, &stash.TTLVal)
		}
	}

	//Iterate on leafs
	for _, leaf := range n.LeavesNodes {
		ret, err := leaf.process(p, ctx, cachedExprEnv)
		if err != nil {
			clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err)
			clog.Debugf("Event leaving node : ko")
			return false, err
		}
		clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
		if ret {
			NodeState = true
			/* if child is successful, stop processing */
			if n.OnSuccess == "next_stage" {
				clog.Debugf("child is success, OnSuccess=next_stage, skip")
				break
			}
		} else if !NodeHasOKGrok {
			/*
				If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails.
				If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
			*/
			NodeState = false
		}
	}
	/*todo : check if a node made the state change ?*/
	/* should the childs inherit the on_success behavior */

	clog.Tracef("State after nodes : %v", NodeState)

	//grok or leafs failed, don't process statics
	if !NodeState {
		if n.Name != "" {
			NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
		}
		clog.Debugf("Event leaving node : ko")
		return NodeState, nil
	}

	if n.Name != "" {
		NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
	}

	/*
		This is to apply statics when the node *has* whitelists that successfully matched the node.
	*/
	if len(n.Statics) > 0 && (isWhitelisted || !hasWhitelist) {
		clog.Debugf("+ Processing %d statics", len(n.Statics))
		// if all else is good in whitelist, process node's statics
		err := n.ProcessStatics(n.Statics, p)
		if err != nil {
			clog.Errorf("Failed to process statics : %v", err)
			return false, err
		}
	} else {
		clog.Tracef("! No node statics")
	}

	if NodeState {
		clog.Debugf("Event leaving node : ok")
		log.Tracef("node is successful, check strategy")
		if n.OnSuccess == "next_stage" {
			idx := stageidx(p.Stage, ctx.Stages)
			//we're at the last stage
			if idx+1 == len(ctx.Stages) {
				clog.Debugf("node reached the last stage : %s", p.Stage)
			} else {
				clog.Debugf("move Event from stage %s to %s", p.Stage, ctx.Stages[idx+1])
				p.Stage = ctx.Stages[idx+1]
			}
		} else {
			clog.Tracef("no strategy on success (%s), continue !", n.OnSuccess)
		}
	} else {
		clog.Debugf("Event leaving node : ko")
	}
	clog.Tracef("Node successful, continue")
	return NodeState, nil
}
|
|
|
|
|
2021-09-09 14:27:30 +00:00
|
|
|
// compile turns the YAML-loaded node into a runnable one : it compiles the
// filter and all expressions, registers grok sub-patterns, resolves/compiles
// the grok pattern, initializes stash caches, recursively compiles child
// nodes, and builds the whitelist runtime forms. A node must end up with at
// least one "useful" element (grok, statics, children or whitelist),
// otherwise it is rejected. validate() is invoked at the end.
func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
	var err error
	// valid is flipped to true by any phase that gives the node something to do
	var valid bool

	valid = false

	dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true}
	// random name used in debug logs to identify this node instance
	n.rn = seed.Generate()

	n.EnrichFunctions = ectx
	log.Tracef("compile, node is %s", n.Stage)
	/* if the node has debugging enabled, create a specific logger with debug
	that will be used only for processing this node ;) */
	if n.Debug {
		var clog = log.New()
		if err = types.ConfigureLogger(clog); err != nil {
			log.Fatalf("While creating bucket-specific logger : %s", err)
		}
		clog.SetLevel(log.DebugLevel)
		n.Logger = clog.WithFields(log.Fields{
			"id": n.rn,
		})
		n.Logger.Infof("%s has debug enabled", n.Name)
	} else {
		/* else bind it to the default one (might find something more elegant here)*/
		n.Logger = log.WithFields(log.Fields{
			"id": n.rn,
		})
	}

	/* display info about top-level nodes, they should be the only one with explicit stage name ?*/
	n.Logger = n.Logger.WithFields(log.Fields{"stage": n.Stage, "name": n.Name})

	n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n))

	//compile filter if present
	if n.Filter != "" {
		n.RunTimeFilter, err = expr.Compile(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err)
		}

		// debugger is best-effort : failure to build it is logged but not fatal
		if n.Debug {
			n.ExprDebugger, err = exprhelpers.NewDebugger(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
			if err != nil {
				log.Errorf("unable to build debug filter for '%s' : %s", n.Filter, err)
			}
		}

	}

	/* handle pattern_syntax and groks */
	for _, pattern := range n.SubGroks {
		n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value)
		if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil {
			// duplicate registration is tolerated (same pattern may appear in several files)
			if errors.Is(err, grokky.ErrAlreadyExist) {
				n.Logger.Warningf("grok '%s' already registred", pattern.Key)
				continue
			}
			n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err)
			return err
		}
	}

	/* load grok by name or compile in-place */
	if n.Grok.RegexpName != "" {
		n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)
		n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
		if err != nil {
			return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err)
		}
		if n.Grok.RunTimeRegexp == nil {
			return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName)
		}
		n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.String())
		valid = true
	} else if n.Grok.RegexpValue != "" {
		// a trailing newline in an inline pattern is almost always a YAML quoting mistake
		if strings.HasSuffix(n.Grok.RegexpValue, "\n") {
			n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue)
		}
		n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
		if err != nil {
			return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err)
		}
		if n.Grok.RunTimeRegexp == nil {
			// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
			return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue)
		}
		n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.String())
		valid = true
	}

	/*if grok source is an expression*/
	if n.Grok.ExpValue != "" {
		n.Grok.RunTimeValue, err = expr.Compile(n.Grok.ExpValue,
			exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			return fmt.Errorf("while compiling grok's expression: %w", err)
		}
	}

	/* load grok statics */
	//compile expr statics if present
	for idx := range n.Grok.Statics {
		if n.Grok.Statics[idx].ExpValue != "" {
			n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue,
				exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
			if err != nil {
				return err
			}
		}
		valid = true
	}

	/* load data capture (stash) */
	for i, stash := range n.Stash {
		n.Stash[i].ValueExpression, err = expr.Compile(stash.Value,
			exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			return fmt.Errorf("while compiling stash value expression: %w", err)
		}

		n.Stash[i].KeyExpression, err = expr.Compile(stash.Key,
			exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			return fmt.Errorf("while compiling stash key expression: %w", err)
		}

		n.Stash[i].TTLVal, err = time.ParseDuration(stash.TTL)
		if err != nil {
			return fmt.Errorf("while parsing stash ttl: %w", err)
		}

		logLvl := n.Logger.Logger.GetLevel()
		//init the cache, does it make sense to create it here just to be sure everything is fine ?
		if err = cache.CacheInit(cache.CacheCfg{
			Size:     n.Stash[i].MaxMapSize,
			TTL:      n.Stash[i].TTLVal,
			Name:     n.Stash[i].Name,
			Strategy: n.Stash[i].Strategy,
			LogLevel: &logLvl,
		}); err != nil {
			return fmt.Errorf("while initializing cache: %w", err)
		}
	}

	/* compile leafs if present */
	for idx := range n.LeavesNodes {
		if n.LeavesNodes[idx].Name == "" {
			n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name)
		}
		/*propagate debug/stats to child nodes*/
		if !n.LeavesNodes[idx].Debug && n.Debug {
			n.LeavesNodes[idx].Debug = true
		}
		if !n.LeavesNodes[idx].Profiling && n.Profiling {
			n.LeavesNodes[idx].Profiling = true
		}
		// children live in the same stage as their parent
		n.LeavesNodes[idx].Stage = n.Stage
		err = n.LeavesNodes[idx].compile(pctx, ectx)
		if err != nil {
			return err
		}
		valid = true
	}

	/* load statics if present */
	for idx := range n.Statics {
		if n.Statics[idx].ExpValue != "" {
			n.Statics[idx].RunTimeValue, err = expr.Compile(n.Statics[idx].ExpValue, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
			if err != nil {
				n.Logger.Errorf("Statics Compilation failed %v.", err)
				return err
			}
		}
		valid = true
	}

	/* compile whitelists if present */
	for _, v := range n.Whitelist.Ips {
		n.Whitelist.B_Ips = append(n.Whitelist.B_Ips, net.ParseIP(v))
		n.Logger.Debugf("adding ip %s to whitelists", net.ParseIP(v))
		valid = true
	}

	for _, v := range n.Whitelist.Cidrs {
		_, tnet, err := net.ParseCIDR(v)
		if err != nil {
			// NOTE(review): Fatalf aborts the whole process on a bad CIDR instead of returning an error
			n.Logger.Fatalf("Unable to parse cidr whitelist '%s' : %v.", v, err)
		}
		n.Whitelist.B_Cidrs = append(n.Whitelist.B_Cidrs, tnet)
		n.Logger.Debugf("adding cidr %s to whitelists", tnet)
		valid = true
	}

	for _, filter := range n.Whitelist.Exprs {
		expression := &ExprWhitelist{}
		expression.Filter, err = expr.Compile(filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			n.Logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", filter, err)
		}
		// debugger is best-effort, like for the node filter above
		expression.ExprDebugger, err = exprhelpers.NewDebugger(filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
		if err != nil {
			log.Errorf("unable to build debug filter for '%s' : %s", filter, err)
		}
		n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, expression)
		n.Logger.Debugf("adding expression %s to whitelists", filter)
		valid = true
	}

	if !valid {
		/* node is empty, error force return */
		n.Logger.Error("Node is empty or invalid, abort")
		n.Stage = ""
		return fmt.Errorf("Node is empty")
	}

	if err := n.validate(pctx, ectx); err != nil {
		return err
	}

	return nil
}
|