123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304 |
- package apiserver
- import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
- "time"
- "slices"
- log "github.com/sirupsen/logrus"
- "github.com/crowdsecurity/go-cs-lib/ptr"
- "github.com/crowdsecurity/go-cs-lib/trace"
- "github.com/crowdsecurity/go-cs-lib/version"
- "github.com/crowdsecurity/crowdsec/pkg/database/ent"
- "github.com/crowdsecurity/crowdsec/pkg/models"
- )
- func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) {
- lpsMetrics, err := a.dbClient.GetLPsUsageMetrics()
- if err != nil {
- return nil, err
- }
- //spew.Dump(lpsMetrics)
- bouncersMetrics, err := a.dbClient.GetBouncersUsageMetrics()
- if err != nil {
- return nil, err
- }
- //spew.Dump(bouncersMetrics)
- allMetrics := &models.AllMetrics{}
- /*allLps, err := a.dbClient.ListMachines()
- if err != nil {
- return nil, err
- }
- allBouncers, err := a.dbClient.ListBouncers()
- if err != nil {
- return nil, err
- }*/
- lpsCache := make(map[string]*ent.Machine)
- bouncersCache := make(map[string]*ent.Bouncer)
- for _, lpsMetric := range lpsMetrics {
- lpName := lpsMetric.GeneratedBy
- metrics := models.LogProcessorsMetricsItems0{}
- err := json.Unmarshal([]byte(lpsMetric.Payload), &metrics)
- if err != nil {
- log.Errorf("unable to unmarshal LPs metrics (%s)", err)
- continue
- }
- var lp *ent.Machine
- if _, ok := lpsCache[lpName]; !ok {
- lp, err = a.dbClient.QueryMachineByID(lpName)
- if err != nil {
- log.Errorf("unable to get LP information for %s: %s", lpName, err)
- continue
- }
- } else {
- lp = lpsCache[lpName]
- }
- if lp.Hubstate != nil {
- metrics.HubItems = *lp.Hubstate
- }
- metrics.Os = &models.OSversion{
- Name: lp.Osname,
- Version: lp.Osversion,
- }
- metrics.FeatureFlags = strings.Split(lp.Featureflags, ",")
- metrics.Version = &lp.Version
- //TODO: meta
- allMetrics.LogProcessors = append(allMetrics.LogProcessors, models.LogProcessorsMetrics{&metrics})
- }
- for _, bouncersMetric := range bouncersMetrics {
- bouncerName := bouncersMetric.GeneratedBy
- metrics := models.RemediationComponentsMetricsItems0{}
- err := json.Unmarshal([]byte(bouncersMetric.Payload), &metrics)
- if err != nil {
- log.Errorf("unable to unmarshal bouncers metrics (%s)", err)
- continue
- }
- var bouncer *ent.Bouncer
- if _, ok := bouncersCache[bouncerName]; !ok {
- bouncer, err = a.dbClient.SelectBouncerByName(bouncerName)
- if err != nil {
- log.Errorf("unable to get bouncer information for %s: %s", bouncerName, err)
- continue
- }
- } else {
- bouncer = bouncersCache[bouncerName]
- }
- metrics.Os = &models.OSversion{
- Name: bouncer.Osname,
- Version: bouncer.Osversion,
- }
- metrics.Type = bouncer.Type
- metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",")
- //TODO: meta
- allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, models.RemediationComponentsMetrics{&metrics})
- }
- //bouncerInfos := make(map[string]string)
- //TODO: add LAPI metrics
- return allMetrics, nil
- }
- func (a *apic) GetMetrics() (*models.Metrics, error) {
- machines, err := a.dbClient.ListMachines()
- if err != nil {
- return nil, err
- }
- machinesInfo := make([]*models.MetricsAgentInfo, len(machines))
- for i, machine := range machines {
- machinesInfo[i] = &models.MetricsAgentInfo{
- Version: machine.Version,
- Name: machine.MachineId,
- LastUpdate: machine.UpdatedAt.Format(time.RFC3339),
- LastPush: ptr.OrEmpty(machine.LastPush).Format(time.RFC3339),
- }
- }
- bouncers, err := a.dbClient.ListBouncers()
- if err != nil {
- return nil, err
- }
- bouncersInfo := make([]*models.MetricsBouncerInfo, len(bouncers))
- for i, bouncer := range bouncers {
- bouncersInfo[i] = &models.MetricsBouncerInfo{
- Version: bouncer.Version,
- CustomName: bouncer.Name,
- Name: bouncer.Type,
- LastPull: bouncer.LastPull.Format(time.RFC3339),
- }
- }
- return &models.Metrics{
- ApilVersion: ptr.Of(version.String()),
- Machines: machinesInfo,
- Bouncers: bouncersInfo,
- }, nil
- }
- func (a *apic) fetchMachineIDs() ([]string, error) {
- machines, err := a.dbClient.ListMachines()
- if err != nil {
- return nil, err
- }
- ret := make([]string, len(machines))
- for i, machine := range machines {
- ret[i] = machine.MachineId
- }
- // sorted slices are required for the slices.Equal comparison
- slices.Sort(ret)
- return ret, nil
- }
// SendMetrics sends metrics to the API server until it receives a stop signal.
//
// Metrics are sent at start, then at the randomized metricsIntervalFirst,
// then at regular metricsInterval. If a change is detected in the list
// of machines, the next metrics are sent immediately.
func (a *apic) SendMetrics(stop chan (bool)) {
	defer trace.CatchPanic("lapi/metricsToAPIC")

	// verify the list of machines every <checkInt> interval
	const checkInt = 20 * time.Second

	// Send schedule: (almost) immediately, then once after the randomized
	// first interval, then at the regular interval forever.
	// intervals must always be > 0
	metInts := []time.Duration{1 * time.Millisecond, a.metricsIntervalFirst, a.metricsInterval}

	log.Infof("Start sending metrics to CrowdSec Central API (interval: %s once, then %s)",
		metInts[1].Round(time.Second), metInts[2])

	// nextMetInt walks forward through metInts and then sticks to the last
	// (regular) interval for all subsequent calls.
	count := -1
	nextMetInt := func() time.Duration {
		if count < len(metInts)-1 {
			count++
		}

		return metInts[count]
	}

	machineIDs := []string{}

	// reloadMachineIDs refreshes the cached machine ID list; on failure it
	// keeps the previous list and relies on the next check tick to retry.
	reloadMachineIDs := func() {
		ids, err := a.fetchMachineIDs()
		if err != nil {
			log.Debugf("unable to get machines (%s), will retry", err)

			return
		}

		machineIDs = ids
	}

	// store the list of machine IDs to compare
	// with the next list
	reloadMachineIDs()

	checkTicker := time.NewTicker(checkInt)
	metTicker := time.NewTicker(nextMetInt())

	for {
		select {
		case <-stop:
			checkTicker.Stop()
			metTicker.Stop()

			return
		case <-checkTicker.C:
			oldIDs := machineIDs
			reloadMachineIDs()

			if !slices.Equal(oldIDs, machineIDs) {
				log.Infof("capi metrics: machines changed, immediate send")
				// Trigger a near-immediate send; the handler below resets
				// the ticker back to the regular schedule afterwards.
				metTicker.Reset(1 * time.Millisecond)
			}
		case <-metTicker.C:
			// Stop the ticker while collecting/sending so a slow round trip
			// does not queue an extra tick; it is Reset below.
			metTicker.Stop()

			metrics, err := a.GetMetrics()
			if err != nil {
				log.Errorf("unable to get metrics (%s)", err)
			}
			// metrics are nil if they could not be retrieved
			if metrics != nil {
				log.Info("capi metrics: sending")

				_, _, err = a.apiClient.Metrics.Add(context.Background(), metrics)
				if err != nil {
					log.Errorf("capi metrics: failed: %s", err)
				}
			}

			metTicker.Reset(nextMetInt())
		case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others?
			checkTicker.Stop()
			metTicker.Stop()
			a.pullTomb.Kill(nil)
			a.pushTomb.Kill(nil)

			return
		}
	}
}
- func (a *apic) SendUsageMetrics() {
- defer trace.CatchPanic("lapi/usageMetricsToAPIC")
- ticker := time.NewTicker(5 * time.Second)
- for {
- select {
- case <-a.metricsTomb.Dying():
- //The normal metrics routine also kills push/pull tombs, does that make sense ?
- ticker.Stop()
- return
- case <-ticker.C:
- metrics, err := a.GetUsageMetrics()
- if err != nil {
- log.Errorf("unable to get usage metrics (%s)", err)
- }
- jsonStr, err := json.Marshal(metrics)
- if err != nil {
- log.Errorf("unable to marshal usage metrics (%s)", err)
- }
- fmt.Printf("Usage metrics: %s\n", string(jsonStr))
- }
- }
- }
|