implement highAvailability feature

he2ss 1 year ago
commit 8ce3f6ed85

+ 2 - 2
cmd/crowdsec-cli/papi.go

@@ -53,7 +53,7 @@ func NewPapiStatusCmd() *cobra.Command {
 				log.Fatalf("unable to initialize database client : %s", err)
 				log.Fatalf("unable to initialize database client : %s", err)
 			}
 			}
 
 
-			apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists)
+			apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists, csConfig.API.Server.HighAvailability)

 			if err != nil {
 				log.Fatalf("unable to initialize API client : %s", err)
@@ -103,7 +103,7 @@ func NewPapiSyncCmd() *cobra.Command {
 				log.Fatalf("unable to initialize database client : %s", err)
 				log.Fatalf("unable to initialize database client : %s", err)
 			}
 			}
 
 
-			apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists)
+			apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists, csConfig.API.Server.HighAvailability)

 			if err != nil {
 				log.Fatalf("unable to initialize API client : %s", err)

+ 1 - 0
go.sum

@@ -625,6 +625,7 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=

+ 22 - 1
pkg/apiserver/apic.go

@@ -56,6 +56,7 @@ type apic struct {
 	metricsIntervalFirst time.Duration
 	dbClient             *database.Client
 	apiClient            *apiclient.ApiClient
+	isHAEnabled          bool
 	AlertsAddChan        chan []*models.Alert

 	mu            sync.Mutex
@@ -162,7 +163,7 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool)
 	return signal
 }

-func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) {
+func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist, haConfig *csconfig.HighAvailabilityCfg) (*apic, error) {
 	var err error
 	ret := &apic{

@@ -186,6 +187,10 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con
 		whitelists:           apicWhitelist,
 	}

+	if haConfig != nil && *haConfig.Enabled {
+		ret.isHAEnabled = *haConfig.Enabled
+	}
+
 	password := strfmt.Password(config.Credentials.Password)
 	apiURL, err := url.Parse(config.Credentials.URL)
 	if err != nil {
@@ -571,6 +576,15 @@ func (a *apic) PullTop(forcePull bool) error {
 		}
 	}

+	if a.isHAEnabled {
+		log.Debug("Acquiring lock for pullCAPI")
+		err = a.dbClient.AcquirePullCAPILock()
+		if a.dbClient.IsLocked(err) {
+			log.Info("PullCAPI is already running, skipping")
+			return nil
+		}
+	}
+
 	log.Infof("Starting community-blocklist update")
 	log.Infof("Starting community-blocklist update")
 
 
 	data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup})
 	data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup})
@@ -617,6 +631,13 @@ func (a *apic) PullTop(forcePull bool) error {
 	if err := a.UpdateBlocklists(data.Links, add_counters); err != nil {
 		return fmt.Errorf("while updating blocklists: %w", err)
 	}
+
+	if a.isHAEnabled {
+		log.Debug("Releasing lock for pullCAPI")
+		if err := a.dbClient.ReleasePullCAPILock(); err != nil {
+			return fmt.Errorf("while releasing lock: %w", err)
+		}
+	}
 	return nil
 }
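
This is the heart of the HA behaviour for the community-blocklist pull: when several LAPI instances share one database, only the instance that obtains the pullCAPI lock runs the update and the others skip the cycle. A condensed sketch of that guard (not the commit's code), using the AcquirePullCAPILock / IsLocked / ReleasePullCAPILock helpers referenced above; the deferred release is an illustration only, the commit releases the lock explicitly at the end of PullTop:

	if a.isHAEnabled {
		err := a.dbClient.AcquirePullCAPILock()
		if a.dbClient.IsLocked(err) {
			return nil // another LAPI node is already pulling, skip this cycle
		}
		if err != nil {
			return err
		}
		defer func() {
			if err := a.dbClient.ReleasePullCAPILock(); err != nil {
				log.Errorf("while releasing pullCAPI lock: %s", err)
			}
		}()
	}
	// ... community-blocklist update runs here while the lock is held ...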

+ 20 - 2
pkg/apiserver/apic_metrics.go

@@ -81,7 +81,7 @@ func (a *apic) SendMetrics(stop chan (bool)) {
 	const checkInt = 20 * time.Second

 	// intervals must always be > 0
-	metInts := []time.Duration{1*time.Millisecond, a.metricsIntervalFirst, a.metricsInterval}
+	metInts := []time.Duration{1 * time.Millisecond, a.metricsIntervalFirst, a.metricsInterval}

 	log.Infof("Start send metrics to CrowdSec Central API (interval: %s once, then %s)",
 		metInts[1].Round(time.Second), metInts[2])
@@ -123,10 +123,21 @@ func (a *apic) SendMetrics(stop chan (bool)) {
 			reloadMachineIDs()
 			if !slices.Equal(oldIDs, machineIDs) {
 				log.Infof("capi metrics: machines changed, immediate send")
-				metTicker.Reset(1*time.Millisecond)
+				metTicker.Reset(1 * time.Millisecond)
 			}
 		case <-metTicker.C:
 			metTicker.Stop()
+			if a.isHAEnabled {
+				log.Debug("capi metrics: acquiring lock")
+				err := a.dbClient.AcquirePushMetricsLock()
+				if a.dbClient.IsLocked(err) {
+					log.Infof("another instance of crowdsec is already pushing metrics, skipping")
+					metTicker.Reset(nextMetInt())
+				}
+				if err != nil {
+					log.Errorf("unable to acquire pushMetrics lock (%s)", err)
+				}
+			}
 			metrics, err := a.GetMetrics()
 			if err != nil {
 				log.Errorf("unable to get metrics (%s), will retry", err)
@@ -136,6 +147,13 @@ func (a *apic) SendMetrics(stop chan (bool)) {
 			if err != nil {
 				log.Errorf("capi metrics: failed: %s", err)
 			}
+			if a.isHAEnabled {
+				log.Debug("capi metrics: releasing lock")
+				err = a.dbClient.ReleasePushMetricsLock()
+				if err != nil {
+					log.Errorf("unable to release metrics lock (%s)", err)
+				}
+			}
 			metTicker.Reset(nextMetInt())
 		case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others?
 			checkTicker.Stop()

+ 1 - 1
pkg/apiserver/apic_test.go

@@ -236,7 +236,7 @@ func TestNewAPIC(t *testing.T) {
 				),
 			))
 			tc.action()
-			_, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig, nil)
+			_, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig, nil, nil)
 			cstest.RequireErrorContains(t, err, tc.expectedErr)
 		})
 	}

+ 1 - 1
pkg/apiserver/apiserver.go

@@ -213,7 +213,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {

 	if config.OnlineClient != nil && config.OnlineClient.Credentials != nil {
 		log.Printf("Loading CAPI manager")
-		apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists)
+		apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists, config.HighAvailability)
 		if err != nil {
 			return &APIServer{}, err
 		}

+ 28 - 23
pkg/csconfig/api.go

@@ -186,29 +186,34 @@ type CapiWhitelist struct {

 /*local api service configuration*/
 type LocalApiServerCfg struct {
-	Enable                        *bool               `yaml:"enable"`
-	ListenURI                     string              `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080
-	TLS                           *TLSCfg             `yaml:"tls"`
-	DbConfig                      *DatabaseCfg        `yaml:"-"`
-	LogDir                        string              `yaml:"-"`
-	LogMedia                      string              `yaml:"-"`
-	OnlineClient                  *OnlineApiClientCfg `yaml:"online_client"`
-	ProfilesPath                  string              `yaml:"profiles_path,omitempty"`
-	ConsoleConfigPath             string              `yaml:"console_path,omitempty"`
-	ConsoleConfig                 *ConsoleConfig      `yaml:"-"`
-	Profiles                      []*ProfileCfg       `yaml:"-"`
-	LogLevel                      *log.Level          `yaml:"log_level"`
-	UseForwardedForHeaders        bool                `yaml:"use_forwarded_for_headers,omitempty"`
-	TrustedProxies                *[]string           `yaml:"trusted_proxies,omitempty"`
-	CompressLogs                  *bool               `yaml:"-"`
-	LogMaxSize                    int                 `yaml:"-"`
-	LogMaxAge                     int                 `yaml:"-"`
-	LogMaxFiles                   int                 `yaml:"-"`
-	TrustedIPs                    []string            `yaml:"trusted_ips,omitempty"`
-	PapiLogLevel                  *log.Level          `yaml:"papi_log_level"`
-	DisableRemoteLapiRegistration bool                `yaml:"disable_remote_lapi_registration,omitempty"`
-	CapiWhitelistsPath            string              `yaml:"capi_whitelists_path,omitempty"`
-	CapiWhitelists                *CapiWhitelist      `yaml:"-"`
+	Enable                        *bool                `yaml:"enable"`
+	ListenURI                     string               `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080
+	TLS                           *TLSCfg              `yaml:"tls"`
+	DbConfig                      *DatabaseCfg         `yaml:"-"`
+	LogDir                        string               `yaml:"-"`
+	LogMedia                      string               `yaml:"-"`
+	OnlineClient                  *OnlineApiClientCfg  `yaml:"online_client"`
+	ProfilesPath                  string               `yaml:"profiles_path,omitempty"`
+	ConsoleConfigPath             string               `yaml:"console_path,omitempty"`
+	ConsoleConfig                 *ConsoleConfig       `yaml:"-"`
+	Profiles                      []*ProfileCfg        `yaml:"-"`
+	LogLevel                      *log.Level           `yaml:"log_level"`
+	UseForwardedForHeaders        bool                 `yaml:"use_forwarded_for_headers,omitempty"`
+	TrustedProxies                *[]string            `yaml:"trusted_proxies,omitempty"`
+	CompressLogs                  *bool                `yaml:"-"`
+	LogMaxSize                    int                  `yaml:"-"`
+	LogMaxAge                     int                  `yaml:"-"`
+	LogMaxFiles                   int                  `yaml:"-"`
+	TrustedIPs                    []string             `yaml:"trusted_ips,omitempty"`
+	PapiLogLevel                  *log.Level           `yaml:"papi_log_level"`
+	DisableRemoteLapiRegistration bool                 `yaml:"disable_remote_lapi_registration,omitempty"`
+	CapiWhitelistsPath            string               `yaml:"capi_whitelists_path,omitempty"`
+	CapiWhitelists                *CapiWhitelist       `yaml:"-"`
+	HighAvailability              *HighAvailabilityCfg `yaml:"high_availability,omitempty"`
+}
+
+type HighAvailabilityCfg struct {
+	Enabled *bool `yaml:"enabled"`
 }

 type TLSCfg struct {
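
The new high_availability block is optional (omitempty) and carries a single enabled flag, so turning the feature on would amount to adding a high_availability block with enabled: true under the LAPI server section of the configuration. A minimal standalone sketch (not from the commit) of how the yaml tags above map such a snippet onto the config; the lower-cased structs are trimmed stand-ins for csconfig.LocalApiServerCfg and csconfig.HighAvailabilityCfg, and gopkg.in/yaml.v3 is used here only for the sketch:

	package main

	import (
		"fmt"

		"gopkg.in/yaml.v3"
	)

	type highAvailabilityCfg struct {
		Enabled *bool `yaml:"enabled"`
	}

	type localAPIServerCfg struct {
		HighAvailability *highAvailabilityCfg `yaml:"high_availability,omitempty"`
	}

	func main() {
		raw := []byte("high_availability:\n  enabled: true\n")

		var cfg localAPIServerCfg
		if err := yaml.Unmarshal(raw, &cfg); err != nil {
			panic(err)
		}
		// HighAvailability stays nil when the block is absent, which is why
		// NewAPIC checks haConfig != nil before dereferencing Enabled.
		fmt.Println(cfg.HighAvailability != nil && *cfg.HighAvailability.Enabled) // true
	}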

+ 37 - 0
pkg/database/ent/alert_query.go

@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -33,6 +34,7 @@ type AlertQuery struct {
 	withEvents    *EventQuery
 	withMetas     *MetaQuery
 	withFKs       bool
+	modifiers     []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
@@ -484,6 +486,9 @@ func (aq *AlertQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Alert,
 		node.Edges.loadedTypes = loadedTypes
 		return node.assignValues(columns, values)
 	}
+	if len(aq.modifiers) > 0 {
+		_spec.Modifiers = aq.modifiers
+	}
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 	}
@@ -636,6 +641,9 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*

 func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := aq.querySpec()
+	if len(aq.modifiers) > 0 {
+		_spec.Modifiers = aq.modifiers
+	}
 	_spec.Node.Columns = aq.fields
 	if len(aq.fields) > 0 {
 		_spec.Unique = aq.unique != nil && *aq.unique
@@ -717,6 +725,9 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if aq.unique != nil && *aq.unique {
 		selector.Distinct()
 	}
+	for _, m := range aq.modifiers {
+		m(selector)
+	}
 	for _, p := range aq.predicates {
 		p(selector)
 	}
@@ -734,6 +745,32 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 }

+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (aq *AlertQuery) ForUpdate(opts ...sql.LockOption) *AlertQuery {
+	if aq.driver.Dialect() == dialect.Postgres {
+		aq.Unique(false)
+	}
+	aq.modifiers = append(aq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return aq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (aq *AlertQuery) ForShare(opts ...sql.LockOption) *AlertQuery {
+	if aq.driver.Dialect() == dialect.Postgres {
+		aq.Unique(false)
+	}
+	aq.modifiers = append(aq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return aq
+}
+
 // AlertGroupBy is the group-by builder for Alert entities.
 type AlertGroupBy struct {
 	config
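
ForUpdate and ForShare are added to each generated query builder touched in this commit; they only append a row-locking clause, so they are meant to run inside a transaction, where the lock is held until Commit or Rollback. A hypothetical usage sketch (updateAlertLocked and rollback are inventions of this sketch, not part of the diff):

	import (
		"context"
		"fmt"

		"github.com/crowdsecurity/crowdsec/pkg/database/ent"
		"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
	)

	// updateAlertLocked locks one alert row for the duration of a transaction.
	func updateAlertLocked(ctx context.Context, client *ent.Client, id int) error {
		tx, err := client.Tx(ctx)
		if err != nil {
			return err
		}
		// SELECT ... FOR UPDATE: concurrent writers block on this row until we commit.
		a, err := tx.Alert.Query().Where(alert.IDEQ(id)).ForUpdate().Only(ctx)
		if err != nil {
			return rollback(tx, err)
		}
		_ = a // ... mutate the alert here while the lock is held ...
		return tx.Commit()
	}

	// rollback aborts the transaction and keeps the original error.
	func rollback(tx *ent.Tx, err error) error {
		if rerr := tx.Rollback(); rerr != nil {
			err = fmt.Errorf("%w: rolling back transaction: %v", err, rerr)
		}
		return err
	}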

+ 37 - 0
pkg/database/ent/bouncer_query.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -23,6 +24,7 @@ type BouncerQuery struct {
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.Bouncer
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
@@ -324,6 +326,9 @@ func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Boun
 		nodes = append(nodes, node)
 		return node.assignValues(columns, values)
 	}
+	if len(bq.modifiers) > 0 {
+		_spec.Modifiers = bq.modifiers
+	}
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 	}
@@ -338,6 +343,9 @@ func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Boun

 func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := bq.querySpec()
+	if len(bq.modifiers) > 0 {
+		_spec.Modifiers = bq.modifiers
+	}
 	_spec.Node.Columns = bq.fields
 	if len(bq.fields) > 0 {
 		_spec.Unique = bq.unique != nil && *bq.unique
@@ -419,6 +427,9 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if bq.unique != nil && *bq.unique {
 		selector.Distinct()
 	}
+	for _, m := range bq.modifiers {
+		m(selector)
+	}
 	for _, p := range bq.predicates {
 		p(selector)
 	}
@@ -436,6 +447,32 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 }

+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (bq *BouncerQuery) ForUpdate(opts ...sql.LockOption) *BouncerQuery {
+	if bq.driver.Dialect() == dialect.Postgres {
+		bq.Unique(false)
+	}
+	bq.modifiers = append(bq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return bq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (bq *BouncerQuery) ForShare(opts ...sql.LockOption) *BouncerQuery {
+	if bq.driver.Dialect() == dialect.Postgres {
+		bq.Unique(false)
+	}
+	bq.modifiers = append(bq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return bq
+}
+
 // BouncerGroupBy is the group-by builder for Bouncer entities.
 type BouncerGroupBy struct {
 	config

+ 97 - 0
pkg/database/ent/client.go

@@ -15,6 +15,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 
 
@@ -38,6 +39,8 @@ type Client struct {
 	Decision *DecisionClient
 	// Event is the client for interacting with the Event builders.
 	Event *EventClient
+	// Lock is the client for interacting with the Lock builders.
+	Lock *LockClient
 	// Machine is the client for interacting with the Machine builders.
 	Machine *MachineClient
 	// Meta is the client for interacting with the Meta builders.
@@ -60,6 +63,7 @@ func (c *Client) init() {
 	c.ConfigItem = NewConfigItemClient(c.config)
 	c.Decision = NewDecisionClient(c.config)
 	c.Event = NewEventClient(c.config)
+	c.Lock = NewLockClient(c.config)
 	c.Machine = NewMachineClient(c.config)
 	c.Meta = NewMetaClient(c.config)
 }
@@ -100,6 +104,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
 		ConfigItem: NewConfigItemClient(cfg),
 		Decision:   NewDecisionClient(cfg),
 		Event:      NewEventClient(cfg),
+		Lock:       NewLockClient(cfg),
 		Machine:    NewMachineClient(cfg),
 		Meta:       NewMetaClient(cfg),
 	}, nil
@@ -126,6 +131,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
 		ConfigItem: NewConfigItemClient(cfg),
 		Decision:   NewDecisionClient(cfg),
 		Event:      NewEventClient(cfg),
+		Lock:       NewLockClient(cfg),
 		Machine:    NewMachineClient(cfg),
 		Meta:       NewMetaClient(cfg),
 	}, nil
@@ -161,6 +167,7 @@ func (c *Client) Use(hooks ...Hook) {
 	c.ConfigItem.Use(hooks...)
 	c.Decision.Use(hooks...)
 	c.Event.Use(hooks...)
+	c.Lock.Use(hooks...)
 	c.Machine.Use(hooks...)
 	c.Meta.Use(hooks...)
 }
@@ -711,6 +718,96 @@ func (c *EventClient) Hooks() []Hook {
 	return c.hooks.Event
 }

+// LockClient is a client for the Lock schema.
+type LockClient struct {
+	config
+}
+
+// NewLockClient returns a client for the Lock from the given config.
+func NewLockClient(c config) *LockClient {
+	return &LockClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `lock.Hooks(f(g(h())))`.
+func (c *LockClient) Use(hooks ...Hook) {
+	c.hooks.Lock = append(c.hooks.Lock, hooks...)
+}
+
+// Create returns a builder for creating a Lock entity.
+func (c *LockClient) Create() *LockCreate {
+	mutation := newLockMutation(c.config, OpCreate)
+	return &LockCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Lock entities.
+func (c *LockClient) CreateBulk(builders ...*LockCreate) *LockCreateBulk {
+	return &LockCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Lock.
+func (c *LockClient) Update() *LockUpdate {
+	mutation := newLockMutation(c.config, OpUpdate)
+	return &LockUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *LockClient) UpdateOne(l *Lock) *LockUpdateOne {
+	mutation := newLockMutation(c.config, OpUpdateOne, withLock(l))
+	return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *LockClient) UpdateOneID(id int) *LockUpdateOne {
+	mutation := newLockMutation(c.config, OpUpdateOne, withLockID(id))
+	return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Lock.
+func (c *LockClient) Delete() *LockDelete {
+	mutation := newLockMutation(c.config, OpDelete)
+	return &LockDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *LockClient) DeleteOne(l *Lock) *LockDeleteOne {
+	return c.DeleteOneID(l.ID)
+}
+
+// DeleteOne returns a builder for deleting the given entity by its id.
+func (c *LockClient) DeleteOneID(id int) *LockDeleteOne {
+	builder := c.Delete().Where(lock.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &LockDeleteOne{builder}
+}
+
+// Query returns a query builder for Lock.
+func (c *LockClient) Query() *LockQuery {
+	return &LockQuery{
+		config: c.config,
+	}
+}
+
+// Get returns a Lock entity by its id.
+func (c *LockClient) Get(ctx context.Context, id int) (*Lock, error) {
+	return c.Query().Where(lock.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *LockClient) GetX(ctx context.Context, id int) *Lock {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// Hooks returns the client hooks.
+func (c *LockClient) Hooks() []Hook {
+	return c.hooks.Lock
+}
+
 // MachineClient is a client for the Machine schema.
 type MachineClient struct {
 	config

+ 1 - 0
pkg/database/ent/config.go

@@ -29,6 +29,7 @@ type hooks struct {
 	ConfigItem []ent.Hook
 	Decision   []ent.Hook
 	Event      []ent.Hook
+	Lock       []ent.Hook
 	Machine    []ent.Hook
 	Meta       []ent.Hook
 }

+ 37 - 0
pkg/database/ent/configitem_query.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -23,6 +24,7 @@ type ConfigItemQuery struct {
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.ConfigItem
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
@@ -324,6 +326,9 @@ func (ciq *ConfigItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*
 		nodes = append(nodes, node)
 		return node.assignValues(columns, values)
 	}
+	if len(ciq.modifiers) > 0 {
+		_spec.Modifiers = ciq.modifiers
+	}
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 	}
@@ -338,6 +343,9 @@ func (ciq *ConfigItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*

 func (ciq *ConfigItemQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := ciq.querySpec()
+	if len(ciq.modifiers) > 0 {
+		_spec.Modifiers = ciq.modifiers
+	}
 	_spec.Node.Columns = ciq.fields
 	if len(ciq.fields) > 0 {
 		_spec.Unique = ciq.unique != nil && *ciq.unique
@@ -419,6 +427,9 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if ciq.unique != nil && *ciq.unique {
 		selector.Distinct()
 	}
+	for _, m := range ciq.modifiers {
+		m(selector)
+	}
 	for _, p := range ciq.predicates {
 		p(selector)
 	}
@@ -436,6 +447,32 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 }

+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (ciq *ConfigItemQuery) ForUpdate(opts ...sql.LockOption) *ConfigItemQuery {
+	if ciq.driver.Dialect() == dialect.Postgres {
+		ciq.Unique(false)
+	}
+	ciq.modifiers = append(ciq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return ciq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (ciq *ConfigItemQuery) ForShare(opts ...sql.LockOption) *ConfigItemQuery {
+	if ciq.driver.Dialect() == dialect.Postgres {
+		ciq.Unique(false)
+	}
+	ciq.modifiers = append(ciq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return ciq
+}
+
 // ConfigItemGroupBy is the group-by builder for ConfigItem entities.
 type ConfigItemGroupBy struct {
 	config

+ 37 - 0
pkg/database/ent/decision_query.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -25,6 +26,7 @@ type DecisionQuery struct {
 	fields     []string
 	predicates []predicate.Decision
 	withOwner  *AlertQuery
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
@@ -364,6 +366,9 @@ func (dq *DecisionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Dec
 		node.Edges.loadedTypes = loadedTypes
 		return node.assignValues(columns, values)
 	}
+	if len(dq.modifiers) > 0 {
+		_spec.Modifiers = dq.modifiers
+	}
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 	}
@@ -411,6 +416,9 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes

 func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := dq.querySpec()
+	if len(dq.modifiers) > 0 {
+		_spec.Modifiers = dq.modifiers
+	}
 	_spec.Node.Columns = dq.fields
 	if len(dq.fields) > 0 {
 		_spec.Unique = dq.unique != nil && *dq.unique
@@ -492,6 +500,9 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if dq.unique != nil && *dq.unique {
 		selector.Distinct()
 	}
+	for _, m := range dq.modifiers {
+		m(selector)
+	}
 	for _, p := range dq.predicates {
 		p(selector)
 	}
@@ -509,6 +520,32 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 }

+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (dq *DecisionQuery) ForUpdate(opts ...sql.LockOption) *DecisionQuery {
+	if dq.driver.Dialect() == dialect.Postgres {
+		dq.Unique(false)
+	}
+	dq.modifiers = append(dq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return dq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (dq *DecisionQuery) ForShare(opts ...sql.LockOption) *DecisionQuery {
+	if dq.driver.Dialect() == dialect.Postgres {
+		dq.Unique(false)
+	}
+	dq.modifiers = append(dq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return dq
+}
+
 // DecisionGroupBy is the group-by builder for Decision entities.
 type DecisionGroupBy struct {
 	config

+ 2 - 0
pkg/database/ent/ent.go

@@ -15,6 +15,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 )
 )
@@ -42,6 +43,7 @@ func columnChecker(table string) func(string) error {
 		configitem.Table: configitem.ValidColumn,
 		decision.Table:   decision.ValidColumn,
 		event.Table:      event.ValidColumn,
+		lock.Table:       lock.ValidColumn,
 		machine.Table:    machine.ValidColumn,
 		meta.Table:       meta.ValidColumn,
 	}

+ 37 - 0
pkg/database/ent/event_query.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -25,6 +26,7 @@ type EventQuery struct {
 	fields     []string
 	predicates []predicate.Event
 	withOwner  *AlertQuery
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
@@ -364,6 +366,9 @@ func (eq *EventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Event,
 		node.Edges.loadedTypes = loadedTypes
 		return node.assignValues(columns, values)
 	}
+	if len(eq.modifiers) > 0 {
+		_spec.Modifiers = eq.modifiers
+	}
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 	}
@@ -411,6 +416,9 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []

 func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := eq.querySpec()
+	if len(eq.modifiers) > 0 {
+		_spec.Modifiers = eq.modifiers
+	}
 	_spec.Node.Columns = eq.fields
 	if len(eq.fields) > 0 {
 		_spec.Unique = eq.unique != nil && *eq.unique
@@ -492,6 +500,9 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if eq.unique != nil && *eq.unique {
 		selector.Distinct()
 	}
+	for _, m := range eq.modifiers {
+		m(selector)
+	}
 	for _, p := range eq.predicates {
 		p(selector)
 	}
@@ -509,6 +520,32 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 }

+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (eq *EventQuery) ForUpdate(opts ...sql.LockOption) *EventQuery {
+	if eq.driver.Dialect() == dialect.Postgres {
+		eq.Unique(false)
+	}
+	eq.modifiers = append(eq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return eq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (eq *EventQuery) ForShare(opts ...sql.LockOption) *EventQuery {
+	if eq.driver.Dialect() == dialect.Postgres {
+		eq.Unique(false)
+	}
+	eq.modifiers = append(eq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return eq
+}
+
 // EventGroupBy is the group-by builder for Event entities.
 // EventGroupBy is the group-by builder for Event entities.
 type EventGroupBy struct {
 	config
+ 13 - 0
pkg/database/ent/hook/hook.go

@@ -74,6 +74,19 @@ func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error
 	return f(ctx, mv)
 	return f(ctx, mv)
 }

+// function as Lock mutator.
+type LockFunc func(context.Context, *ent.LockMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f LockFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+	mv, ok := m.(*ent.LockMutation)
+	if !ok {
+		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LockMutation", m)
+	}
+	return f(ctx, mv)
+}
+
 // The MachineFunc type is an adapter to allow the use of ordinary
 // The MachineFunc type is an adapter to allow the use of ordinary
 // function as Machine mutator.
 type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error)
+ 113 - 0
pkg/database/ent/lock.go

@@ -0,0 +1,113 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+)
+
+// Lock is the model entity for the Lock schema.
+type Lock struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// Name holds the value of the "name" field.
+	Name string `json:"name"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at"`
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Lock) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case lock.FieldID:
+			values[i] = new(sql.NullInt64)
+		case lock.FieldName:
+			values[i] = new(sql.NullString)
+		case lock.FieldCreatedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			return nil, fmt.Errorf("unexpected column %q for type Lock", columns[i])
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Lock fields.
+func (l *Lock) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case lock.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			l.ID = int(value.Int64)
+		case lock.FieldName:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field name", values[i])
+			} else if value.Valid {
+				l.Name = value.String
+			}
+		case lock.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				l.CreatedAt = value.Time
+			}
+		}
+	}
+	return nil
+}
+
+// Update returns a builder for updating this Lock.
+// Note that you need to call Lock.Unwrap() before calling this method if this Lock
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (l *Lock) Update() *LockUpdateOne {
+	return (&LockClient{config: l.config}).UpdateOne(l)
+}
+
+// Unwrap unwraps the Lock entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (l *Lock) Unwrap() *Lock {
+	_tx, ok := l.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: Lock is not a transactional entity")
+	}
+	l.config.driver = _tx.drv
+	return l
+}
+
+// String implements the fmt.Stringer.
+func (l *Lock) String() string {
+	var builder strings.Builder
+	builder.WriteString("Lock(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", l.ID))
+	builder.WriteString("name=")
+	builder.WriteString(l.Name)
+	builder.WriteString(", ")
+	builder.WriteString("created_at=")
+	builder.WriteString(l.CreatedAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Locks is a parsable slice of Lock.
+type Locks []*Lock
+
+func (l Locks) config(cfg config) {
+	for _i := range l {
+		l[_i].config = cfg
+	}
+}

+ 42 - 0
pkg/database/ent/lock/lock.go

@@ -0,0 +1,42 @@
+// Code generated by ent, DO NOT EDIT.
+
+package lock
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the lock type in the database.
+	Label = "lock"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldName holds the string denoting the name field in the database.
+	FieldName = "name"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// Table holds the table name of the lock in the database.
+	Table = "locks"
+)
+
+// Columns holds all SQL columns for lock fields.
+var Columns = []string{
+	FieldID,
+	FieldName,
+	FieldCreatedAt,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+)

+ 290 - 0
pkg/database/ent/lock/where.go

@@ -0,0 +1,290 @@
+// Code generated by ent, DO NOT EDIT.
+
+package lock
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldID), id))
+	})
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.In(s.C(FieldID), v...))
+	})
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.NotIn(s.C(FieldID), v...))
+	})
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldID), id))
+	})
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldID), id))
+	})
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldID), id))
+	})
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldID), id))
+	})
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldName), v))
+	})
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldName), v))
+	})
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldName), v))
+	})
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.Lock {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldName), v...))
+	})
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.Lock {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldName), v...))
+	})
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldName), v))
+	})
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldName), v))
+	})
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldName), v))
+	})
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldName), v))
+	})
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldName), v))
+	})
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldName), v))
+	})
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldName), v))
+	})
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldName), v))
+	})
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldName), v))
+	})
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Lock {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Lock {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Lock) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for _, p := range predicates {
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Lock) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for i, p := range predicates {
+			if i > 0 {
+				s1.Or()
+			}
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Lock) predicate.Lock {
+	return predicate.Lock(func(s *sql.Selector) {
+		p(s.Not())
+	})
+}

+ 262 - 0
pkg/database/ent/lock_create.go

@@ -0,0 +1,262 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+)
+
+// LockCreate is the builder for creating a Lock entity.
+type LockCreate struct {
+	config
+	mutation *LockMutation
+	hooks    []Hook
+}
+
+// SetName sets the "name" field.
+func (lc *LockCreate) SetName(s string) *LockCreate {
+	lc.mutation.SetName(s)
+	return lc
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (lc *LockCreate) SetCreatedAt(t time.Time) *LockCreate {
+	lc.mutation.SetCreatedAt(t)
+	return lc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (lc *LockCreate) SetNillableCreatedAt(t *time.Time) *LockCreate {
+	if t != nil {
+		lc.SetCreatedAt(*t)
+	}
+	return lc
+}
+
+// Mutation returns the LockMutation object of the builder.
+func (lc *LockCreate) Mutation() *LockMutation {
+	return lc.mutation
+}
+
+// Save creates the Lock in the database.
+func (lc *LockCreate) Save(ctx context.Context) (*Lock, error) {
+	var (
+		err  error
+		node *Lock
+	)
+	lc.defaults()
+	if len(lc.hooks) == 0 {
+		if err = lc.check(); err != nil {
+			return nil, err
+		}
+		node, err = lc.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*LockMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			if err = lc.check(); err != nil {
+				return nil, err
+			}
+			lc.mutation = mutation
+			if node, err = lc.sqlSave(ctx); err != nil {
+				return nil, err
+			}
+			mutation.id = &node.ID
+			mutation.done = true
+			return node, err
+		})
+		for i := len(lc.hooks) - 1; i >= 0; i-- {
+			if lc.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = lc.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, lc.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*Lock)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from LockMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (lc *LockCreate) SaveX(ctx context.Context) *Lock {
+	v, err := lc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (lc *LockCreate) Exec(ctx context.Context) error {
+	_, err := lc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (lc *LockCreate) ExecX(ctx context.Context) {
+	if err := lc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (lc *LockCreate) defaults() {
+	if _, ok := lc.mutation.CreatedAt(); !ok {
+		v := lock.DefaultCreatedAt()
+		lc.mutation.SetCreatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (lc *LockCreate) check() error {
+	if _, ok := lc.mutation.Name(); !ok {
+		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Lock.name"`)}
+	}
+	if _, ok := lc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Lock.created_at"`)}
+	}
+	return nil
+}
+
+func (lc *LockCreate) sqlSave(ctx context.Context) (*Lock, error) {
+	_node, _spec := lc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+func (lc *LockCreate) createSpec() (*Lock, *sqlgraph.CreateSpec) {
+	var (
+		_node = &Lock{config: lc.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: lock.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: lock.FieldID,
+			},
+		}
+	)
+	if value, ok := lc.mutation.Name(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: lock.FieldName,
+		})
+		_node.Name = value
+	}
+	if value, ok := lc.mutation.CreatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: lock.FieldCreatedAt,
+		})
+		_node.CreatedAt = value
+	}
+	return _node, _spec
+}
+
+// LockCreateBulk is the builder for creating many Lock entities in bulk.
+type LockCreateBulk struct {
+	config
+	builders []*LockCreate
+}
+
+// Save creates the Lock entities in the database.
+func (lcb *LockCreateBulk) Save(ctx context.Context) ([]*Lock, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(lcb.builders))
+	nodes := make([]*Lock, len(lcb.builders))
+	mutators := make([]Mutator, len(lcb.builders))
+	for i := range lcb.builders {
+		func(i int, root context.Context) {
+			builder := lcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*LockMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, lcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, lcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (lcb *LockCreateBulk) SaveX(ctx context.Context) []*Lock {
+	v, err := lcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (lcb *LockCreateBulk) Exec(ctx context.Context) error {
+	_, err := lcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (lcb *LockCreateBulk) ExecX(ctx context.Context) {
+	if err := lcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

+ 115 - 0
pkg/database/ent/lock_delete.go

@@ -0,0 +1,115 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// LockDelete is the builder for deleting a Lock entity.
+type LockDelete struct {
+	config
+	hooks    []Hook
+	mutation *LockMutation
+}
+
+// Where appends a list of predicates to the LockDelete builder.
+func (ld *LockDelete) Where(ps ...predicate.Lock) *LockDelete {
+	ld.mutation.Where(ps...)
+	return ld
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (ld *LockDelete) Exec(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(ld.hooks) == 0 {
+		affected, err = ld.sqlExec(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*LockMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			ld.mutation = mutation
+			affected, err = ld.sqlExec(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(ld.hooks) - 1; i >= 0; i-- {
+			if ld.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = ld.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, ld.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ld *LockDelete) ExecX(ctx context.Context) int {
+	n, err := ld.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (ld *LockDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := &sqlgraph.DeleteSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table: lock.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: lock.FieldID,
+			},
+		},
+	}
+	if ps := ld.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, ld.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	return affected, err
+}
+
+// LockDeleteOne is the builder for deleting a single Lock entity.
+type LockDeleteOne struct {
+	ld *LockDelete
+}
+
+// Exec executes the deletion query.
+func (ldo *LockDeleteOne) Exec(ctx context.Context) error {
+	n, err := ldo.ld.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{lock.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ldo *LockDeleteOne) ExecX(ctx context.Context) {
+	ldo.ld.ExecX(ctx)
+}

+ 566 - 0
pkg/database/ent/lock_query.go

@@ -0,0 +1,566 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// LockQuery is the builder for querying Lock entities.
+type LockQuery struct {
+	config
+	limit      *int
+	offset     *int
+	unique     *bool
+	order      []OrderFunc
+	fields     []string
+	predicates []predicate.Lock
+	modifiers  []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the LockQuery builder.
+func (lq *LockQuery) Where(ps ...predicate.Lock) *LockQuery {
+	lq.predicates = append(lq.predicates, ps...)
+	return lq
+}
+
+// Limit adds a limit step to the query.
+func (lq *LockQuery) Limit(limit int) *LockQuery {
+	lq.limit = &limit
+	return lq
+}
+
+// Offset adds an offset step to the query.
+func (lq *LockQuery) Offset(offset int) *LockQuery {
+	lq.offset = &offset
+	return lq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (lq *LockQuery) Unique(unique bool) *LockQuery {
+	lq.unique = &unique
+	return lq
+}
+
+// Order adds an order step to the query.
+func (lq *LockQuery) Order(o ...OrderFunc) *LockQuery {
+	lq.order = append(lq.order, o...)
+	return lq
+}
+
+// First returns the first Lock entity from the query.
+// Returns a *NotFoundError when no Lock was found.
+func (lq *LockQuery) First(ctx context.Context) (*Lock, error) {
+	nodes, err := lq.Limit(1).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{lock.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (lq *LockQuery) FirstX(ctx context.Context) *Lock {
+	node, err := lq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first Lock ID from the query.
+// Returns a *NotFoundError when no Lock ID was found.
+func (lq *LockQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = lq.Limit(1).IDs(ctx); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{lock.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (lq *LockQuery) FirstIDX(ctx context.Context) int {
+	id, err := lq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single Lock entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one Lock entity is found.
+// Returns a *NotFoundError when no Lock entities are found.
+func (lq *LockQuery) Only(ctx context.Context) (*Lock, error) {
+	nodes, err := lq.Limit(2).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{lock.Label}
+	default:
+		return nil, &NotSingularError{lock.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (lq *LockQuery) OnlyX(ctx context.Context) *Lock {
+	node, err := lq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only Lock ID in the query.
+// Returns a *NotSingularError when more than one Lock ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (lq *LockQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = lq.Limit(2).IDs(ctx); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{lock.Label}
+	default:
+		err = &NotSingularError{lock.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (lq *LockQuery) OnlyIDX(ctx context.Context) int {
+	id, err := lq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of Locks.
+func (lq *LockQuery) All(ctx context.Context) ([]*Lock, error) {
+	if err := lq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	return lq.sqlAll(ctx)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (lq *LockQuery) AllX(ctx context.Context) []*Lock {
+	nodes, err := lq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of Lock IDs.
+func (lq *LockQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := lq.Select(lock.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (lq *LockQuery) IDsX(ctx context.Context) []int {
+	ids, err := lq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (lq *LockQuery) Count(ctx context.Context) (int, error) {
+	if err := lq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return lq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (lq *LockQuery) CountX(ctx context.Context) int {
+	count, err := lq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (lq *LockQuery) Exist(ctx context.Context) (bool, error) {
+	if err := lq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return lq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (lq *LockQuery) ExistX(ctx context.Context) bool {
+	exist, err := lq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the LockQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (lq *LockQuery) Clone() *LockQuery {
+	if lq == nil {
+		return nil
+	}
+	return &LockQuery{
+		config:     lq.config,
+		limit:      lq.limit,
+		offset:     lq.offset,
+		order:      append([]OrderFunc{}, lq.order...),
+		predicates: append([]predicate.Lock{}, lq.predicates...),
+		// clone intermediate query.
+		sql:    lq.sql.Clone(),
+		path:   lq.path,
+		unique: lq.unique,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		Name string `json:"name"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.Lock.Query().
+//		GroupBy(lock.FieldName).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (lq *LockQuery) GroupBy(field string, fields ...string) *LockGroupBy {
+	grbuild := &LockGroupBy{config: lq.config}
+	grbuild.fields = append([]string{field}, fields...)
+	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := lq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return lq.sqlQuery(ctx), nil
+	}
+	grbuild.label = lock.Label
+	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		Name string `json:"name"`
+//	}
+//
+//	client.Lock.Query().
+//		Select(lock.FieldName).
+//		Scan(ctx, &v)
+func (lq *LockQuery) Select(fields ...string) *LockSelect {
+	lq.fields = append(lq.fields, fields...)
+	selbuild := &LockSelect{LockQuery: lq}
+	selbuild.label = lock.Label
+	selbuild.flds, selbuild.scan = &lq.fields, selbuild.Scan
+	return selbuild
+}
+
+func (lq *LockQuery) prepareQuery(ctx context.Context) error {
+	for _, f := range lq.fields {
+		if !lock.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if lq.path != nil {
+		prev, err := lq.path(ctx)
+		if err != nil {
+			return err
+		}
+		lq.sql = prev
+	}
+	return nil
+}
+
+func (lq *LockQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Lock, error) {
+	var (
+		nodes = []*Lock{}
+		_spec = lq.querySpec()
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*Lock).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &Lock{config: lq.config}
+		nodes = append(nodes, node)
+		return node.assignValues(columns, values)
+	}
+	if len(lq.modifiers) > 0 {
+		_spec.Modifiers = lq.modifiers
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, lq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	return nodes, nil
+}
+
+func (lq *LockQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := lq.querySpec()
+	if len(lq.modifiers) > 0 {
+		_spec.Modifiers = lq.modifiers
+	}
+	_spec.Node.Columns = lq.fields
+	if len(lq.fields) > 0 {
+		_spec.Unique = lq.unique != nil && *lq.unique
+	}
+	return sqlgraph.CountNodes(ctx, lq.driver, _spec)
+}
+
+func (lq *LockQuery) sqlExist(ctx context.Context) (bool, error) {
+	switch _, err := lq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+func (lq *LockQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := &sqlgraph.QuerySpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   lock.Table,
+			Columns: lock.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: lock.FieldID,
+			},
+		},
+		From:   lq.sql,
+		Unique: true,
+	}
+	if unique := lq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
+	if fields := lq.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID)
+		for i := range fields {
+			if fields[i] != lock.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := lq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := lq.limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := lq.offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := lq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (lq *LockQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(lq.driver.Dialect())
+	t1 := builder.Table(lock.Table)
+	columns := lq.fields
+	if len(columns) == 0 {
+		columns = lock.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if lq.sql != nil {
+		selector = lq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if lq.unique != nil && *lq.unique {
+		selector.Distinct()
+	}
+	for _, m := range lq.modifiers {
+		m(selector)
+	}
+	for _, p := range lq.predicates {
+		p(selector)
+	}
+	for _, p := range lq.order {
+		p(selector)
+	}
+	if offset := lq.offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := lq.limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (lq *LockQuery) ForUpdate(opts ...sql.LockOption) *LockQuery {
+	if lq.driver.Dialect() == dialect.Postgres {
+		lq.Unique(false)
+	}
+	lq.modifiers = append(lq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return lq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (lq *LockQuery) ForShare(opts ...sql.LockOption) *LockQuery {
+	if lq.driver.Dialect() == dialect.Postgres {
+		lq.Unique(false)
+	}
+	lq.modifiers = append(lq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return lq
+}
+
+// LockGroupBy is the group-by builder for Lock entities.
+type LockGroupBy struct {
+	config
+	selector
+	fields []string
+	fns    []AggregateFunc
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (lgb *LockGroupBy) Aggregate(fns ...AggregateFunc) *LockGroupBy {
+	lgb.fns = append(lgb.fns, fns...)
+	return lgb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (lgb *LockGroupBy) Scan(ctx context.Context, v any) error {
+	query, err := lgb.path(ctx)
+	if err != nil {
+		return err
+	}
+	lgb.sql = query
+	return lgb.sqlScan(ctx, v)
+}
+
+func (lgb *LockGroupBy) sqlScan(ctx context.Context, v any) error {
+	for _, f := range lgb.fields {
+		if !lock.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
+		}
+	}
+	selector := lgb.sqlQuery()
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := lgb.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+func (lgb *LockGroupBy) sqlQuery() *sql.Selector {
+	selector := lgb.sql.Select()
+	aggregation := make([]string, 0, len(lgb.fns))
+	for _, fn := range lgb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	// If no columns were selected in a custom aggregation function, the default
+	// selection is the fields used for "group-by", and the aggregation functions.
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(lgb.fields)+len(lgb.fns))
+		for _, f := range lgb.fields {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	return selector.GroupBy(selector.Columns(lgb.fields...)...)
+}
+
+// LockSelect is the builder for selecting fields of Lock entities.
+type LockSelect struct {
+	*LockQuery
+	selector
+	// intermediate query (i.e. traversal path).
+	sql *sql.Selector
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (ls *LockSelect) Scan(ctx context.Context, v any) error {
+	if err := ls.prepareQuery(ctx); err != nil {
+		return err
+	}
+	ls.sql = ls.LockQuery.sqlQuery(ctx)
+	return ls.sqlScan(ctx, v)
+}
+
+func (ls *LockSelect) sqlScan(ctx context.Context, v any) error {
+	rows := &sql.Rows{}
+	query, args := ls.sql.Query()
+	if err := ls.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
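
The ForUpdate/ForShare helpers generated above translate into SELECT ... FOR UPDATE / FOR SHARE row locks. A minimal usage sketch, not part of this commit, assuming an initialized *ent.Client named client, the usual imports (context, fmt, the generated ent and ent/lock packages), and an illustrative helper name:

func inspectLock(ctx context.Context, client *ent.Client) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	// read the "pullCAPI" lock row and keep it locked until Commit/Rollback
	l, err := tx.Lock.Query().
		Where(lock.NameEQ("pullCAPI")).
		ForUpdate().
		Only(ctx)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	fmt.Printf("lock %q held since %s\n", l.Name, l.CreatedAt)
	return tx.Commit()
}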

+ 314 - 0
pkg/database/ent/lock_update.go

@@ -0,0 +1,314 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// LockUpdate is the builder for updating Lock entities.
+type LockUpdate struct {
+	config
+	hooks    []Hook
+	mutation *LockMutation
+}
+
+// Where appends a list of predicates to the LockUpdate builder.
+func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate {
+	lu.mutation.Where(ps...)
+	return lu
+}
+
+// SetName sets the "name" field.
+func (lu *LockUpdate) SetName(s string) *LockUpdate {
+	lu.mutation.SetName(s)
+	return lu
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate {
+	lu.mutation.SetCreatedAt(t)
+	return lu
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (lu *LockUpdate) SetNillableCreatedAt(t *time.Time) *LockUpdate {
+	if t != nil {
+		lu.SetCreatedAt(*t)
+	}
+	return lu
+}
+
+// Mutation returns the LockMutation object of the builder.
+func (lu *LockUpdate) Mutation() *LockMutation {
+	return lu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (lu *LockUpdate) Save(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(lu.hooks) == 0 {
+		affected, err = lu.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*LockMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			lu.mutation = mutation
+			affected, err = lu.sqlSave(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(lu.hooks) - 1; i >= 0; i-- {
+			if lu.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = lu.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, lu.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (lu *LockUpdate) SaveX(ctx context.Context) int {
+	affected, err := lu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (lu *LockUpdate) Exec(ctx context.Context) error {
+	_, err := lu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (lu *LockUpdate) ExecX(ctx context.Context) {
+	if err := lu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   lock.Table,
+			Columns: lock.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: lock.FieldID,
+			},
+		},
+	}
+	if ps := lu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := lu.mutation.Name(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: lock.FieldName,
+		})
+	}
+	if value, ok := lu.mutation.CreatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: lock.FieldCreatedAt,
+		})
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{lock.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	return n, nil
+}
+
+// LockUpdateOne is the builder for updating a single Lock entity.
+type LockUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *LockMutation
+}
+
+// SetName sets the "name" field.
+func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne {
+	luo.mutation.SetName(s)
+	return luo
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne {
+	luo.mutation.SetCreatedAt(t)
+	return luo
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (luo *LockUpdateOne) SetNillableCreatedAt(t *time.Time) *LockUpdateOne {
+	if t != nil {
+		luo.SetCreatedAt(*t)
+	}
+	return luo
+}
+
+// Mutation returns the LockMutation object of the builder.
+func (luo *LockUpdateOne) Mutation() *LockMutation {
+	return luo.mutation
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (luo *LockUpdateOne) Select(field string, fields ...string) *LockUpdateOne {
+	luo.fields = append([]string{field}, fields...)
+	return luo
+}
+
+// Save executes the query and returns the updated Lock entity.
+func (luo *LockUpdateOne) Save(ctx context.Context) (*Lock, error) {
+	var (
+		err  error
+		node *Lock
+	)
+	if len(luo.hooks) == 0 {
+		node, err = luo.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*LockMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			luo.mutation = mutation
+			node, err = luo.sqlSave(ctx)
+			mutation.done = true
+			return node, err
+		})
+		for i := len(luo.hooks) - 1; i >= 0; i-- {
+			if luo.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = luo.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, luo.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*Lock)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from LockMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (luo *LockUpdateOne) SaveX(ctx context.Context) *Lock {
+	node, err := luo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (luo *LockUpdateOne) Exec(ctx context.Context) error {
+	_, err := luo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (luo *LockUpdateOne) ExecX(ctx context.Context) {
+	if err := luo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   lock.Table,
+			Columns: lock.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: lock.FieldID,
+			},
+		},
+	}
+	id, ok := luo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Lock.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := luo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID)
+		for _, f := range fields {
+			if !lock.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != lock.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := luo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := luo.mutation.Name(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: lock.FieldName,
+		})
+	}
+	if value, ok := luo.mutation.CreatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: lock.FieldCreatedAt,
+		})
+	}
+	_node = &Lock{config: luo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, luo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{lock.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	return _node, nil
+}

+ 37 - 0
pkg/database/ent/machine_query.go

@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -26,6 +27,7 @@ type MachineQuery struct {
 	fields     []string
 	fields     []string
 	predicates []predicate.Machine
 	predicates []predicate.Machine
 	withAlerts *AlertQuery
 	withAlerts *AlertQuery
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
 	path func(context.Context) (*sql.Selector, error)
@@ -365,6 +367,9 @@ func (mq *MachineQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Mach
 		node.Edges.loadedTypes = loadedTypes
 		node.Edges.loadedTypes = loadedTypes
 		return node.assignValues(columns, values)
 		return node.assignValues(columns, values)
 	}
 	}
+	if len(mq.modifiers) > 0 {
+		_spec.Modifiers = mq.modifiers
+	}
 	for i := range hooks {
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 		hooks[i](ctx, _spec)
 	}
 	}
@@ -418,6 +423,9 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes
 
 
 func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) {
 func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := mq.querySpec()
 	_spec := mq.querySpec()
+	if len(mq.modifiers) > 0 {
+		_spec.Modifiers = mq.modifiers
+	}
 	_spec.Node.Columns = mq.fields
 	_spec.Node.Columns = mq.fields
 	if len(mq.fields) > 0 {
 	if len(mq.fields) > 0 {
 		_spec.Unique = mq.unique != nil && *mq.unique
 		_spec.Unique = mq.unique != nil && *mq.unique
@@ -499,6 +507,9 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if mq.unique != nil && *mq.unique {
 	if mq.unique != nil && *mq.unique {
 		selector.Distinct()
 		selector.Distinct()
 	}
 	}
+	for _, m := range mq.modifiers {
+		m(selector)
+	}
 	for _, p := range mq.predicates {
 	for _, p := range mq.predicates {
 		p(selector)
 		p(selector)
 	}
 	}
@@ -516,6 +527,32 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 	return selector
 }
 }
 
 
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (mq *MachineQuery) ForUpdate(opts ...sql.LockOption) *MachineQuery {
+	if mq.driver.Dialect() == dialect.Postgres {
+		mq.Unique(false)
+	}
+	mq.modifiers = append(mq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return mq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (mq *MachineQuery) ForShare(opts ...sql.LockOption) *MachineQuery {
+	if mq.driver.Dialect() == dialect.Postgres {
+		mq.Unique(false)
+	}
+	mq.modifiers = append(mq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return mq
+}
+
 // MachineGroupBy is the group-by builder for Machine entities.
 // MachineGroupBy is the group-by builder for Machine entities.
 type MachineGroupBy struct {
 type MachineGroupBy struct {
 	config
 	config

+ 37 - 0
pkg/database/ent/meta_query.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 
 
+	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/field"
@@ -25,6 +26,7 @@ type MetaQuery struct {
 	fields     []string
 	fields     []string
 	predicates []predicate.Meta
 	predicates []predicate.Meta
 	withOwner  *AlertQuery
 	withOwner  *AlertQuery
+	modifiers  []func(*sql.Selector)
 	// intermediate query (i.e. traversal path).
 	// intermediate query (i.e. traversal path).
 	sql  *sql.Selector
 	sql  *sql.Selector
 	path func(context.Context) (*sql.Selector, error)
 	path func(context.Context) (*sql.Selector, error)
@@ -364,6 +366,9 @@ func (mq *MetaQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Meta, e
 		node.Edges.loadedTypes = loadedTypes
 		node.Edges.loadedTypes = loadedTypes
 		return node.assignValues(columns, values)
 		return node.assignValues(columns, values)
 	}
 	}
+	if len(mq.modifiers) > 0 {
+		_spec.Modifiers = mq.modifiers
+	}
 	for i := range hooks {
 	for i := range hooks {
 		hooks[i](ctx, _spec)
 		hooks[i](ctx, _spec)
 	}
 	}
@@ -411,6 +416,9 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*
 
 
 func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) {
 func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) {
 	_spec := mq.querySpec()
 	_spec := mq.querySpec()
+	if len(mq.modifiers) > 0 {
+		_spec.Modifiers = mq.modifiers
+	}
 	_spec.Node.Columns = mq.fields
 	_spec.Node.Columns = mq.fields
 	if len(mq.fields) > 0 {
 	if len(mq.fields) > 0 {
 		_spec.Unique = mq.unique != nil && *mq.unique
 		_spec.Unique = mq.unique != nil && *mq.unique
@@ -492,6 +500,9 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	if mq.unique != nil && *mq.unique {
 	if mq.unique != nil && *mq.unique {
 		selector.Distinct()
 		selector.Distinct()
 	}
 	}
+	for _, m := range mq.modifiers {
+		m(selector)
+	}
 	for _, p := range mq.predicates {
 	for _, p := range mq.predicates {
 		p(selector)
 		p(selector)
 	}
 	}
@@ -509,6 +520,32 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
 	return selector
 	return selector
 }
 }
 
 
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (mq *MetaQuery) ForUpdate(opts ...sql.LockOption) *MetaQuery {
+	if mq.driver.Dialect() == dialect.Postgres {
+		mq.Unique(false)
+	}
+	mq.modifiers = append(mq.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return mq
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (mq *MetaQuery) ForShare(opts ...sql.LockOption) *MetaQuery {
+	if mq.driver.Dialect() == dialect.Postgres {
+		mq.Unique(false)
+	}
+	mq.modifiers = append(mq.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return mq
+}
+
 // MetaGroupBy is the group-by builder for Meta entities.
 // MetaGroupBy is the group-by builder for Meta entities.
 type MetaGroupBy struct {
 type MetaGroupBy struct {
 	config
 	config

+ 13 - 0
pkg/database/ent/migrate/schema.go

@@ -178,6 +178,18 @@ var (
 			},
 			},
 		},
 		},
 	}
 	}
+	// LocksColumns holds the columns for the "locks" table.
+	LocksColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeInt, Increment: true},
+		{Name: "name", Type: field.TypeString, Unique: true},
+		{Name: "created_at", Type: field.TypeTime},
+	}
+	// LocksTable holds the schema information for the "locks" table.
+	LocksTable = &schema.Table{
+		Name:       "locks",
+		Columns:    LocksColumns,
+		PrimaryKey: []*schema.Column{LocksColumns[0]},
+	}
 	// MachinesColumns holds the columns for the "machines" table.
 	// MachinesColumns holds the columns for the "machines" table.
 	MachinesColumns = []*schema.Column{
 	MachinesColumns = []*schema.Column{
 		{Name: "id", Type: field.TypeInt, Increment: true},
 		{Name: "id", Type: field.TypeInt, Increment: true},
@@ -237,6 +249,7 @@ var (
 		ConfigItemsTable,
 		ConfigItemsTable,
 		DecisionsTable,
 		DecisionsTable,
 		EventsTable,
 		EventsTable,
+		LocksTable,
 		MachinesTable,
 		MachinesTable,
 		MetaTable,
 		MetaTable,
 	}
 	}

+ 367 - 0
pkg/database/ent/mutation.go

@@ -14,6 +14,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
@@ -35,6 +36,7 @@ const (
 	TypeConfigItem = "ConfigItem"
 	TypeConfigItem = "ConfigItem"
 	TypeDecision   = "Decision"
 	TypeDecision   = "Decision"
 	TypeEvent      = "Event"
 	TypeEvent      = "Event"
+	TypeLock       = "Lock"
 	TypeMachine    = "Machine"
 	TypeMachine    = "Machine"
 	TypeMeta       = "Meta"
 	TypeMeta       = "Meta"
 )
 )
@@ -6088,6 +6090,371 @@ func (m *EventMutation) ResetEdge(name string) error {
 	return fmt.Errorf("unknown Event edge %s", name)
 	return fmt.Errorf("unknown Event edge %s", name)
 }
 }
 
 
+// LockMutation represents an operation that mutates the Lock nodes in the graph.
+type LockMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	name          *string
+	created_at    *time.Time
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*Lock, error)
+	predicates    []predicate.Lock
+}
+
+var _ ent.Mutation = (*LockMutation)(nil)
+
+// lockOption allows management of the mutation configuration using functional options.
+type lockOption func(*LockMutation)
+
+// newLockMutation creates new mutation for the Lock entity.
+func newLockMutation(c config, op Op, opts ...lockOption) *LockMutation {
+	m := &LockMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeLock,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withLockID sets the ID field of the mutation.
+func withLockID(id int) lockOption {
+	return func(m *LockMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Lock
+		)
+		m.oldValue = func(ctx context.Context) (*Lock, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Lock.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withLock sets the old Lock of the mutation.
+func withLock(node *Lock) lockOption {
+	return func(m *LockMutation) {
+		m.oldValue = func(context.Context) (*Lock, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m LockMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m LockMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *LockMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *LockMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Lock.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetName sets the "name" field.
+func (m *LockMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *LockMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the Lock entity.
+// If the Lock object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *LockMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *LockMutation) ResetName() {
+	m.name = nil
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *LockMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *LockMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Lock entity.
+// If the Lock object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *LockMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *LockMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// Where appends a list of predicates to the LockMutation builder.
+func (m *LockMutation) Where(ps ...predicate.Lock) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *LockMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Lock).
+func (m *LockMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *LockMutation) Fields() []string {
+	fields := make([]string, 0, 2)
+	if m.name != nil {
+		fields = append(fields, lock.FieldName)
+	}
+	if m.created_at != nil {
+		fields = append(fields, lock.FieldCreatedAt)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *LockMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case lock.FieldName:
+		return m.Name()
+	case lock.FieldCreatedAt:
+		return m.CreatedAt()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *LockMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case lock.FieldName:
+		return m.OldName(ctx)
+	case lock.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	}
+	return nil, fmt.Errorf("unknown Lock field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *LockMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case lock.FieldName:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetName(v)
+		return nil
+	case lock.FieldCreatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetCreatedAt(v)
+		return nil
+	}
+	return fmt.Errorf("unknown Lock field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *LockMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *LockMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *LockMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Lock numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *LockMutation) ClearedFields() []string {
+	return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *LockMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *LockMutation) ClearField(name string) error {
+	return fmt.Errorf("unknown Lock nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *LockMutation) ResetField(name string) error {
+	switch name {
+	case lock.FieldName:
+		m.ResetName()
+		return nil
+	case lock.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	}
+	return fmt.Errorf("unknown Lock field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *LockMutation) AddedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *LockMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *LockMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *LockMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *LockMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *LockMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *LockMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown Lock unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *LockMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown Lock edge %s", name)
+}
+
 // MachineMutation represents an operation that mutates the Machine nodes in the graph.
 // MachineMutation represents an operation that mutates the Machine nodes in the graph.
 type MachineMutation struct {
 type MachineMutation struct {
 	config
 	config

+ 3 - 0
pkg/database/ent/predicate/predicate.go

@@ -21,6 +21,9 @@ type Decision func(*sql.Selector)
 // Event is the predicate function for event builders.
 // Event is the predicate function for event builders.
 type Event func(*sql.Selector)
 type Event func(*sql.Selector)
 
 
+// Lock is the predicate function for lock builders.
+type Lock func(*sql.Selector)
+
 // Machine is the predicate function for machine builders.
 // Machine is the predicate function for machine builders.
 type Machine func(*sql.Selector)
 type Machine func(*sql.Selector)
 
 

+ 7 - 0
pkg/database/ent/runtime.go

@@ -10,6 +10,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/schema"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/schema"
@@ -137,6 +138,12 @@ func init() {
 	eventDescSerialized := eventFields[3].Descriptor()
 	eventDescSerialized := eventFields[3].Descriptor()
 	// event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save.
 	// event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save.
 	event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error)
 	event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error)
+	lockFields := schema.Lock{}.Fields()
+	_ = lockFields
+	// lockDescCreatedAt is the schema descriptor for created_at field.
+	lockDescCreatedAt := lockFields[1].Descriptor()
+	// lock.DefaultCreatedAt holds the default value on creation for the created_at field.
+	lock.DefaultCreatedAt = lockDescCreatedAt.Default.(func() time.Time)
 	machineFields := schema.Machine{}.Fields()
 	machineFields := schema.Machine{}.Fields()
 	_ = machineFields
 	_ = machineFields
 	// machineDescCreatedAt is the schema descriptor for created_at field.
 	// machineDescCreatedAt is the schema descriptor for created_at field.

+ 22 - 0
pkg/database/ent/schema/lock.go

@@ -0,0 +1,22 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+type Lock struct {
+	ent.Schema
+}
+
+func (Lock) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("name").Unique().StructTag(`json:"name"`),
+		field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`),
+	}
+}
+
+func (Lock) Edges() []ent.Edge {
+	return nil
+}
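
The schema above is the only hand-written part of the generated Lock code; the unique "name" column is what later lets a plain INSERT act as a lock. The corresponding "locks" table is created by ent's auto-migration; a minimal sketch, assuming an initialized *ent.Client named client and a context ctx (the actual migration call lives elsewhere in pkg/database and is outside this excerpt):

// create/upgrade the database schema, including the new "locks" table
if err := client.Schema.Create(ctx); err != nil {
	log.Fatalf("unable to run ent migration: %s", err)
}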

+ 3 - 0
pkg/database/ent/tx.go

@@ -22,6 +22,8 @@ type Tx struct {
 	Decision *DecisionClient
 	Decision *DecisionClient
 	// Event is the client for interacting with the Event builders.
 	// Event is the client for interacting with the Event builders.
 	Event *EventClient
 	Event *EventClient
+	// Lock is the client for interacting with the Lock builders.
+	Lock *LockClient
 	// Machine is the client for interacting with the Machine builders.
 	// Machine is the client for interacting with the Machine builders.
 	Machine *MachineClient
 	Machine *MachineClient
 	// Meta is the client for interacting with the Meta builders.
 	// Meta is the client for interacting with the Meta builders.
@@ -166,6 +168,7 @@ func (tx *Tx) init() {
 	tx.ConfigItem = NewConfigItemClient(tx.config)
 	tx.ConfigItem = NewConfigItemClient(tx.config)
 	tx.Decision = NewDecisionClient(tx.config)
 	tx.Decision = NewDecisionClient(tx.config)
 	tx.Event = NewEventClient(tx.config)
 	tx.Event = NewEventClient(tx.config)
+	tx.Lock = NewLockClient(tx.config)
 	tx.Machine = NewMachineClient(tx.config)
 	tx.Machine = NewMachineClient(tx.config)
 	tx.Meta = NewMetaClient(tx.config)
 	tx.Meta = NewMetaClient(tx.config)
 }
 }

+ 79 - 0
pkg/database/lock.go

@@ -0,0 +1,79 @@
+package database
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/lock"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+const (
+	CAPIPullLockTimeout = 130 // minutes
+	MetricsLockTimeout  = 40  // minutes
+)
+
+func (c *Client) AcquireLock(name string) error {
+	// pessimistic lock: the unique constraint on "name" makes a second INSERT
+	// for the same lock name fail with a constraint error
+	_, err := c.Ent.Lock.Create().
+		SetName(name).
+		SetCreatedAt(types.UtcNow()).
+		Save(c.CTX)
+	if ent.IsConstraintError(err) {
+		// keep the constraint error intact so callers can detect it with IsLocked()
+		return err
+	}
+	if err != nil {
+		return errors.Wrapf(QueryFail, "insert lock: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) ReleaseLock(name string) error {
+	_, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX)
+	if err != nil {
+		return errors.Wrapf(QueryFail, "delete lock: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error {
+	log.Debugf("releasing (%s) orphan locks", name)
+	// a lock is considered orphaned once it is older than the timeout (in minutes)
+	_, err := c.Ent.Lock.Delete().Where(
+		lock.NameEQ(name),
+		lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)),
+	).Exec(c.CTX)
+	if err != nil {
+		return errors.Wrapf(QueryFail, "delete lock: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) IsLocked(err error) bool {
+	return ent.IsConstraintError(err)
+}
+
+func (c *Client) AcquirePushMetricsLock() error {
+	lockName := "pushMetrics"
+	err := c.ReleaseLockWithTimeout(lockName, MetricsLockTimeout)
+	if err != nil {
+		log.Errorf("unable to release pushMetrics lock: %s", err)
+	}
+	return c.AcquireLock(lockName)
+}
+
+func (c *Client) ReleasePushMetricsLock() error {
+	return c.ReleaseLock("pushMetrics")
+}
+
+func (c *Client) AcquirePullCAPILock() error {
+	lockName := "pullCAPI"
+	err := c.ReleaseLockWithTimeout(lockName, CAPIPullLockTimeout)
+	if err != nil {
+		log.Errorf("unable to release pullCAPI lock: %s", err)
+	}
+	return c.AcquireLock(lockName)
+}
+
+func (c *Client) ReleasePullCAPILock() error {
+	return c.ReleaseLockWithTimeout("pullCAPI", CAPIPullLockTimeout)
+}
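
Taken together, these helpers give each LAPI node a database-backed mutex: stale locks are cleared first, the node then tries to insert its own row, and it backs off when another node already holds the lock. An illustrative caller for the CAPI pull, not part of this commit, assuming a *database.Client named dbClient, a doPull callback, and the logrus import (the real call sites are in pkg/apiserver/apic.go, outside this excerpt):

func pullWithLock(dbClient *database.Client, doPull func() error) error {
	if err := dbClient.AcquirePullCAPILock(); err != nil {
		if dbClient.IsLocked(err) {
			log.Info("pullCAPI lock is held by another LAPI node, skipping pull")
			return nil
		}
		return err
	}
	defer func() {
		// ReleasePullCAPILock only removes the lock once it is older than
		// CAPIPullLockTimeout, so a fresh lock keeps the other nodes from
		// re-pulling in the meantime
		if err := dbClient.ReleasePullCAPILock(); err != nil {
			log.Errorf("unable to release pullCAPI lock: %s", err)
		}
	}()
	return doPull()
}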