Fix typos in docs, comments, code (#1483)
parent 44b11c2e5b
commit 0f4ab71f01
23 changed files with 39 additions and 39 deletions
@@ -521,7 +521,7 @@ decisions.json :
 	`,
 	Run: func(cmd *cobra.Command, args []string) {
 		if importFile == "" {
-			log.Fatalf("Please provide a input file contaning decisions with -i flag")
+			log.Fatalf("Please provide a input file containing decisions with -i flag")
 		}
 		csvData, err := os.ReadFile(importFile)
 		if err != nil {
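The snippet above is presumably from the `cscli decisions import` command; as a hedged usage sketch, only the `-i` flag comes from the code above and the file name is illustrative:

```shell
# illustrative sketch: import previously exported decisions from a file
cscli decisions import -i decisions.json
```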
@@ -622,7 +622,7 @@ func RestoreHub(dirPath string) error {
 		if err != nil {
 			return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err)
 		}
-		//finaly copy item
+		//finally copy item
 		for _, tfile := range ifiles {
 			log.Infof("Going to restore local/tainted [%s]", tfile.Name())
 			sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name())
@@ -82,7 +82,7 @@ The container is built with specific docker [configuration](https://github.com/c
 If you wish to use the [notification system](https://docs.crowdsec.net/docs/notification_plugins/intro), you will need to mount at least a custom `profiles.yaml` and a notification configuration to `/etc/crowdsec/notifications`
 
 # Deployment use cases
-Crowdsec is composed of an `agent` that parse logs and creates `alerts` that `local API` or `LAPI` tranform into decisions. Both can run in the same process but also on separated containers as it makes sense in complex configurations to have agents on the same machines as the protected component and a LAPI that gather all signals from agents and communicate with the `central api`.
+Crowdsec is composed of an `agent` that parse logs and creates `alerts` that `local API` or `LAPI` transform into decisions. Both can run in the same process but also on separated containers as it makes sense in complex configurations to have agents on the same machines as the protected component and a LAPI that gather all signals from agents and communicate with the `central api`.
 
 ## Register a new agent with LAPI
 ```shell
@@ -92,7 +92,7 @@ docker exec -it crowdsec_lapi_container_name cscli machines add agent_user_name
 ## Run an agent connected to LAPI
 Add following environment variables to your docker run command:
 * `DISABLE_LOCAL_API=true`
-* `AGENT_USERNAME="agent_user_name"` - agent_user_name previously registred with LAPI
+* `AGENT_USERNAME="agent_user_name"` - agent_user_name previously registered with LAPI
 * `AGENT_PASSWORD="agent_password"` - agent_password previously registered with LAPI
 * `LOCAL_API_URL="http://LAPI_host:LAPI_port"`
 
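Putting the variables from this hunk together, a hedged `docker run` sketch (the container name, log mount, and image tag are assumptions, not part of the diff):

```shell
# illustrative only: run the agent container with the LAPI credentials listed above
docker run -d --name crowdsec_agent \
  -e DISABLE_LOCAL_API=true \
  -e AGENT_USERNAME="agent_user_name" \
  -e AGENT_PASSWORD="agent_password" \
  -e LOCAL_API_URL="http://LAPI_host:LAPI_port" \
  -v /var/log:/var/log:ro \
  crowdsecurity/crowdsec:latest
```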
@@ -508,7 +508,7 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types
 			outChan <- evt
 			d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw)
 		case <-readerTomb.Dying():
-			//This case is to handle temporarly losing the connection to the docker socket
+			//This case is to handle temporarily losing the connection to the docker socket
 			//The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart)
 			d.logger.Debugf("readerTomb dying for container %s, removing it from runningContainerState", container.Name)
 			delete(d.runningContainerState, container.ID)
@@ -161,7 +161,7 @@ func (s *SyslogSource) buildLogFromSyslog(ts *time.Time, hostname *string,
 		!!! ugly hack !!!
 		Due to a bug in the syslog parser we use (https://github.com/influxdata/go-syslog/issues/31),
 		the ProcID field will contain garbage if the message as a ] anywhere in it.
-		Assume that a correctly formated ProcID only contains number, and if this is not the case, set it to an arbitrary value
+		Assume that a correctly formatted ProcID only contains number, and if this is not the case, set it to an arbitrary value
 		*/
 		_, err := strconv.Atoi(*pid)
 		if err != nil {
@@ -272,7 +272,7 @@ func TestAlertListFilters(t *testing.T) {
 	assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ")
 	assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`)
 
-	//test since (ok but yelds no results)
+	//test since (ok but yields no results)
 
 	w = lapi.RecordResponse("GET", "/v1/alerts?since=1ns", emptyBody)
 	assert.Equal(t, 200, w.Code)
@@ -538,7 +538,7 @@ func TestAPICPullTop(t *testing.T) {
 				Scope: types.StrPtr("Ip"),
 				Duration: types.StrPtr("24h"),
 				Type: types.StrPtr("ban"),
-			}, // Thie is already present in DB
+			}, // This is already present in DB
 			&models.Decision{
 				Origin: &SCOPE_LISTS,
 				Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
@@ -910,7 +910,7 @@ func TestShouldShareAlert(t *testing.T) {
 			expectedTrust: "manual",
 		},
 		{
-			name: "manaul alert should not be shared if config disables it",
+			name: "manual alert should not be shared if config disables it",
 			consoleConfig: &csconfig.ConsoleConfig{
 				ShareManualDecisions: types.BoolPtr(false),
 			},
@@ -934,7 +934,7 @@ func TestShouldShareAlert(t *testing.T) {
 			expectedTrust: "tainted",
 		},
 		{
-			name: "manaul alert should not be shared if config disables it",
+			name: "manual alert should not be shared if config disables it",
 			consoleConfig: &csconfig.ConsoleConfig{
 				ShareTaintedScenarios: types.BoolPtr(false),
 			},
@@ -213,7 +213,7 @@ func (c *Controller) FindAlerts(gctx *gin.Context) {
 	gctx.JSON(http.StatusOK, data)
 }
 
-// FindAlertByID return the alert assiocated to the ID
+// FindAlertByID return the alert associated to the ID
 func (c *Controller) FindAlertByID(gctx *gin.Context) {
 	alertIDStr := gctx.Param("alert_id")
 	alertID, err := strconv.Atoi(alertIDStr)
@@ -37,7 +37,7 @@ const (
 	CrowdsecPluginKey string = "CROWDSEC_PLUGIN_KEY"
 )
 
-//The broker is reponsible for running the plugins and dispatching events
+//The broker is responsible for running the plugins and dispatching events
 //It receives all the events from the main process and stacks them up
 //It is as well notified by the watcher when it needs to deliver events to plugins (based on time or count threshold)
 type PluginBroker struct {
@@ -304,7 +304,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 		if evt.Line.Raw == "" {
 			continue
 		}
-		//it might be bucket oveflow being reprocessed, skip this
+		//it might be bucket overflow being reprocessed, skip this
 		if _, ok := state[evt.Line.Time]; !ok {
 			state[evt.Line.Time] = make(map[string]map[string]ParserResult)
 			assoc[evt.Line.Time] = evt.Line.Raw
@@ -36,7 +36,7 @@ event is poured in a trigger, it always raises an overflow.
 
 It's a bucket working as the standard leaky bucket except for one
 thing: a filter returns a property for each event and only one
-occurence of this property is allowed in the bucket, thus the bucket
+occurrence of this property is allowed in the bucket, thus the bucket
 is called uniq.
 
 ## Counter
@@ -64,9 +64,9 @@ duration.
 * leakspeed: leakspeed is a time duration (has to be parseable by
 https://golang.org/pkg/time/#ParseDuration). After each interval an
 event is leaked from the bucket.
-* stackkey: mandatory field. This field is used to differenciate on
-which bucket ongoing events will be poured. When an unknows stackkey
-is seen in an event a new bucekt is created.
+* stackkey: mandatory field. This field is used to differentiate on
+which bucket ongoing events will be poured. When an unknown stackkey
+is seen in an event a new bucket is created.
 * on_overflow: optional field, that tells the what to do when the
 bucket is returning the overflow event. As of today, the possibility
 are these: "ban,1h", "Reprocess", "Delete".
@@ -96,7 +96,7 @@ Nevertheless, this kind of bucket is often used with an infinite
 leakspeed and an infinite capacity [capacity set to -1 for now].
 
 
-## Add exemples here
+## Add examples here
 
 ```
 # ssh bruteforce
@@ -287,7 +287,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 	}
 
 	bucketFactory.logger.Infof("Adding %s bucket", bucketFactory.Type)
-	//return the Holder correponding to the type of bucket
+	//return the Holder corresponding to the type of bucket
 	bucketFactory.processors = []Processor{}
 	switch bucketFactory.Type {
 	case "leaky":
@@ -172,7 +172,7 @@ func TestShutdownBuckets(t *testing.T) {
 		t.Fatal(err)
 	}
 	if err := ShutdownAllBuckets(buckets); err != nil {
-		t.Fatalf("while shuting down buckets : %s", err)
+		t.Fatalf("while shutting down buckets : %s", err)
 	}
 	time.Sleep(2 * time.Second)
 	if err := expectBucketCount(buckets, 2); err != nil {
@@ -9,12 +9,12 @@ import (
 )
 
 // ResetFilter allows to kill the bucket (without overflowing), if a particular condition is met.
-// An example would be a scenario to detect aggressive crawlers that *do not* fetch any static ressources :
+// An example would be a scenario to detect aggressive crawlers that *do not* fetch any static resources :
 // type : leaky
 // filter: filter: "evt.Meta.log_type == 'http_access-log'
 // reset_filter: evt.Parsed.request endswith '.css'
 // ....
-// Thus, if the bucket receives a request that matches fetching a static ressource (here css), it cancels itself
+// Thus, if the bucket receives a request that matches fetching a static resource (here css), it cancels itself
 
 type CancelOnFilter struct {
 	CancelOnFilter *vm.Program
@@ -13,7 +13,7 @@ type Trigger struct {
 
 func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event {
 	// Pour makes the bucket overflow all the time
-	// TriggerPour unconditionnaly overflows
+	// TriggerPour unconditionally overflows
 	return func(msg types.Event, l *Leaky) *types.Event {
 		if l.Mode == TIMEMACHINE {
 			var d time.Time
@@ -8,11 +8,11 @@ The alphabetical order dictates the order in which the stages/parsers are proces
 
 The runtime representation of a line being parsed (or an overflow) is an `Event`, and has fields that can be manipulated by user :
 - Parsed : a string dict containing parser outputs
-- Meta : a string dict containing meta informations about the event
+- Meta : a string dict containing meta information about the event
 - Line : a raw line representation
 - Overflow : a representation of the overflow if applicable
 
-The Event structure goes trough the stages, being altered with each parsing step.
+The Event structure goes through the stages, being altered with each parsing step.
 It's the same object that will be later poured into buckets.
 
 # Parser configuration
@@ -123,7 +123,7 @@ pattern_syntax:
 
 ### Enrichment
 
-Enrichment mecanism is exposed via statics :
+Enrichment mechanism is exposed via statics :
 
 ```yaml
 statics:
@@ -29,7 +29,7 @@ type Node struct {
 	Name string `yaml:"name,omitempty"`
 	Author string `yaml:"author,omitempty"`
 	Description string `yaml:"description,omitempty"`
-	Rerferences []string `yaml:"references,omitempty"`
+	References []string `yaml:"references,omitempty"`
 	//if debug is present in the node, keep its specific Logger in runtime structure
 	Logger *log.Entry `yaml:"-"`
 	//This is mostly a hack to make writing less repetitive.
@@ -54,7 +54,7 @@ func TestParserConfigs(t *testing.T) {
 			t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err)
 		}
 		if CfgTests[idx].Compiles == false && err == nil {
-			t.Fatalf("Compile: (%d/%d) expected errror", idx+1, len(CfgTests))
+			t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests))
 		}
 
 		err = CfgTests[idx].NodeCfg.validate(pctx, EnricherCtx{})
@@ -270,7 +270,7 @@ func TestMix(t *testing.T) {
 
 	runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst
 	runReserve(t, lim, request{t0, 2, t0, true})
-	run(t, lim, []allow{{t1, 2, false}}) // not enought tokens - don't allow
+	run(t, lim, []allow{{t1, 2, false}}) // not enough tokens - don't allow
 	runReserve(t, lim, request{t1, 2, t2, true})
 	run(t, lim, []allow{{t1, 1, false}}) // negative tokens - don't allow
 	run(t, lim, []allow{{t3, 1, true}})
@@ -48,7 +48,7 @@ function init
     cd ..
    BUILD_VERSION=${CROWDSEC_VERSION} make release
    if [ $? != 0 ]; then
-        echo "Unable to make the release (make sur you have go installed), exiting"
+        echo "Unable to make the release (make sure you have go installed), exiting"
        exit 1
    fi
    RELEASE_FOLDER="crowdsec-${CROWDSEC_VERSION}"
@@ -124,7 +124,7 @@ function init
    md5sum ${SYSTEMD_FILE} >> systemd.md5
 
    echo "[*] Setup done"
-    echo "[*] Lauching the upgrade"
+    echo "[*] Launching the upgrade"
    cd ${RELEASE_FOLDER}/
    ./wizard.sh --upgrade --force
    cd ${CURRENT_FOLDER}
@@ -36,9 +36,9 @@ repositories).
 | Feature | Covered | Notes |
 | :----------------------------- | :------------- | :----------------------------------------- |
 | `systemctl` start/stop/restart | - | |
-| agent behaviour | `40_live-ban` | minimal testing (simple ssh-bf detection) |
+| agent behavior | `40_live-ban` | minimal testing (simple ssh-bf detection) |
 | forensic mode | `40_cold-logs` | minimal testing (simple ssh-bf detection) |
-| starting withou LAPI | `02_nolapi` | |
+| starting without LAPI | `02_nolapi` | |
 | starting without agent | `03_noagent` | |
 | starting without CAPI | `04_nocapi` | |
 | prometheus testing | - | |
@@ -264,7 +264,7 @@ Here are some ways to use these two scripts.
   configuration inside the test function before running the lapi/agent. See
   how we use `yq` to change the YAML files to that effect.
 
-- case 3: start crowdsec with the inital set of configuration+data once, and keep it
+- case 3: start crowdsec with the initial set of configuration+data once, and keep it
   running for all the tests (50_simulation, 98_ipv4, 98_ipv6)
 
   This offers no isolation across tests, which over time could break more
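As a hedged illustration of the `yq` edits mentioned in this hunk (the config path and key below are assumptions for the sketch, not taken from the test suite):

```shell
# illustrative sketch: tweak the copied crowdsec config before starting the lapi/agent
CONFIG_YAML="./local/etc/crowdsec/config.yaml"   # hypothetical path used by the fixtures
yq e '.common.log_level = "debug"' -i "$CONFIG_YAML"
```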
@@ -51,7 +51,7 @@ make_init_data() {
    ./instance-db config-yaml
    ./instance-db setup
 
-    # when installed pacakges are always using sqlite, so no need to regenerate
+    # when installed packages are always using sqlite, so no need to regenerate
    # local credz for sqlite
 
    [ "${DB_BACKEND}" == "sqlite" ] || ${CSCLI} machines add --auto
@@ -321,7 +321,7 @@ detect_cs_install () {
    if [[ -f "$CROWDSEC_BIN_INSTALLED" ]]; then
        log_warn "Crowdsec is already installed !"
        echo ""
-        echo "We recommand to upgrade : sudo ./wizard.sh --upgrade "
+        echo "We recommend to upgrade : sudo ./wizard.sh --upgrade "
        echo "If you want to install it anyway, please use '--force'."
        echo ""
        echo "Run : sudo ./wizard.sh -i --force"
@@ -352,7 +352,7 @@ check_cs_version () {
    if [[ $ACTION != "upgrade" ]] ; then
        if [[ ${FORCE_MODE} == "false" ]]; then
            echo ""
-            echo "We recommand to upgrade with : sudo ./wizard.sh --upgrade "
+            echo "We recommend to upgrade with : sudo ./wizard.sh --upgrade "
            echo "If you want to $ACTION anyway, please use '--force'."
            echo ""
            echo "Run : sudo ./wizard.sh --$ACTION --force"
@@ -364,7 +364,7 @@ check_cs_version () {
    if [[ $ACTION != "binupgrade" ]] ; then
        if [[ ${FORCE_MODE} == "false" ]]; then
            echo ""
-            echo "We recommand to upgrade binaries only : sudo ./wizard.sh --binupgrade "
+            echo "We recommend to upgrade binaries only : sudo ./wizard.sh --binupgrade "
            echo "If you want to $ACTION anyway, please use '--force'."
            echo ""
            echo "Run : sudo ./wizard.sh --$ACTION --force"
@@ -375,7 +375,7 @@ check_cs_version () {
    log_warn "new version ($NEW_CS_VERSION) is same as current version ($CURRENT_CS_VERSION) !"
    if [[ ${FORCE_MODE} == "false" ]]; then
        echo ""
-        echo "We recommand to $ACTION only if it's an higher version. "
+        echo "We recommend to $ACTION only if it's an higher version. "
        echo "If it's an RC version (vX.X.X-rc) you can upgrade it using '--force'."
        echo ""
        echo "Run : sudo ./wizard.sh --$ACTION --force"