Entgo 0.9 (#1018)

* update entgo & sqlite to latest version
* schema update

parent 66919924d3
commit 3f99330b3d
43 changed files with 989 additions and 405 deletions

go.mod (9 changed lines)
@@ -3,7 +3,7 @@ module github.com/crowdsecurity/crowdsec
go 1.13

require (
entgo.io/ent v0.7.0
entgo.io/ent v0.9.1
github.com/AlecAivazis/survey/v2 v2.2.7
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect

@@ -43,10 +43,10 @@ require (
github.com/imdario/mergo v0.3.12 // indirect
github.com/influxdata/go-syslog/v3 v3.0.0
github.com/leodido/go-urn v1.2.1 // indirect
github.com/lib/pq v1.10.0
github.com/lib/pq v1.10.2
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-runewidth v0.0.10 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/mattn/go-sqlite3 v1.14.8
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect

@@ -70,8 +70,7 @@ require (
github.com/ugorji/go v1.2.3 // indirect
github.com/vjeantet/grok v1.0.1 // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/mod v0.4.1
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
golang.org/x/mod v0.4.2
golang.org/x/sys v0.0.0-20210921065528-437939a70204
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
golang.org/x/text v0.3.5 // indirect
go.sum (37 changed lines)

@@ -11,8 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
entgo.io/ent v0.7.0 h1:E3EjO0cUL61DvUg5ZEZdxa4yTL+4SuZv0LqBExo8CQA=
entgo.io/ent v0.7.0/go.mod h1:HZZJxglL8ro4OVDmM06lijj4bOTGcaDdrZttDZ8fVJs=
entgo.io/ent v0.9.1 h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=
entgo.io/ent v0.9.1/go.mod h1:6NUeTfUN5mp5YN+5tgoH1SlakSvYPTBOYotSOvaI4ak=
github.com/AlecAivazis/survey/v2 v2.2.7 h1:5NbxkF4RSKmpywYdcRgUmos1o+roJY8duCLZXbVjoig=
github.com/AlecAivazis/survey/v2 v2.2.7/go.mod h1:9DYvHgXtiXm6nCn+jXnOXLKbH+Yo9u8fAS/SduGdoPk=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=

@@ -350,8 +350,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40=

@@ -412,7 +412,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I=
github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=

@@ -460,8 +460,8 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=

@@ -494,9 +494,8 @@ github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU=
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=

@@ -710,6 +709,7 @@ github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHM
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=

@@ -770,8 +770,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -801,8 +801,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -815,6 +815,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -857,11 +858,11 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210921065528-437939a70204 h1:JJhkWtBuTQKyz2bd5WG9H8iUsJRU3En/KRfN8B2RnDs=
golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=

@@ -914,7 +915,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -129,17 +129,17 @@ func (*Alert) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case alert.FieldSimulated:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case alert.FieldSourceLatitude, alert.FieldSourceLongitude:
values[i] = &sql.NullFloat64{}
values[i] = new(sql.NullFloat64)
case alert.FieldID, alert.FieldEventsCount, alert.FieldCapacity:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case alert.FieldScenario, alert.FieldBucketId, alert.FieldMessage, alert.FieldSourceIp, alert.FieldSourceRange, alert.FieldSourceAsNumber, alert.FieldSourceAsName, alert.FieldSourceCountry, alert.FieldSourceScope, alert.FieldSourceValue, alert.FieldLeakSpeed, alert.FieldScenarioVersion, alert.FieldScenarioHash:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case alert.FieldCreatedAt, alert.FieldUpdatedAt, alert.FieldStartedAt, alert.FieldStoppedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case alert.ForeignKeys[0]: // machine_alerts
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i])
}

@@ -65,28 +65,28 @@ const (
EdgeMetas = "metas"
// Table holds the table name of the alert in the database.
Table = "alerts"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "alerts"
// OwnerInverseTable is the table name for the Machine entity.
// It exists in this package in order to avoid circular dependency with the "machine" package.
OwnerInverseTable = "machines"
// OwnerColumn is the table column denoting the owner relation/edge.
OwnerColumn = "machine_alerts"
// DecisionsTable is the table the holds the decisions relation/edge.
// DecisionsTable is the table that holds the decisions relation/edge.
DecisionsTable = "decisions"
// DecisionsInverseTable is the table name for the Decision entity.
// It exists in this package in order to avoid circular dependency with the "decision" package.
DecisionsInverseTable = "decisions"
// DecisionsColumn is the table column denoting the decisions relation/edge.
DecisionsColumn = "alert_decisions"
// EventsTable is the table the holds the events relation/edge.
// EventsTable is the table that holds the events relation/edge.
EventsTable = "events"
// EventsInverseTable is the table name for the Event entity.
// It exists in this package in order to avoid circular dependency with the "event" package.
EventsInverseTable = "events"
// EventsColumn is the table column denoting the events relation/edge.
EventsColumn = "alert_events"
// MetasTable is the table the holds the metas relation/edge.
// MetasTable is the table that holds the metas relation/edge.
MetasTable = "meta"
// MetasInverseTable is the table name for the Meta entity.
// It exists in this package in order to avoid circular dependency with the "meta" package.

@@ -415,11 +415,17 @@ func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) {
return nil, err
}
ac.mutation = mutation
node, err = ac.sqlSave(ctx)
if node, err = ac.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ac.hooks) - 1; i >= 0; i-- {
if ac.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ac.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ac.mutation); err != nil {

@@ -438,6 +444,19 @@ func (ac *AlertCreate) SaveX(ctx context.Context) *Alert {
return v
}

// Exec executes the query.
func (ac *AlertCreate) Exec(ctx context.Context) error {
_, err := ac.Save(ctx)
return err
}

// ExecX is like Exec, but panics if an error occurs.
func (ac *AlertCreate) ExecX(ctx context.Context) {
if err := ac.Exec(ctx); err != nil {
panic(err)
}
}

// defaults sets the default values of the builder before save.
func (ac *AlertCreate) defaults() {
if _, ok := ac.mutation.CreatedAt(); !ok {
@@ -477,16 +496,16 @@ func (ac *AlertCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (ac *AlertCreate) check() error {
if _, ok := ac.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := ac.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := ac.mutation.Scenario(); !ok {
return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")}
return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "scenario"`)}
}
if _, ok := ac.mutation.Simulated(); !ok {
return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")}
return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "simulated"`)}
}
return nil
}

@@ -494,8 +513,8 @@ func (ac *AlertCreate) check() error {
func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
_node, _spec := ac.createSpec()
if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

@@ -800,19 +819,23 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, acb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {

@@ -837,3 +860,16 @@ func (acb *AlertCreateBulk) SaveX(ctx context.Context) []*Alert {
}
return v
}

// Exec executes the query.
func (acb *AlertCreateBulk) Exec(ctx context.Context) error {
_, err := acb.Save(ctx)
return err
}

// ExecX is like Exec, but panics if an error occurs.
func (acb *AlertCreateBulk) ExecX(ctx context.Context) {
if err := acb.Exec(ctx); err != nil {
panic(err)
}
}

@@ -20,9 +20,9 @@ type AlertDelete struct {
mutation *AlertMutation
}

// Where adds a new predicate to the AlertDelete builder.
// Where appends a list predicates to the AlertDelete builder.
func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete {
ad.mutation.predicates = append(ad.mutation.predicates, ps...)
ad.mutation.Where(ps...)
return ad
}

@@ -46,6 +46,9 @@ func (ad *AlertDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(ad.hooks) - 1; i >= 0; i-- {
if ad.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ad.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ad.mutation); err != nil {
@@ -25,6 +25,7 @@ type AlertQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Alert

@@ -57,6 +58,13 @@ func (aq *AlertQuery) Offset(offset int) *AlertQuery {
return aq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (aq *AlertQuery) Unique(unique bool) *AlertQuery {
aq.unique = &unique
return aq
}

// Order adds an order step to the query.
func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery {
aq.order = append(aq.order, o...)
@@ -426,8 +434,8 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
// Select(alert.FieldCreatedAt).
// Scan(ctx, &v)
//
func (aq *AlertQuery) Select(field string, fields ...string) *AlertSelect {
aq.fields = append([]string{field}, fields...)
func (aq *AlertQuery) Select(fields ...string) *AlertSelect {
aq.fields = append(aq.fields, fields...)
return &AlertSelect{AlertQuery: aq}
}
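Note: Select on the query builders changes signature from Select(field string, fields ...string) to Select(fields ...string), so any number of columns (including none) can be passed and repeated calls accumulate. A sketch of scanning one selected column, using the alert.FieldScenario constant that appears in the hunks above; the rest is assumed:

    package example

    import (
        "context"

        "github.com/crowdsecurity/crowdsec/pkg/database/ent"       // assumed import path
        "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" // generated column constants
    )

    // alertScenarios selects a single column and scans it into a string slice.
    func alertScenarios(ctx context.Context, client *ent.Client) ([]string, error) {
        var scenarios []string
        err := client.Alert.
            Query().
            Select(alert.FieldScenario). // variadic in the regenerated code
            Scan(ctx, &scenarios)
        return scenarios, err
    }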
|
@ -489,11 +497,14 @@ func (aq *AlertQuery) sqlAll(ctx context.Context) ([]*Alert, error) {
|
|||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*Alert)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].machine_alerts
|
||||
if fk != nil {
|
||||
ids = append(ids, *fk)
|
||||
nodeids[*fk] = append(nodeids[*fk], nodes[i])
|
||||
if nodes[i].machine_alerts == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].machine_alerts
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
query.Where(machine.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
|
@ -627,6 +638,9 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
From: aq.sql,
|
||||
Unique: true,
|
||||
}
|
||||
if unique := aq.unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
}
|
||||
if fields := aq.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
|
||||
|
@ -652,7 +666,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
if ps := aq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector, alert.ValidColumn)
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -662,16 +676,20 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(aq.driver.Dialect())
|
||||
t1 := builder.Table(alert.Table)
|
||||
selector := builder.Select(t1.Columns(alert.Columns...)...).From(t1)
|
||||
columns := aq.fields
|
||||
if len(columns) == 0 {
|
||||
columns = alert.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if aq.sql != nil {
|
||||
selector = aq.sql
|
||||
selector.Select(selector.Columns(alert.Columns...)...)
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
for _, p := range aq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range aq.order {
|
||||
p(selector, alert.ValidColumn)
|
||||
p(selector)
|
||||
}
|
||||
if offset := aq.offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
|
@ -933,13 +951,24 @@ func (agb *AlertGroupBy) sqlScan(ctx context.Context, v interface{}) error {
|
|||
}
|
||||
|
||||
func (agb *AlertGroupBy) sqlQuery() *sql.Selector {
|
||||
selector := agb.sql
|
||||
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
|
||||
columns = append(columns, agb.fields...)
|
||||
selector := agb.sql.Select()
|
||||
aggregation := make([]string, 0, len(agb.fns))
|
||||
for _, fn := range agb.fns {
|
||||
columns = append(columns, fn(selector, alert.ValidColumn))
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
return selector.Select(columns...).GroupBy(agb.fields...)
|
||||
// If no columns were selected in a custom aggregation function, the default
|
||||
// selection is the fields used for "group-by", and the aggregation functions.
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
|
||||
for _, f := range agb.fields {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
for _, c := range aggregation {
|
||||
columns = append(columns, c)
|
||||
}
|
||||
selector.Select(columns...)
|
||||
}
|
||||
return selector.GroupBy(selector.Columns(agb.fields...)...)
|
||||
}
|
||||
|
||||
// AlertSelect is the builder for selecting fields of Alert entities.
|
||||
|
@ -1155,16 +1184,10 @@ func (as *AlertSelect) BoolX(ctx context.Context) bool {
|
|||
|
||||
func (as *AlertSelect) sqlScan(ctx context.Context, v interface{}) error {
|
||||
rows := &sql.Rows{}
|
||||
query, args := as.sqlQuery().Query()
|
||||
query, args := as.sql.Query()
|
||||
if err := as.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
func (as *AlertSelect) sqlQuery() sql.Querier {
|
||||
selector := as.sql
|
||||
selector.Select(selector.Columns(as.fields...)...)
|
||||
return selector
|
||||
}
|
||||
|
|
|
@ -25,9 +25,9 @@ type AlertUpdate struct {
|
|||
mutation *AlertMutation
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the AlertUpdate builder.
|
||||
// Where appends a list predicates to the AlertUpdate builder.
|
||||
func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate {
|
||||
au.mutation.predicates = append(au.mutation.predicates, ps...)
|
||||
au.mutation.Where(ps...)
|
||||
return au
|
||||
}
|
||||
|
||||
|
@ -625,6 +625,9 @@ func (au *AlertUpdate) Save(ctx context.Context) (int, error) {
|
|||
return affected, err
|
||||
})
|
||||
for i := len(au.hooks) - 1; i >= 0; i-- {
|
||||
if au.hooks[i] == nil {
|
||||
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = au.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, au.mutation); err != nil {
|
||||
|
@ -1164,8 +1167,8 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{alert.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
@ -1175,6 +1178,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
// AlertUpdateOne is the builder for updating a single Alert entity.
|
||||
type AlertUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *AlertMutation
|
||||
}
|
||||
|
@ -1753,6 +1757,13 @@ func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne {
|
|||
return auo.RemoveMetaIDs(ids...)
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOne {
|
||||
auo.fields = append([]string{field}, fields...)
|
||||
return auo
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated Alert entity.
|
||||
func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) {
|
||||
var (
|
||||
|
@ -1773,6 +1784,9 @@ func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) {
|
|||
return node, err
|
||||
})
|
||||
for i := len(auo.hooks) - 1; i >= 0; i-- {
|
||||
if auo.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = auo.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, auo.mutation); err != nil {
|
||||
|
@ -1820,6 +1834,18 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error
|
|||
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Alert.ID for update")}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := auo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
|
||||
for _, f := range fields {
|
||||
if !alert.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != alert.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := auo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
|
@ -2320,8 +2346,8 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error
|
|||
if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{alert.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
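Note: the UpdateOne builders gain a fields slice and a Select method (see the AlertUpdateOne hunks above), so the entity returned by Save can be narrowed to chosen columns; the generated sqlSave always keeps the ID column. A sketch under the same assumptions, with the setter name for events_count assumed:

    package example

    import (
        "context"

        "github.com/crowdsecurity/crowdsec/pkg/database/ent"       // assumed import path
        "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" // generated column constants
    )

    // bumpEventsCount updates one alert and asks Save to return only the selected column.
    func bumpEventsCount(ctx context.Context, client *ent.Client, id int) (*ent.Alert, error) {
        return client.Alert.
            UpdateOneID(id).
            AddEventsCount(1).              // setter assumed from the events_count field
            Select(alert.FieldEventsCount). // new: limit the columns carried by the returned entity
            Save(ctx)
    }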
@@ -44,13 +44,13 @@ func (*Bouncer) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case bouncer.FieldRevoked:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case bouncer.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i])
}

@@ -163,11 +163,17 @@ func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) {
return nil, err
}
bc.mutation = mutation
node, err = bc.sqlSave(ctx)
if node, err = bc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(bc.hooks) - 1; i >= 0; i-- {
if bc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bc.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bc.mutation); err != nil {

@@ -186,6 +192,19 @@ func (bc *BouncerCreate) SaveX(ctx context.Context) *Bouncer {
return v
}

// Exec executes the query.
func (bc *BouncerCreate) Exec(ctx context.Context) error {
_, err := bc.Save(ctx)
return err
}

// ExecX is like Exec, but panics if an error occurs.
func (bc *BouncerCreate) ExecX(ctx context.Context) {
if err := bc.Exec(ctx); err != nil {
panic(err)
}
}

// defaults sets the default values of the builder before save.
func (bc *BouncerCreate) defaults() {
if _, ok := bc.mutation.CreatedAt(); !ok {

@@ -213,22 +232,22 @@ func (bc *BouncerCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (bc *BouncerCreate) check() error {
if _, ok := bc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := bc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := bc.mutation.Name(); !ok {
return &ValidationError{Name: "name", err: errors.New("ent: missing required field \"name\"")}
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "name"`)}
}
if _, ok := bc.mutation.APIKey(); !ok {
return &ValidationError{Name: "api_key", err: errors.New("ent: missing required field \"api_key\"")}
return &ValidationError{Name: "api_key", err: errors.New(`ent: missing required field "api_key"`)}
}
if _, ok := bc.mutation.Revoked(); !ok {
return &ValidationError{Name: "revoked", err: errors.New("ent: missing required field \"revoked\"")}
return &ValidationError{Name: "revoked", err: errors.New(`ent: missing required field "revoked"`)}
}
if _, ok := bc.mutation.LastPull(); !ok {
return &ValidationError{Name: "last_pull", err: errors.New("ent: missing required field \"last_pull\"")}
return &ValidationError{Name: "last_pull", err: errors.New(`ent: missing required field "last_pull"`)}
}
return nil
}

@@ -236,8 +255,8 @@ func (bc *BouncerCreate) check() error {
func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) {
_node, _spec := bc.createSpec()
if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

@@ -369,19 +388,23 @@ func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, bcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, bcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {

@@ -406,3 +429,16 @@ func (bcb *BouncerCreateBulk) SaveX(ctx context.Context) []*Bouncer {
}
return v
}

// Exec executes the query.
func (bcb *BouncerCreateBulk) Exec(ctx context.Context) error {
_, err := bcb.Save(ctx)
return err
}

// ExecX is like Exec, but panics if an error occurs.
func (bcb *BouncerCreateBulk) ExecX(ctx context.Context) {
if err := bcb.Exec(ctx); err != nil {
panic(err)
}
}

@@ -20,9 +20,9 @@ type BouncerDelete struct {
mutation *BouncerMutation
}

// Where adds a new predicate to the BouncerDelete builder.
// Where appends a list predicates to the BouncerDelete builder.
func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete {
bd.mutation.predicates = append(bd.mutation.predicates, ps...)
bd.mutation.Where(ps...)
return bd
}

@@ -46,6 +46,9 @@ func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(bd.hooks) - 1; i >= 0; i-- {
if bd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bd.mutation); err != nil {

@@ -20,6 +20,7 @@ type BouncerQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Bouncer

@@ -46,6 +47,13 @@ func (bq *BouncerQuery) Offset(offset int) *BouncerQuery {
return bq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery {
bq.unique = &unique
return bq
}

// Order adds an order step to the query.
func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery {
bq.order = append(bq.order, o...)

@@ -279,8 +287,8 @@ func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy
// Select(bouncer.FieldCreatedAt).
// Scan(ctx, &v)
//
func (bq *BouncerQuery) Select(field string, fields ...string) *BouncerSelect {
bq.fields = append([]string{field}, fields...)
func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect {
bq.fields = append(bq.fields, fields...)
return &BouncerSelect{BouncerQuery: bq}
}

@@ -352,6 +360,9 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
From: bq.sql,
Unique: true,
}
if unique := bq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := bq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID)

@@ -377,7 +388,7 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
if ps := bq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, bouncer.ValidColumn)
ps[i](selector)
}
}
}

@@ -387,16 +398,20 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(bq.driver.Dialect())
t1 := builder.Table(bouncer.Table)
selector := builder.Select(t1.Columns(bouncer.Columns...)...).From(t1)
columns := bq.fields
if len(columns) == 0 {
columns = bouncer.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if bq.sql != nil {
selector = bq.sql
selector.Select(selector.Columns(bouncer.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range bq.predicates {
p(selector)
}
for _, p := range bq.order {
p(selector, bouncer.ValidColumn)
p(selector)
}
if offset := bq.offset; offset != nil {
// limit is mandatory for offset clause. We start

@@ -658,13 +673,24 @@ func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}

func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector {
selector := bgb.sql
columns := make([]string, 0, len(bgb.fields)+len(bgb.fns))
columns = append(columns, bgb.fields...)
selector := bgb.sql.Select()
aggregation := make([]string, 0, len(bgb.fns))
for _, fn := range bgb.fns {
columns = append(columns, fn(selector, bouncer.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(bgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(bgb.fields)+len(bgb.fns))
for _, f := range bgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(bgb.fields...)...)
}

// BouncerSelect is the builder for selecting fields of Bouncer entities.

@@ -880,16 +906,10 @@ func (bs *BouncerSelect) BoolX(ctx context.Context) bool {

func (bs *BouncerSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := bs.sqlQuery().Query()
query, args := bs.sql.Query()
if err := bs.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}

func (bs *BouncerSelect) sqlQuery() sql.Querier {
selector := bs.sql
selector.Select(selector.Columns(bs.fields...)...)
return selector
}

@@ -21,9 +21,9 @@ type BouncerUpdate struct {
mutation *BouncerMutation
}

// Where adds a new predicate for the BouncerUpdate builder.
// Where appends a list predicates to the BouncerUpdate builder.
func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate {
bu.mutation.predicates = append(bu.mutation.predicates, ps...)
bu.mutation.Where(ps...)
return bu
}

@@ -192,6 +192,9 @@ func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(bu.hooks) - 1; i >= 0; i-- {
if bu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bu.mutation); err != nil {

@@ -338,8 +341,8 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{bouncer.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}

@@ -349,6 +352,7 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
// BouncerUpdateOne is the builder for updating a single Bouncer entity.
type BouncerUpdateOne struct {
config
fields []string
hooks []Hook
mutation *BouncerMutation
}

@@ -498,6 +502,13 @@ func (buo *BouncerUpdateOne) Mutation() *BouncerMutation {
return buo.mutation
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne {
buo.fields = append([]string{field}, fields...)
return buo
}

// Save executes the query and returns the updated Bouncer entity.
func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) {
var (

@@ -518,6 +529,9 @@ func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) {
return node, err
})
for i := len(buo.hooks) - 1; i >= 0; i-- {
if buo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = buo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, buo.mutation); err != nil {

@@ -565,6 +579,18 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Bouncer.ID for update")}
}
_spec.Node.ID.Value = id
if fields := buo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID)
for _, f := range fields {
if !bouncer.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != bouncer.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := buo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {

@@ -672,8 +698,8 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
if err = sqlgraph.UpdateNode(ctx, buo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{bouncer.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}
@@ -223,7 +223,9 @@ func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne {

// Query returns a query builder for Alert.
func (c *AlertClient) Query() *AlertQuery {
return &AlertQuery{config: c.config}
return &AlertQuery{
config: c.config,
}
}

// Get returns a Alert entity by its id.

@@ -375,7 +377,9 @@ func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne {

// Query returns a query builder for Bouncer.
func (c *BouncerClient) Query() *BouncerQuery {
return &BouncerQuery{config: c.config}
return &BouncerQuery{
config: c.config,
}
}

// Get returns a Bouncer entity by its id.

@@ -463,7 +467,9 @@ func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne {

// Query returns a query builder for Decision.
func (c *DecisionClient) Query() *DecisionQuery {
return &DecisionQuery{config: c.config}
return &DecisionQuery{
config: c.config,
}
}

// Get returns a Decision entity by its id.

@@ -567,7 +573,9 @@ func (c *EventClient) DeleteOneID(id int) *EventDeleteOne {

// Query returns a query builder for Event.
func (c *EventClient) Query() *EventQuery {
return &EventQuery{config: c.config}
return &EventQuery{
config: c.config,
}
}

// Get returns a Event entity by its id.

@@ -671,7 +679,9 @@ func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne {

// Query returns a query builder for Machine.
func (c *MachineClient) Query() *MachineQuery {
return &MachineQuery{config: c.config}
return &MachineQuery{
config: c.config,
}
}

// Get returns a Machine entity by its id.

@@ -775,7 +785,9 @@ func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne {

// Query returns a query builder for Meta.
func (c *MetaClient) Query() *MetaQuery {
return &MetaQuery{config: c.config}
return &MetaQuery{
config: c.config,
}
}

// Get returns a Meta entity by its id.
@ -80,15 +80,15 @@ func (*Decision) scanValues(columns []string) ([]interface{}, error) {
|
|||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case decision.FieldSimulated:
|
||||
values[i] = &sql.NullBool{}
|
||||
values[i] = new(sql.NullBool)
|
||||
case decision.FieldID, decision.FieldStartIP, decision.FieldEndIP, decision.FieldStartSuffix, decision.FieldEndSuffix, decision.FieldIPSize:
|
||||
values[i] = &sql.NullInt64{}
|
||||
values[i] = new(sql.NullInt64)
|
||||
case decision.FieldScenario, decision.FieldType, decision.FieldScope, decision.FieldValue, decision.FieldOrigin:
|
||||
values[i] = &sql.NullString{}
|
||||
values[i] = new(sql.NullString)
|
||||
case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil:
|
||||
values[i] = &sql.NullTime{}
|
||||
values[i] = new(sql.NullTime)
|
||||
case decision.ForeignKeys[0]: // alert_decisions
|
||||
values[i] = &sql.NullInt64{}
|
||||
values[i] = new(sql.NullInt64)
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i])
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ const (
|
|||
EdgeOwner = "owner"
|
||||
// Table holds the table name of the decision in the database.
|
||||
Table = "decisions"
|
||||
// OwnerTable is the table the holds the owner relation/edge.
|
||||
// OwnerTable is the table that holds the owner relation/edge.
|
||||
OwnerTable = "decisions"
|
||||
// OwnerInverseTable is the table name for the Alert entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "alert" package.
|
||||
|
|
|
@ -215,11 +215,17 @@ func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) {
|
|||
return nil, err
|
||||
}
|
||||
dc.mutation = mutation
|
||||
node, err = dc.sqlSave(ctx)
|
||||
if node, err = dc.sqlSave(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &node.ID
|
||||
mutation.done = true
|
||||
return node, err
|
||||
})
|
||||
for i := len(dc.hooks) - 1; i >= 0; i-- {
|
||||
if dc.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = dc.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, dc.mutation); err != nil {
|
||||
|
@ -238,6 +244,19 @@ func (dc *DecisionCreate) SaveX(ctx context.Context) *Decision {
|
|||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dc *DecisionCreate) Exec(ctx context.Context) error {
|
||||
_, err := dc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
func (dc *DecisionCreate) ExecX(ctx context.Context) {
if err := dc.Exec(ctx); err != nil {
panic(err)
}
}

// defaults sets the default values of the builder before save.
func (dc *DecisionCreate) defaults() {
if _, ok := dc.mutation.CreatedAt(); !ok {

@@ -257,31 +276,31 @@ func (dc *DecisionCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (dc *DecisionCreate) check() error {
if _, ok := dc.mutation.CreatedAt(); !ok {
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
}
if _, ok := dc.mutation.UpdatedAt(); !ok {
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
}
if _, ok := dc.mutation.Until(); !ok {
return &ValidationError{Name: "until", err: errors.New("ent: missing required field \"until\"")}
return &ValidationError{Name: "until", err: errors.New(`ent: missing required field "until"`)}
}
if _, ok := dc.mutation.Scenario(); !ok {
return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")}
return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "scenario"`)}
}
if _, ok := dc.mutation.GetType(); !ok {
return &ValidationError{Name: "type", err: errors.New("ent: missing required field \"type\"")}
return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "type"`)}
}
if _, ok := dc.mutation.Scope(); !ok {
return &ValidationError{Name: "scope", err: errors.New("ent: missing required field \"scope\"")}
return &ValidationError{Name: "scope", err: errors.New(`ent: missing required field "scope"`)}
}
if _, ok := dc.mutation.Value(); !ok {
return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")}
return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "value"`)}
}
if _, ok := dc.mutation.Origin(); !ok {
return &ValidationError{Name: "origin", err: errors.New("ent: missing required field \"origin\"")}
return &ValidationError{Name: "origin", err: errors.New(`ent: missing required field "origin"`)}
}
if _, ok := dc.mutation.Simulated(); !ok {
return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")}
return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "simulated"`)}
}
return nil
}

@@ -289,8 +308,8 @@ func (dc *DecisionCreate) check() error {
func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) {
_node, _spec := dc.createSpec()
if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

@@ -474,19 +493,23 @@ func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) {
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, dcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
}
}
mutation.done = true
if err != nil {
return nil, err
}
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
mutation.id = &nodes[i].ID
mutation.done = true
if specs[i].ID.Value != nil {
id := specs[i].ID.Value.(int64)
nodes[i].ID = int(id)
}
return nodes[i], nil
})
for i := len(builder.hooks) - 1; i >= 0; i-- {

@@ -511,3 +534,16 @@ func (dcb *DecisionCreateBulk) SaveX(ctx context.Context) []*Decision {
}
return v
}

// Exec executes the query.
func (dcb *DecisionCreateBulk) Exec(ctx context.Context) error {
_, err := dcb.Save(ctx)
return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dcb *DecisionCreateBulk) ExecX(ctx context.Context) {
if err := dcb.Exec(ctx); err != nil {
panic(err)
}
}

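ent 0.9 adds Exec and ExecX variants to every create and create-bulk builder, so callers that only need the side effect no longer have to discard the returned entity. A minimal sketch of how the new helpers read from calling code; the client value, context, and field values are illustrative and not part of this commit:

// Sketch only: assumes an initialized *ent.Client from this generated package.
func createBanDecision(ctx context.Context, client *ent.Client) error {
    return client.Decision.Create().
        SetUntil(time.Now().Add(4 * time.Hour)).
        SetScenario("crowdsecurity/ssh-bf").
        SetType("ban").
        SetScope("Ip").
        SetValue("192.0.2.1").
        SetOrigin("cscli").
        SetSimulated(false).
        Exec(ctx) // Exec discards the created entity; ExecX would panic on error instead
}
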
@@ -20,9 +20,9 @@ type DecisionDelete struct {
mutation *DecisionMutation
}

// Where adds a new predicate to the DecisionDelete builder.
// Where appends a list predicates to the DecisionDelete builder.
func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete {
dd.mutation.predicates = append(dd.mutation.predicates, ps...)
dd.mutation.Where(ps...)
return dd
}

@@ -46,6 +46,9 @@ func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) {
return affected, err
})
for i := len(dd.hooks) - 1; i >= 0; i-- {
if dd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dd.mutation); err != nil {

@@ -21,6 +21,7 @@ type DecisionQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
predicates []predicate.Decision

@@ -50,6 +51,13 @@ func (dq *DecisionQuery) Offset(offset int) *DecisionQuery {
return dq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery {
dq.unique = &unique
return dq
}

// Order adds an order step to the query.
func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery {
dq.order = append(dq.order, o...)

@@ -317,8 +325,8 @@ func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupB
// Select(decision.FieldCreatedAt).
// Scan(ctx, &v)
//
func (dq *DecisionQuery) Select(field string, fields ...string) *DecisionSelect {
dq.fields = append([]string{field}, fields...)
func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect {
dq.fields = append(dq.fields, fields...)
return &DecisionSelect{DecisionQuery: dq}
}

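Two query-builder changes surface here: Select drops its mandatory first argument and becomes fully variadic, and the new Unique option lets callers disable the default de-duplication of records. A hedged usage sketch against this builder; the client and ctx values are assumptions:

// Sketch: variadic Select plus the new Unique toggle.
var scenarios []string
err := client.Decision.Query().
    Unique(false).                  // keep duplicate rows instead of the default unique result set
    Select(decision.FieldScenario). // zero, one, or many columns may be passed
    Scan(ctx, &scenarios)
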
@@ -377,11 +385,14 @@ func (dq *DecisionQuery) sqlAll(ctx context.Context) ([]*Decision, error) {
ids := make([]int, 0, len(nodes))
nodeids := make(map[int][]*Decision)
for i := range nodes {
fk := nodes[i].alert_decisions
if fk != nil {
ids = append(ids, *fk)
nodeids[*fk] = append(nodeids[*fk], nodes[i])
if nodes[i].alert_decisions == nil {
continue
}
fk := *nodes[i].alert_decisions
if _, ok := nodeids[fk]; !ok {
ids = append(ids, fk)
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)

@@ -428,6 +439,9 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
From: dq.sql,
Unique: true,
}
if unique := dq.unique; unique != nil {
_spec.Unique = *unique
}
if fields := dq.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID)

@@ -453,7 +467,7 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
if ps := dq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector, decision.ValidColumn)
ps[i](selector)
}
}
}

@@ -463,16 +477,20 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(decision.Table)
selector := builder.Select(t1.Columns(decision.Columns...)...).From(t1)
columns := dq.fields
if len(columns) == 0 {
columns = decision.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if dq.sql != nil {
selector = dq.sql
selector.Select(selector.Columns(decision.Columns...)...)
selector.Select(selector.Columns(columns...)...)
}
for _, p := range dq.predicates {
p(selector)
}
for _, p := range dq.order {
p(selector, decision.ValidColumn)
p(selector)
}
if offset := dq.offset; offset != nil {
// limit is mandatory for offset clause. We start

@@ -734,13 +752,24 @@ func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}

func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector {
selector := dgb.sql
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
columns = append(columns, dgb.fields...)
selector := dgb.sql.Select()
aggregation := make([]string, 0, len(dgb.fns))
for _, fn := range dgb.fns {
columns = append(columns, fn(selector, decision.ValidColumn))
aggregation = append(aggregation, fn(selector))
}
return selector.Select(columns...).GroupBy(dgb.fields...)
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
for _, f := range dgb.fields {
columns = append(columns, selector.C(f))
}
for _, c := range aggregation {
columns = append(columns, c)
}
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(dgb.fields...)...)
}

// DecisionSelect is the builder for selecting fields of Decision entities.

@@ -956,16 +985,10 @@ func (ds *DecisionSelect) BoolX(ctx context.Context) bool {

func (ds *DecisionSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ds.sqlQuery().Query()
query, args := ds.sql.Query()
if err := ds.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}

func (ds *DecisionSelect) sqlQuery() sql.Querier {
selector := ds.sql
selector.Select(selector.Columns(ds.fields...)...)
return selector
}

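The group-by builder now derives its selected columns from the selector itself and qualifies them through selector.C, but from calling code the API is unchanged. A sketch of typical usage against this builder; the struct tags, client, and ctx are assumptions, not part of the commit:

// Sketch: count decisions per scenario with the reworked aggregation path.
var rows []struct {
    Scenario string `json:"scenario"`
    Count    int    `json:"count"`
}
err := client.Decision.Query().
    GroupBy(decision.FieldScenario).
    Aggregate(ent.Count()).
    Scan(ctx, &rows)
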
@@ -22,9 +22,9 @@ type DecisionUpdate struct {
mutation *DecisionMutation
}

// Where adds a new predicate for the DecisionUpdate builder.
// Where appends a list predicates to the DecisionUpdate builder.
func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate {
du.mutation.predicates = append(du.mutation.predicates, ps...)
du.mutation.Where(ps...)
return du
}

@@ -291,6 +291,9 @@ func (du *DecisionUpdate) Save(ctx context.Context) (int, error) {
return affected, err
})
for i := len(du.hooks) - 1; i >= 0; i-- {
if du.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = du.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, du.mutation); err != nil {

@@ -541,8 +544,8 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{decision.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return 0, err
}

@@ -552,6 +555,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
// DecisionUpdateOne is the builder for updating a single Decision entity.
type DecisionUpdateOne struct {
config
fields []string
hooks []Hook
mutation *DecisionMutation
}

@@ -799,6 +803,13 @@ func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne {
return duo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne {
duo.fields = append([]string{field}, fields...)
return duo
}

// Save executes the query and returns the updated Decision entity.
func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) {
var (

@@ -819,6 +830,9 @@ func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) {
return node, err
})
for i := len(duo.hooks) - 1; i >= 0; i-- {
if duo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = duo.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, duo.mutation); err != nil {

@@ -866,6 +880,18 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Decision.ID for update")}
}
_spec.Node.ID.Value = id
if fields := duo.fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID)
for _, f := range fields {
if !decision.ValidColumn(f) {
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
if f != decision.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, f)
}
}
}
if ps := duo.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {

@@ -1077,8 +1103,8 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{decision.Label}
} else if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
} else if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{err.Error(), err}
}
return nil, err
}

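The single-row update builders gain a Select method, so the entity handed back by Save only needs to read the chosen columns back from the database. A hedged sketch of the new call, with the id, client, and selected fields being illustrative assumptions:

// Sketch: restrict the read-back of an UpdateOne to a couple of columns.
d, err := client.Decision.UpdateOneID(id).
    SetUntil(time.Now().Add(24 * time.Hour)).
    Select(decision.FieldID, decision.FieldUntil).
    Save(ctx)
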
@@ -7,9 +7,13 @@ import (
"fmt"

"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
)

// ent aliases to avoid import conflicts in user's code.

@@ -25,36 +29,60 @@ type (
)

// OrderFunc applies an ordering on the sql selector.
type OrderFunc func(*sql.Selector, func(string) bool)
type OrderFunc func(*sql.Selector)

// columnChecker returns a function indicates if the column exists in the given column.
func columnChecker(table string) func(string) error {
checks := map[string]func(string) bool{
alert.Table: alert.ValidColumn,
bouncer.Table: bouncer.ValidColumn,
decision.Table: decision.ValidColumn,
event.Table: event.ValidColumn,
machine.Table: machine.ValidColumn,
meta.Table: meta.ValidColumn,
}
check, ok := checks[table]
if !ok {
return func(string) error {
return fmt.Errorf("unknown table %q", table)
}
}
return func(column string) error {
if !check(column) {
return fmt.Errorf("unknown column %q for table %q", column, table)
}
return nil
}
}

// Asc applies the given fields in ASC order.
func Asc(fields ...string) OrderFunc {
return func(s *sql.Selector, check func(string) bool) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if check(f) {
s.OrderBy(sql.Asc(f))
} else {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
if err := check(f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Asc(s.C(f)))
}
}
}

// Desc applies the given fields in DESC order.
func Desc(fields ...string) OrderFunc {
return func(s *sql.Selector, check func(string) bool) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if check(f) {
s.OrderBy(sql.Desc(f))
} else {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
if err := check(f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Desc(s.C(f)))
}
}
}

// AggregateFunc applies an aggregation step on the group-by traversal/selector.
type AggregateFunc func(*sql.Selector, func(string) bool) string
type AggregateFunc func(*sql.Selector) string

// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
//

@@ -63,23 +91,24 @@ type AggregateFunc func(*sql.Selector, func(string) bool) string
// Scan(ctx, &v)
//
func As(fn AggregateFunc, end string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
return sql.As(fn(s, check), end)
return func(s *sql.Selector) string {
return sql.As(fn(s), end)
}
}

// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
return func(s *sql.Selector, _ func(string) bool) string {
return func(s *sql.Selector) string {
return sql.Count("*")
}
}

// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Max(s.C(field))

@@ -88,9 +117,10 @@ func Max(field string) AggregateFunc {

// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Avg(s.C(field))

@@ -99,9 +129,10 @@ func Mean(field string) AggregateFunc {

// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Min(s.C(field))

@@ -110,9 +141,10 @@ func Min(field string) AggregateFunc {

// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
return func(s *sql.Selector, check func(string) bool) string {
if !check(field) {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
return sql.Sum(s.C(field))

@@ -135,7 +167,7 @@ func (e *ValidationError) Unwrap() error {
return e.err
}

// IsValidationError returns a boolean indicating whether the error is a validaton error.
// IsValidationError returns a boolean indicating whether the error is a validation error.
func IsValidationError(err error) bool {
if err == nil {
return false

@@ -235,21 +267,3 @@ func IsConstraintError(err error) bool {
var e *ConstraintError
return errors.As(err, &e)
}

func isSQLConstraintError(err error) (*ConstraintError, bool) {
if sqlgraph.IsConstraintError(err) {
return &ConstraintError{err.Error(), err}, true
}
return nil, false
}

// rollback calls tx.Rollback and wraps the given error with the rollback error if present.
func rollback(tx dialect.Tx, err error) error {
if rerr := tx.Rollback(); rerr != nil {
err = fmt.Errorf("%w: %v", err, rerr)
}
if err, ok := isSQLConstraintError(err); ok {
return err
}
return err
}

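This is the core API shift of the entgo 0.9 update: OrderFunc and AggregateFunc no longer receive a column-validation callback; instead the helpers resolve the table from the selector and validate columns through the new columnChecker, and order-by columns are qualified with s.C. From application code, ordering still looks as before. A hedged sketch of a caller outside this package; client and ctx are assumptions:

// Sketch: ordering with the ent 0.9 helpers; validation now happens inside Asc/Desc.
decisions, err := client.Decision.Query().
    Order(ent.Asc(decision.FieldCreatedAt)).
    Limit(10).
    All(ctx)
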
@@ -60,13 +60,13 @@ func (*Event) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case event.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case event.FieldSerialized:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case event.ForeignKeys[0]: // alert_events
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Event", columns[i])
}

@@ -23,7 +23,7 @@ const (
EdgeOwner = "owner"
// Table holds the table name of the event in the database.
Table = "events"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "events"
// OwnerInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

@ -107,11 +107,17 @@ func (ec *EventCreate) Save(ctx context.Context) (*Event, error) {
|
|||
return nil, err
|
||||
}
|
||||
ec.mutation = mutation
|
||||
node, err = ec.sqlSave(ctx)
|
||||
if node, err = ec.sqlSave(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &node.ID
|
||||
mutation.done = true
|
||||
return node, err
|
||||
})
|
||||
for i := len(ec.hooks) - 1; i >= 0; i-- {
|
||||
if ec.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = ec.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, ec.mutation); err != nil {
|
||||
|
@ -130,6 +136,19 @@ func (ec *EventCreate) SaveX(ctx context.Context) *Event {
|
|||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (ec *EventCreate) Exec(ctx context.Context) error {
|
||||
_, err := ec.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (ec *EventCreate) ExecX(ctx context.Context) {
|
||||
if err := ec.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (ec *EventCreate) defaults() {
|
||||
if _, ok := ec.mutation.CreatedAt(); !ok {
|
||||
|
@ -145,20 +164,20 @@ func (ec *EventCreate) defaults() {
|
|||
// check runs all checks and user-defined validators on the builder.
|
||||
func (ec *EventCreate) check() error {
|
||||
if _, ok := ec.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
|
||||
}
|
||||
if _, ok := ec.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
|
||||
}
|
||||
if _, ok := ec.mutation.Time(); !ok {
|
||||
return &ValidationError{Name: "time", err: errors.New("ent: missing required field \"time\"")}
|
||||
return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "time"`)}
|
||||
}
|
||||
if _, ok := ec.mutation.Serialized(); !ok {
|
||||
return &ValidationError{Name: "serialized", err: errors.New("ent: missing required field \"serialized\"")}
|
||||
return &ValidationError{Name: "serialized", err: errors.New(`ent: missing required field "serialized"`)}
|
||||
}
|
||||
if v, ok := ec.mutation.Serialized(); ok {
|
||||
if err := event.SerializedValidator(v); err != nil {
|
||||
return &ValidationError{Name: "serialized", err: fmt.Errorf("ent: validator failed for field \"serialized\": %w", err)}
|
||||
return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "serialized": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -167,8 +186,8 @@ func (ec *EventCreate) check() error {
|
|||
func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) {
|
||||
_node, _spec := ec.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil {
|
||||
if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -272,19 +291,23 @@ func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) {
|
|||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, ecb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
|
||||
if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
if err = sqlgraph.BatchCreate(ctx, ecb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutation.done = true
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
mutation.id = &nodes[i].ID
|
||||
mutation.done = true
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
|
@ -309,3 +332,16 @@ func (ecb *EventCreateBulk) SaveX(ctx context.Context) []*Event {
|
|||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (ecb *EventCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := ecb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (ecb *EventCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := ecb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,9 @@ type EventDelete struct {
|
|||
mutation *EventMutation
|
||||
}
|
||||
|
||||
// Where adds a new predicate to the EventDelete builder.
|
||||
// Where appends a list predicates to the EventDelete builder.
|
||||
func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete {
|
||||
ed.mutation.predicates = append(ed.mutation.predicates, ps...)
|
||||
ed.mutation.Where(ps...)
|
||||
return ed
|
||||
}
|
||||
|
||||
|
@ -46,6 +46,9 @@ func (ed *EventDelete) Exec(ctx context.Context) (int, error) {
|
|||
return affected, err
|
||||
})
|
||||
for i := len(ed.hooks) - 1; i >= 0; i-- {
|
||||
if ed.hooks[i] == nil {
|
||||
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = ed.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, ed.mutation); err != nil {
|
||||
|
|
|
@ -21,6 +21,7 @@ type EventQuery struct {
|
|||
config
|
||||
limit *int
|
||||
offset *int
|
||||
unique *bool
|
||||
order []OrderFunc
|
||||
fields []string
|
||||
predicates []predicate.Event
|
||||
|
@ -50,6 +51,13 @@ func (eq *EventQuery) Offset(offset int) *EventQuery {
|
|||
return eq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (eq *EventQuery) Unique(unique bool) *EventQuery {
|
||||
eq.unique = &unique
|
||||
return eq
|
||||
}
|
||||
|
||||
// Order adds an order step to the query.
|
||||
func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery {
|
||||
eq.order = append(eq.order, o...)
|
||||
|
@ -317,8 +325,8 @@ func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy {
|
|||
// Select(event.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
//
|
||||
func (eq *EventQuery) Select(field string, fields ...string) *EventSelect {
|
||||
eq.fields = append([]string{field}, fields...)
|
||||
func (eq *EventQuery) Select(fields ...string) *EventSelect {
|
||||
eq.fields = append(eq.fields, fields...)
|
||||
return &EventSelect{EventQuery: eq}
|
||||
}
|
||||
|
||||
|
@ -377,11 +385,14 @@ func (eq *EventQuery) sqlAll(ctx context.Context) ([]*Event, error) {
|
|||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*Event)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].alert_events
|
||||
if fk != nil {
|
||||
ids = append(ids, *fk)
|
||||
nodeids[*fk] = append(nodeids[*fk], nodes[i])
|
||||
if nodes[i].alert_events == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].alert_events
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
query.Where(alert.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
|
@ -428,6 +439,9 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
From: eq.sql,
|
||||
Unique: true,
|
||||
}
|
||||
if unique := eq.unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
}
|
||||
if fields := eq.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, event.FieldID)
|
||||
|
@ -453,7 +467,7 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
if ps := eq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector, event.ValidColumn)
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -463,16 +477,20 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(eq.driver.Dialect())
|
||||
t1 := builder.Table(event.Table)
|
||||
selector := builder.Select(t1.Columns(event.Columns...)...).From(t1)
|
||||
columns := eq.fields
|
||||
if len(columns) == 0 {
|
||||
columns = event.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if eq.sql != nil {
|
||||
selector = eq.sql
|
||||
selector.Select(selector.Columns(event.Columns...)...)
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
for _, p := range eq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range eq.order {
|
||||
p(selector, event.ValidColumn)
|
||||
p(selector)
|
||||
}
|
||||
if offset := eq.offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
|
@ -734,13 +752,24 @@ func (egb *EventGroupBy) sqlScan(ctx context.Context, v interface{}) error {
|
|||
}
|
||||
|
||||
func (egb *EventGroupBy) sqlQuery() *sql.Selector {
|
||||
selector := egb.sql
|
||||
columns := make([]string, 0, len(egb.fields)+len(egb.fns))
|
||||
columns = append(columns, egb.fields...)
|
||||
selector := egb.sql.Select()
|
||||
aggregation := make([]string, 0, len(egb.fns))
|
||||
for _, fn := range egb.fns {
|
||||
columns = append(columns, fn(selector, event.ValidColumn))
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
return selector.Select(columns...).GroupBy(egb.fields...)
|
||||
// If no columns were selected in a custom aggregation function, the default
|
||||
// selection is the fields used for "group-by", and the aggregation functions.
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(egb.fields)+len(egb.fns))
|
||||
for _, f := range egb.fields {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
for _, c := range aggregation {
|
||||
columns = append(columns, c)
|
||||
}
|
||||
selector.Select(columns...)
|
||||
}
|
||||
return selector.GroupBy(selector.Columns(egb.fields...)...)
|
||||
}
|
||||
|
||||
// EventSelect is the builder for selecting fields of Event entities.
|
||||
|
@ -956,16 +985,10 @@ func (es *EventSelect) BoolX(ctx context.Context) bool {
|
|||
|
||||
func (es *EventSelect) sqlScan(ctx context.Context, v interface{}) error {
|
||||
rows := &sql.Rows{}
|
||||
query, args := es.sqlQuery().Query()
|
||||
query, args := es.sql.Query()
|
||||
if err := es.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
func (es *EventSelect) sqlQuery() sql.Querier {
|
||||
selector := es.sql
|
||||
selector.Select(selector.Columns(es.fields...)...)
|
||||
return selector
|
||||
}
|
||||
|
|
|
@ -22,9 +22,9 @@ type EventUpdate struct {
|
|||
mutation *EventMutation
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the EventUpdate builder.
|
||||
// Where appends a list predicates to the EventUpdate builder.
|
||||
func (eu *EventUpdate) Where(ps ...predicate.Event) *EventUpdate {
|
||||
eu.mutation.predicates = append(eu.mutation.predicates, ps...)
|
||||
eu.mutation.Where(ps...)
|
||||
return eu
|
||||
}
|
||||
|
||||
|
@ -124,6 +124,9 @@ func (eu *EventUpdate) Save(ctx context.Context) (int, error) {
|
|||
return affected, err
|
||||
})
|
||||
for i := len(eu.hooks) - 1; i >= 0; i-- {
|
||||
if eu.hooks[i] == nil {
|
||||
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = eu.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, eu.mutation); err != nil {
|
||||
|
@ -249,8 +252,8 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
if n, err = sqlgraph.UpdateNodes(ctx, eu.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{event.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
@ -260,6 +263,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
// EventUpdateOne is the builder for updating a single Event entity.
|
||||
type EventUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *EventMutation
|
||||
}
|
||||
|
@ -334,6 +338,13 @@ func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne {
|
|||
return euo
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne {
|
||||
euo.fields = append([]string{field}, fields...)
|
||||
return euo
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated Event entity.
|
||||
func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) {
|
||||
var (
|
||||
|
@ -360,6 +371,9 @@ func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) {
|
|||
return node, err
|
||||
})
|
||||
for i := len(euo.hooks) - 1; i >= 0; i-- {
|
||||
if euo.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = euo.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, euo.mutation); err != nil {
|
||||
|
@ -417,6 +431,18 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
|
|||
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Event.ID for update")}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := euo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, event.FieldID)
|
||||
for _, f := range fields {
|
||||
if !event.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != event.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := euo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
|
@ -493,8 +519,8 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
|
|||
if err = sqlgraph.UpdateNode(ctx, euo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{event.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
|
4
pkg/database/ent/generate.go
Normal file

@@ -0,0 +1,4 @@
package ent

//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema

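The new generate.go pins code generation to the module's own entgo.io/ent version via -mod=mod, so regenerating this package picks up the v0.9.1 templates. A typical invocation from the repository root (not shown in this commit, and assumed from the directive above) would be:

go generate ./pkg/database/ent/...
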
@@ -63,13 +63,13 @@ func (*Machine) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case machine.FieldIsValidated:
values[i] = &sql.NullBool{}
values[i] = new(sql.NullBool)
case machine.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case machine.FieldCreatedAt, machine.FieldUpdatedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i])
}

@@ -33,7 +33,7 @@ const (
EdgeAlerts = "alerts"
// Table holds the table name of the machine in the database.
Table = "machines"
// AlertsTable is the table the holds the alerts relation/edge.
// AlertsTable is the table that holds the alerts relation/edge.
AlertsTable = "alerts"
// AlertsInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

@ -165,11 +165,17 @@ func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) {
|
|||
return nil, err
|
||||
}
|
||||
mc.mutation = mutation
|
||||
node, err = mc.sqlSave(ctx)
|
||||
if node, err = mc.sqlSave(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &node.ID
|
||||
mutation.done = true
|
||||
return node, err
|
||||
})
|
||||
for i := len(mc.hooks) - 1; i >= 0; i-- {
|
||||
if mc.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = mc.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, mc.mutation); err != nil {
|
||||
|
@ -188,6 +194,19 @@ func (mc *MachineCreate) SaveX(ctx context.Context) *Machine {
|
|||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (mc *MachineCreate) Exec(ctx context.Context) error {
|
||||
_, err := mc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (mc *MachineCreate) ExecX(ctx context.Context) {
|
||||
if err := mc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (mc *MachineCreate) defaults() {
|
||||
if _, ok := mc.mutation.CreatedAt(); !ok {
|
||||
|
@ -207,27 +226,27 @@ func (mc *MachineCreate) defaults() {
|
|||
// check runs all checks and user-defined validators on the builder.
|
||||
func (mc *MachineCreate) check() error {
|
||||
if _, ok := mc.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
|
||||
}
|
||||
if _, ok := mc.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
|
||||
}
|
||||
if _, ok := mc.mutation.MachineId(); !ok {
|
||||
return &ValidationError{Name: "machineId", err: errors.New("ent: missing required field \"machineId\"")}
|
||||
return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "machineId"`)}
|
||||
}
|
||||
if _, ok := mc.mutation.Password(); !ok {
|
||||
return &ValidationError{Name: "password", err: errors.New("ent: missing required field \"password\"")}
|
||||
return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "password"`)}
|
||||
}
|
||||
if _, ok := mc.mutation.IpAddress(); !ok {
|
||||
return &ValidationError{Name: "ipAddress", err: errors.New("ent: missing required field \"ipAddress\"")}
|
||||
return &ValidationError{Name: "ipAddress", err: errors.New(`ent: missing required field "ipAddress"`)}
|
||||
}
|
||||
if v, ok := mc.mutation.Scenarios(); ok {
|
||||
if err := machine.ScenariosValidator(v); err != nil {
|
||||
return &ValidationError{Name: "scenarios", err: fmt.Errorf("ent: validator failed for field \"scenarios\": %w", err)}
|
||||
return &ValidationError{Name: "scenarios", err: fmt.Errorf(`ent: validator failed for field "scenarios": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := mc.mutation.IsValidated(); !ok {
|
||||
return &ValidationError{Name: "isValidated", err: errors.New("ent: missing required field \"isValidated\"")}
|
||||
return &ValidationError{Name: "isValidated", err: errors.New(`ent: missing required field "isValidated"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -235,8 +254,8 @@ func (mc *MachineCreate) check() error {
|
|||
func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) {
|
||||
_node, _spec := mc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
|
||||
if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -379,19 +398,23 @@ func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) {
|
|||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
|
||||
if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutation.done = true
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
mutation.id = &nodes[i].ID
|
||||
mutation.done = true
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
|
@ -416,3 +439,16 @@ func (mcb *MachineCreateBulk) SaveX(ctx context.Context) []*Machine {
|
|||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (mcb *MachineCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := mcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (mcb *MachineCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := mcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,9 @@ type MachineDelete struct {
|
|||
mutation *MachineMutation
|
||||
}
|
||||
|
||||
// Where adds a new predicate to the MachineDelete builder.
|
||||
// Where appends a list predicates to the MachineDelete builder.
|
||||
func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete {
|
||||
md.mutation.predicates = append(md.mutation.predicates, ps...)
|
||||
md.mutation.Where(ps...)
|
||||
return md
|
||||
}
|
||||
|
||||
|
@ -46,6 +46,9 @@ func (md *MachineDelete) Exec(ctx context.Context) (int, error) {
|
|||
return affected, err
|
||||
})
|
||||
for i := len(md.hooks) - 1; i >= 0; i-- {
|
||||
if md.hooks[i] == nil {
|
||||
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = md.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, md.mutation); err != nil {
|
||||
|
|
|
@ -22,6 +22,7 @@ type MachineQuery struct {
|
|||
config
|
||||
limit *int
|
||||
offset *int
|
||||
unique *bool
|
||||
order []OrderFunc
|
||||
fields []string
|
||||
predicates []predicate.Machine
|
||||
|
@ -50,6 +51,13 @@ func (mq *MachineQuery) Offset(offset int) *MachineQuery {
|
|||
return mq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (mq *MachineQuery) Unique(unique bool) *MachineQuery {
|
||||
mq.unique = &unique
|
||||
return mq
|
||||
}
|
||||
|
||||
// Order adds an order step to the query.
|
||||
func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery {
|
||||
mq.order = append(mq.order, o...)
|
||||
|
@ -317,8 +325,8 @@ func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy
|
|||
// Select(machine.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
//
|
||||
func (mq *MachineQuery) Select(field string, fields ...string) *MachineSelect {
|
||||
mq.fields = append([]string{field}, fields...)
|
||||
func (mq *MachineQuery) Select(fields ...string) *MachineSelect {
|
||||
mq.fields = append(mq.fields, fields...)
|
||||
return &MachineSelect{MachineQuery: mq}
|
||||
}
|
||||
|
||||
|
@ -424,6 +432,9 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
From: mq.sql,
|
||||
Unique: true,
|
||||
}
|
||||
if unique := mq.unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
}
|
||||
if fields := mq.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID)
|
||||
|
@ -449,7 +460,7 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
if ps := mq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector, machine.ValidColumn)
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -459,16 +470,20 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
|
|||
func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(mq.driver.Dialect())
|
||||
t1 := builder.Table(machine.Table)
|
||||
selector := builder.Select(t1.Columns(machine.Columns...)...).From(t1)
|
||||
columns := mq.fields
|
||||
if len(columns) == 0 {
|
||||
columns = machine.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if mq.sql != nil {
|
||||
selector = mq.sql
|
||||
selector.Select(selector.Columns(machine.Columns...)...)
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
for _, p := range mq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range mq.order {
|
||||
p(selector, machine.ValidColumn)
|
||||
p(selector)
|
||||
}
|
||||
if offset := mq.offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
|
@ -730,13 +745,24 @@ func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v interface{}) error {
|
|||
}
|
||||
|
||||
func (mgb *MachineGroupBy) sqlQuery() *sql.Selector {
|
||||
selector := mgb.sql
|
||||
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
|
||||
columns = append(columns, mgb.fields...)
|
||||
selector := mgb.sql.Select()
|
||||
aggregation := make([]string, 0, len(mgb.fns))
|
||||
for _, fn := range mgb.fns {
|
||||
columns = append(columns, fn(selector, machine.ValidColumn))
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
return selector.Select(columns...).GroupBy(mgb.fields...)
|
||||
// If no columns were selected in a custom aggregation function, the default
|
||||
// selection is the fields used for "group-by", and the aggregation functions.
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
|
||||
for _, f := range mgb.fields {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
for _, c := range aggregation {
|
||||
columns = append(columns, c)
|
||||
}
|
||||
selector.Select(columns...)
|
||||
}
|
||||
return selector.GroupBy(selector.Columns(mgb.fields...)...)
|
||||
}
|
||||
|
||||
// MachineSelect is the builder for selecting fields of Machine entities.
|
||||
|
@ -952,16 +978,10 @@ func (ms *MachineSelect) BoolX(ctx context.Context) bool {
|
|||
|
||||
func (ms *MachineSelect) sqlScan(ctx context.Context, v interface{}) error {
|
||||
rows := &sql.Rows{}
|
||||
query, args := ms.sqlQuery().Query()
|
||||
query, args := ms.sql.Query()
|
||||
if err := ms.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
func (ms *MachineSelect) sqlQuery() sql.Querier {
|
||||
selector := ms.sql
|
||||
selector.Select(selector.Columns(ms.fields...)...)
|
||||
return selector
|
||||
}
|
||||
|
|
|
@ -22,9 +22,9 @@ type MachineUpdate struct {
|
|||
mutation *MachineMutation
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the MachineUpdate builder.
|
||||
// Where appends a list predicates to the MachineUpdate builder.
|
||||
func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate {
|
||||
mu.mutation.predicates = append(mu.mutation.predicates, ps...)
|
||||
mu.mutation.Where(ps...)
|
||||
return mu
|
||||
}
|
||||
|
||||
|
@ -215,6 +215,9 @@ func (mu *MachineUpdate) Save(ctx context.Context) (int, error) {
|
|||
return affected, err
|
||||
})
|
||||
for i := len(mu.hooks) - 1; i >= 0; i-- {
|
||||
if mu.hooks[i] == nil {
|
||||
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = mu.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, mu.mutation); err != nil {
|
||||
|
@ -412,8 +415,8 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{machine.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
@ -423,6 +426,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
|||
// MachineUpdateOne is the builder for updating a single Machine entity.
|
||||
type MachineUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *MachineMutation
|
||||
}
|
||||
|
@ -588,6 +592,13 @@ func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne {
|
|||
return muo.RemoveAlertIDs(ids...)
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne {
|
||||
muo.fields = append([]string{field}, fields...)
|
||||
return muo
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated Machine entity.
|
||||
func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) {
|
||||
var (
|
||||
|
@ -614,6 +625,9 @@ func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) {
|
|||
return node, err
|
||||
})
|
||||
for i := len(muo.hooks) - 1; i >= 0; i-- {
|
||||
if muo.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = muo.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, muo.mutation); err != nil {
|
||||
|
@ -671,6 +685,18 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
|
|||
return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Machine.ID for update")}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := muo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID)
|
||||
for _, f := range fields {
|
||||
if !machine.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != machine.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := muo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
|
@ -819,8 +845,8 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
|
|||
if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{machine.Label}
|
||||
} else if cerr, ok := isSQLConstraintError(err); ok {
|
||||
err = cerr
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@@ -60,13 +60,13 @@ func (*Meta) scanValues(columns []string) ([]interface{}, error) {
for i := range columns {
switch columns[i] {
case meta.FieldID:
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
case meta.FieldKey, meta.FieldValue:
values[i] = &sql.NullString{}
values[i] = new(sql.NullString)
case meta.FieldCreatedAt, meta.FieldUpdatedAt:
values[i] = &sql.NullTime{}
values[i] = new(sql.NullTime)
case meta.ForeignKeys[0]: // alert_metas
values[i] = &sql.NullInt64{}
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i])
}

@@ -23,7 +23,7 @@ const (
EdgeOwner = "owner"
// Table holds the table name of the meta in the database.
Table = "meta"
// OwnerTable is the table the holds the owner relation/edge.
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "meta"
// OwnerInverseTable is the table name for the Alert entity.
// It exists in this package in order to avoid circular dependency with the "alert" package.

@ -107,11 +107,17 @@ func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) {
|
|||
return nil, err
|
||||
}
|
||||
mc.mutation = mutation
|
||||
node, err = mc.sqlSave(ctx)
|
||||
if node, err = mc.sqlSave(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &node.ID
|
||||
mutation.done = true
|
||||
return node, err
|
||||
})
|
||||
for i := len(mc.hooks) - 1; i >= 0; i-- {
|
||||
if mc.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = mc.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, mc.mutation); err != nil {
|
||||
|
@ -130,6 +136,19 @@ func (mc *MetaCreate) SaveX(ctx context.Context) *Meta {
|
|||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (mc *MetaCreate) Exec(ctx context.Context) error {
|
||||
_, err := mc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (mc *MetaCreate) ExecX(ctx context.Context) {
|
||||
if err := mc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (mc *MetaCreate) defaults() {
|
||||
if _, ok := mc.mutation.CreatedAt(); !ok {
|
||||
|
@@ -145,20 +164,20 @@ func (mc *MetaCreate) defaults() {
// check runs all checks and user-defined validators on the builder.
func (mc *MetaCreate) check() error {
    if _, ok := mc.mutation.CreatedAt(); !ok {
        return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
        return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "created_at"`)}
    }
    if _, ok := mc.mutation.UpdatedAt(); !ok {
        return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
        return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "updated_at"`)}
    }
    if _, ok := mc.mutation.Key(); !ok {
        return &ValidationError{Name: "key", err: errors.New("ent: missing required field \"key\"")}
        return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "key"`)}
    }
    if _, ok := mc.mutation.Value(); !ok {
        return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")}
        return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "value"`)}
    }
    if v, ok := mc.mutation.Value(); ok {
        if err := meta.ValueValidator(v); err != nil {
            return &ValidationError{Name: "value", err: fmt.Errorf("ent: validator failed for field \"value\": %w", err)}
            return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "value": %w`, err)}
        }
    }
    return nil

@@ -167,8 +186,8 @@ func (mc *MetaCreate) check() error {
func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) {
    _node, _spec := mc.createSpec()
    if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
        if cerr, ok := isSQLConstraintError(err); ok {
            err = cerr
        if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{err.Error(), err}
        }
        return nil, err
    }
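Constraint violations are now detected via sqlgraph.IsConstraintError before being wrapped in the generated *ConstraintError. From the caller's side the generated ent.IsConstraintError helper keeps working; a sketch, assuming a client and illustrative key/value arguments:

func createOrIgnore(ctx context.Context, client *ent.Client, k, v string) error {
    err := client.Meta.Create().SetKey(k).SetValue(v).Exec(ctx)
    // Constraint violations (e.g. a unique index) stay detectable through
    // the generated helper regardless of the internal wrapping.
    if ent.IsConstraintError(err) {
        return nil // treat duplicates as a no-op in this sketch
    }
    return err
}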
@@ -272,19 +291,23 @@ func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) {
                if i < len(mutators)-1 {
                    _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
                } else {
                    spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
                    // Invoke the actual operation on the latest mutation in the chain.
                    if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
                        if cerr, ok := isSQLConstraintError(err); ok {
                            err = cerr
                    if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil {
                        if sqlgraph.IsConstraintError(err) {
                            err = &ConstraintError{err.Error(), err}
                        }
                    }
                }
                mutation.done = true
                if err != nil {
                    return nil, err
                }
                id := specs[i].ID.Value.(int64)
                nodes[i].ID = int(id)
                mutation.id = &nodes[i].ID
                mutation.done = true
                if specs[i].ID.Value != nil {
                    id := specs[i].ID.Value.(int64)
                    nodes[i].ID = int(id)
                }
                return nodes[i], nil
            })
            for i := len(builder.hooks) - 1; i >= 0; i-- {

@@ -309,3 +332,16 @@ func (mcb *MetaCreateBulk) SaveX(ctx context.Context) []*Meta {
    }
    return v
}

// Exec executes the query.
func (mcb *MetaCreateBulk) Exec(ctx context.Context) error {
    _, err := mcb.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (mcb *MetaCreateBulk) ExecX(ctx context.Context) {
    if err := mcb.Exec(ctx); err != nil {
        panic(err)
    }
}
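CreateBulk gets the same Exec/ExecX shortcut. A sketch of bulk-inserting several Meta rows, assuming a client and an illustrative map of key/value pairs:

func createMetaBulk(ctx context.Context, client *ent.Client, kv map[string]string) error {
    bulk := make([]*ent.MetaCreate, 0, len(kv))
    for k, v := range kv {
        bulk = append(bulk, client.Meta.Create().SetKey(k).SetValue(v))
    }
    // Exec runs the batch insert and drops the returned entities.
    return client.Meta.CreateBulk(bulk...).Exec(ctx)
}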
@@ -20,9 +20,9 @@ type MetaDelete struct {
    mutation *MetaMutation
}

// Where adds a new predicate to the MetaDelete builder.
// Where appends a list predicates to the MetaDelete builder.
func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete {
    md.mutation.predicates = append(md.mutation.predicates, ps...)
    md.mutation.Where(ps...)
    return md
}
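The delete builder's Where now forwards predicates through the mutation's own Where method instead of appending to the slice directly; call-site usage is unchanged. A sketch, assuming a client, the generated meta predicate package, and an illustrative key:

func deleteMetaByKey(ctx context.Context, client *ent.Client, key string) (int, error) {
    // Exec returns the number of deleted rows.
    return client.Meta.Delete().
        Where(meta.KeyEQ(key)).
        Exec(ctx)
}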
@@ -46,6 +46,9 @@ func (md *MetaDelete) Exec(ctx context.Context) (int, error) {
        return affected, err
    })
    for i := len(md.hooks) - 1; i >= 0; i-- {
        if md.hooks[i] == nil {
            return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
        }
        mut = md.hooks[i](mut)
    }
    if _, err := mut.Mutate(ctx, md.mutation); err != nil {

@@ -21,6 +21,7 @@ type MetaQuery struct {
    config
    limit      *int
    offset     *int
    unique     *bool
    order      []OrderFunc
    fields     []string
    predicates []predicate.Meta

@@ -50,6 +51,13 @@ func (mq *MetaQuery) Offset(offset int) *MetaQuery {
    return mq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (mq *MetaQuery) Unique(unique bool) *MetaQuery {
    mq.unique = &unique
    return mq
}

// Order adds an order step to the query.
func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery {
    mq.order = append(mq.order, o...)
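Unique is a new query-builder knob: the generated query is DISTINCT by default and passing false turns that off. A sketch, assuming a client:

func listMetas(ctx context.Context, client *ent.Client) ([]*ent.Meta, error) {
    // Unique(false) turns off the implicit "SELECT DISTINCT" that the
    // generated query applies by default.
    return client.Meta.Query().
        Unique(false).
        All(ctx)
}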
@@ -317,8 +325,8 @@ func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy {
//    Select(meta.FieldCreatedAt).
//    Scan(ctx, &v)
//
func (mq *MetaQuery) Select(field string, fields ...string) *MetaSelect {
    mq.fields = append([]string{field}, fields...)
func (mq *MetaQuery) Select(fields ...string) *MetaSelect {
    mq.fields = append(mq.fields, fields...)
    return &MetaSelect{MetaQuery: mq}
}
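Select loses its mandatory first argument and becomes fully variadic, so any number of columns can be requested. A sketch of scanning two columns into an ad-hoc struct, assuming a client and the generated meta column constants:

func keysAndValues(ctx context.Context, client *ent.Client) error {
    var rows []struct {
        Key   string `json:"key"`
        Value string `json:"value"`
    }
    // Any number of generated column constants can be passed to Select;
    // in real code the scanned rows would be used rather than discarded.
    return client.Meta.Query().
        Select(meta.FieldKey, meta.FieldValue).
        Scan(ctx, &rows)
}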
@@ -377,11 +385,14 @@ func (mq *MetaQuery) sqlAll(ctx context.Context) ([]*Meta, error) {
        ids := make([]int, 0, len(nodes))
        nodeids := make(map[int][]*Meta)
        for i := range nodes {
            fk := nodes[i].alert_metas
            if fk != nil {
                ids = append(ids, *fk)
                nodeids[*fk] = append(nodeids[*fk], nodes[i])
            if nodes[i].alert_metas == nil {
                continue
            }
            fk := *nodes[i].alert_metas
            if _, ok := nodeids[fk]; !ok {
                ids = append(ids, fk)
            }
            nodeids[fk] = append(nodeids[fk], nodes[i])
        }
        query.Where(alert.IDIn(ids...))
        neighbors, err := query.All(ctx)

@@ -428,6 +439,9 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
        From:   mq.sql,
        Unique: true,
    }
    if unique := mq.unique; unique != nil {
        _spec.Unique = *unique
    }
    if fields := mq.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID)

@@ -453,7 +467,7 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
    if ps := mq.order; len(ps) > 0 {
        _spec.Order = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector, meta.ValidColumn)
                ps[i](selector)
            }
        }
    }

@@ -463,16 +477,20 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
    builder := sql.Dialect(mq.driver.Dialect())
    t1 := builder.Table(meta.Table)
    selector := builder.Select(t1.Columns(meta.Columns...)...).From(t1)
    columns := mq.fields
    if len(columns) == 0 {
        columns = meta.Columns
    }
    selector := builder.Select(t1.Columns(columns...)...).From(t1)
    if mq.sql != nil {
        selector = mq.sql
        selector.Select(selector.Columns(meta.Columns...)...)
        selector.Select(selector.Columns(columns...)...)
    }
    for _, p := range mq.predicates {
        p(selector)
    }
    for _, p := range mq.order {
        p(selector, meta.ValidColumn)
        p(selector)
    }
    if offset := mq.offset; offset != nil {
        // limit is mandatory for offset clause. We start

@@ -734,13 +752,24 @@ func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v interface{}) error {
}

func (mgb *MetaGroupBy) sqlQuery() *sql.Selector {
    selector := mgb.sql
    columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
    columns = append(columns, mgb.fields...)
    selector := mgb.sql.Select()
    aggregation := make([]string, 0, len(mgb.fns))
    for _, fn := range mgb.fns {
        columns = append(columns, fn(selector, meta.ValidColumn))
        aggregation = append(aggregation, fn(selector))
    }
    return selector.Select(columns...).GroupBy(mgb.fields...)
    // If no columns were selected in a custom aggregation function, the default
    // selection is the fields used for "group-by", and the aggregation functions.
    if len(selector.SelectedColumns()) == 0 {
        columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
        for _, f := range mgb.fields {
            columns = append(columns, selector.C(f))
        }
        for _, c := range aggregation {
            columns = append(columns, c)
        }
        selector.Select(columns...)
    }
    return selector.GroupBy(selector.Columns(mgb.fields...)...)
}

// MetaSelect is the builder for selecting fields of Meta entities.
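The rewritten group-by query only falls back to selecting the group-by columns plus the aggregations when nothing else was selected. From the API side this is the usual Aggregate flow; a sketch counting Meta rows per key, assuming a client:

func countPerKey(ctx context.Context, client *ent.Client) error {
    var v []struct {
        Key   string `json:"key"`
        Count int    `json:"count"`
    }
    // ent.Count() is the generated aggregation helper; the scan target
    // mirrors the selected group-by column plus the aggregate alias.
    return client.Meta.Query().
        GroupBy(meta.FieldKey).
        Aggregate(ent.Count()).
        Scan(ctx, &v)
}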
@@ -956,16 +985,10 @@ func (ms *MetaSelect) BoolX(ctx context.Context) bool {

func (ms *MetaSelect) sqlScan(ctx context.Context, v interface{}) error {
    rows := &sql.Rows{}
    query, args := ms.sqlQuery().Query()
    query, args := ms.sql.Query()
    if err := ms.driver.Query(ctx, query, args, rows); err != nil {
        return err
    }
    defer rows.Close()
    return sql.ScanSlice(rows, v)
}

func (ms *MetaSelect) sqlQuery() sql.Querier {
    selector := ms.sql
    selector.Select(selector.Columns(ms.fields...)...)
    return selector
}

@@ -22,9 +22,9 @@ type MetaUpdate struct {
    mutation *MetaMutation
}

// Where adds a new predicate for the MetaUpdate builder.
// Where appends a list predicates to the MetaUpdate builder.
func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate {
    mu.mutation.predicates = append(mu.mutation.predicates, ps...)
    mu.mutation.Where(ps...)
    return mu
}

@@ -124,6 +124,9 @@ func (mu *MetaUpdate) Save(ctx context.Context) (int, error) {
        return affected, err
    })
    for i := len(mu.hooks) - 1; i >= 0; i-- {
        if mu.hooks[i] == nil {
            return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
        }
        mut = mu.hooks[i](mut)
    }
    if _, err := mut.Mutate(ctx, mu.mutation); err != nil {

@@ -249,8 +252,8 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
    if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{meta.Label}
        } else if cerr, ok := isSQLConstraintError(err); ok {
            err = cerr
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{err.Error(), err}
        }
        return 0, err
    }

@@ -260,6 +263,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
// MetaUpdateOne is the builder for updating a single Meta entity.
type MetaUpdateOne struct {
    config
    fields   []string
    hooks    []Hook
    mutation *MetaMutation
}

@@ -334,6 +338,13 @@ func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne {
    return muo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne {
    muo.fields = append([]string{field}, fields...)
    return muo
}

// Save executes the query and returns the updated Meta entity.
func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) {
    var (
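UpdateOne builders gain a Select step that limits which columns are read back into the returned entity; unselected fields keep their zero values. A sketch, assuming a client and an illustrative id and value:

func renameValue(ctx context.Context, client *ent.Client, id int, value string) (*ent.Meta, error) {
    // Only key and value are populated on the returned *ent.Meta.
    return client.Meta.UpdateOneID(id).
        SetValue(value).
        Select(meta.FieldKey, meta.FieldValue).
        Save(ctx)
}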
@@ -360,6 +371,9 @@ func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) {
        return node, err
    })
    for i := len(muo.hooks) - 1; i >= 0; i-- {
        if muo.hooks[i] == nil {
            return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
        }
        mut = muo.hooks[i](mut)
    }
    if _, err := mut.Mutate(ctx, muo.mutation); err != nil {

@@ -417,6 +431,18 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
        return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Meta.ID for update")}
    }
    _spec.Node.ID.Value = id
    if fields := muo.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID)
        for _, f := range fields {
            if !meta.ValidColumn(f) {
                return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
            }
            if f != meta.FieldID {
                _spec.Node.Columns = append(_spec.Node.Columns, f)
            }
        }
    }
    if ps := muo.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {

@@ -493,8 +519,8 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
    if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{meta.Label}
        } else if cerr, ok := isSQLConstraintError(err); ok {
            err = cerr
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{err.Error(), err}
        }
        return nil, err
    }

@@ -65,10 +65,9 @@ var (
    }
    // BouncersTable holds the schema information for the "bouncers" table.
    BouncersTable = &schema.Table{
        Name:        "bouncers",
        Columns:     BouncersColumns,
        PrimaryKey:  []*schema.Column{BouncersColumns[0]},
        ForeignKeys: []*schema.ForeignKey{},
        Name:       "bouncers",
        Columns:    BouncersColumns,
        PrimaryKey: []*schema.Column{BouncersColumns[0]},
    }
    // DecisionsColumns holds the columns for the "decisions" table.
    DecisionsColumns = []*schema.Column{

@@ -109,7 +108,7 @@ var (
        {Name: "created_at", Type: field.TypeTime},
        {Name: "updated_at", Type: field.TypeTime},
        {Name: "time", Type: field.TypeTime},
        {Name: "serialized", Type: field.TypeString, Size: 4095},
        {Name: "serialized", Type: field.TypeString, Size: 8191},
        {Name: "alert_events", Type: field.TypeInt, Nullable: true},
    }
    // EventsTable holds the schema information for the "events" table.
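The events.serialized column width doubles from 4095 to 8191. On the schema side such a limit normally comes from a MaxLen validator on the string field; a hypothetical excerpt, since the schema file itself is not part of this hunk (assumes the usual entgo.io/ent and entgo.io/ent/schema/field imports):

// Hypothetical excerpt of the Event schema; only the field relevant to
// the migration change is shown.
func (Event) Fields() []ent.Field {
    return []ent.Field{
        // MaxLen drives both the generated column size and the
        // value-length validator used by the create/update builders.
        field.String("serialized").MaxLen(8191),
    }
}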
@@ -141,10 +140,9 @@ var (
    }
    // MachinesTable holds the schema information for the "machines" table.
    MachinesTable = &schema.Table{
        Name:        "machines",
        Columns:     MachinesColumns,
        PrimaryKey:  []*schema.Column{MachinesColumns[0]},
        ForeignKeys: []*schema.ForeignKey{},
        Name:       "machines",
        Columns:    MachinesColumns,
        PrimaryKey: []*schema.Column{MachinesColumns[0]},
    }
    // MetaColumns holds the columns for the "meta" table.
    MetaColumns = []*schema.Column{

@@ -155,8 +155,8 @@ func (m AlertMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *AlertMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return
@@ -1284,7 +1284,7 @@ func (m *AlertMutation) ClearOwner() {
    m.clearedowner = true
}

// OwnerCleared returns if the "owner" edge to the Machine entity was cleared.
// OwnerCleared reports if the "owner" edge to the Machine entity was cleared.
func (m *AlertMutation) OwnerCleared() bool {
    return m.clearedowner
}

@@ -1328,7 +1328,7 @@ func (m *AlertMutation) ClearDecisions() {
    m.cleareddecisions = true
}

// DecisionsCleared returns if the "decisions" edge to the Decision entity was cleared.
// DecisionsCleared reports if the "decisions" edge to the Decision entity was cleared.
func (m *AlertMutation) DecisionsCleared() bool {
    return m.cleareddecisions
}

@@ -1339,6 +1339,7 @@ func (m *AlertMutation) RemoveDecisionIDs(ids ...int) {
        m.removeddecisions = make(map[int]struct{})
    }
    for i := range ids {
        delete(m.decisions, ids[i])
        m.removeddecisions[ids[i]] = struct{}{}
    }
}

@@ -1381,7 +1382,7 @@ func (m *AlertMutation) ClearEvents() {
    m.clearedevents = true
}

// EventsCleared returns if the "events" edge to the Event entity was cleared.
// EventsCleared reports if the "events" edge to the Event entity was cleared.
func (m *AlertMutation) EventsCleared() bool {
    return m.clearedevents
}

@@ -1392,6 +1393,7 @@ func (m *AlertMutation) RemoveEventIDs(ids ...int) {
        m.removedevents = make(map[int]struct{})
    }
    for i := range ids {
        delete(m.events, ids[i])
        m.removedevents[ids[i]] = struct{}{}
    }
}

@@ -1434,7 +1436,7 @@ func (m *AlertMutation) ClearMetas() {
    m.clearedmetas = true
}

// MetasCleared returns if the "metas" edge to the Meta entity was cleared.
// MetasCleared reports if the "metas" edge to the Meta entity was cleared.
func (m *AlertMutation) MetasCleared() bool {
    return m.clearedmetas
}

@@ -1445,6 +1447,7 @@ func (m *AlertMutation) RemoveMetaIDs(ids ...int) {
        m.removedmetas = make(map[int]struct{})
    }
    for i := range ids {
        delete(m.metas, ids[i])
        m.removedmetas[ids[i]] = struct{}{}
    }
}

@@ -1472,6 +1475,11 @@ func (m *AlertMutation) ResetMetas() {
    m.removedmetas = nil
}

// Where appends a list predicates to the AlertMutation builder.
func (m *AlertMutation) Where(ps ...predicate.Alert) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *AlertMutation) Op() Op {
    return m.op
@@ -2348,8 +2356,8 @@ func (m BouncerMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *BouncerMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return

@@ -2769,6 +2777,11 @@ func (m *BouncerMutation) ResetLastPull() {
    m.last_pull = nil
}

// Where appends a list predicates to the BouncerMutation builder.
func (m *BouncerMutation) Where(ps ...predicate.Bouncer) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *BouncerMutation) Op() Op {
    return m.op

@@ -3211,8 +3224,8 @@ func (m DecisionMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *DecisionMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return

@@ -3904,7 +3917,7 @@ func (m *DecisionMutation) ClearOwner() {
    m.clearedowner = true
}

// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *DecisionMutation) OwnerCleared() bool {
    return m.clearedowner
}

@@ -3933,6 +3946,11 @@ func (m *DecisionMutation) ResetOwner() {
    m.clearedowner = false
}

// Where appends a list predicates to the DecisionMutation builder.
func (m *DecisionMutation) Where(ps ...predicate.Decision) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *DecisionMutation) Op() Op {
    return m.op

@@ -4525,8 +4543,8 @@ func (m EventMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *EventMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return

@@ -4688,7 +4706,7 @@ func (m *EventMutation) ClearOwner() {
    m.clearedowner = true
}

// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *EventMutation) OwnerCleared() bool {
    return m.clearedowner
}

@@ -4717,6 +4735,11 @@ func (m *EventMutation) ResetOwner() {
    m.clearedowner = false
}

// Where appends a list predicates to the EventMutation builder.
func (m *EventMutation) Where(ps ...predicate.Event) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *EventMutation) Op() Op {
    return m.op
@@ -5049,8 +5072,8 @@ func (m MachineMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *MachineMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return

@@ -5436,7 +5459,7 @@ func (m *MachineMutation) ClearAlerts() {
    m.clearedalerts = true
}

// AlertsCleared returns if the "alerts" edge to the Alert entity was cleared.
// AlertsCleared reports if the "alerts" edge to the Alert entity was cleared.
func (m *MachineMutation) AlertsCleared() bool {
    return m.clearedalerts
}

@@ -5447,6 +5470,7 @@ func (m *MachineMutation) RemoveAlertIDs(ids ...int) {
        m.removedalerts = make(map[int]struct{})
    }
    for i := range ids {
        delete(m.alerts, ids[i])
        m.removedalerts[ids[i]] = struct{}{}
    }
}

@@ -5474,6 +5498,11 @@ func (m *MachineMutation) ResetAlerts() {
    m.removedalerts = nil
}

// Where appends a list predicates to the MachineMutation builder.
func (m *MachineMutation) Where(ps ...predicate.Machine) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *MachineMutation) Op() Op {
    return m.op

@@ -5914,8 +5943,8 @@ func (m MetaMutation) Tx() (*Tx, error) {
    return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID
// is only available if it was provided to the builder.
// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *MetaMutation) ID() (id int, exists bool) {
    if m.id == nil {
        return

@@ -6077,7 +6106,7 @@ func (m *MetaMutation) ClearOwner() {
    m.clearedowner = true
}

// OwnerCleared returns if the "owner" edge to the Alert entity was cleared.
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
func (m *MetaMutation) OwnerCleared() bool {
    return m.clearedowner
}

@@ -6106,6 +6135,11 @@ func (m *MetaMutation) ResetOwner() {
    m.clearedowner = false
}

// Where appends a list predicates to the MetaMutation builder.
func (m *MetaMutation) Where(ps ...predicate.Meta) {
    m.predicates = append(m.predicates, ps...)
}

// Op returns the operation name.
func (m *MetaMutation) Op() Op {
    return m.op
@@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go

const (
    Version = "v0.7.0" // Version of ent codegen.
    Sum     = "h1:E3EjO0cUL61DvUg5ZEZdxa4yTL+4SuZv0LqBExo8CQA=" // Sum of ent codegen.
    Version = "v0.9.1" // Version of ent codegen.
    Sum     = "h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=" // Sum of ent codegen.
)
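These constants are stamped by the ent code generator and must match the module version in go.mod (v0.9.1). A sketch of a typical regeneration step after bumping the dependency, assuming the project wires codegen through the standard ent entrypoint:

// generate.go (sketch) — placed next to the generated package so that
// `go generate ./...` re-runs the codegen after the version bump.
package ent

//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema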