package cluster // import "github.com/docker/docker/daemon/cluster"

import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/containerd/log"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/convert"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/stack"
	swarmapi "github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/manager/encryption"
	swarmnode "github.com/moby/swarmkit/v2/node"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

// Init initializes a new cluster from a user-provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()
	if c.nr != nil {
		if req.ForceNewCluster {
			// Take c.mu temporarily to wait for presently running
			// API handlers to finish before shutting down the node.
			c.mu.Lock()
			if !c.nr.nodeState.IsManager() {
				c.mu.Unlock()
				return "", errSwarmNotManager
			}
			c.mu.Unlock()

			if err := c.nr.Stop(); err != nil {
				return "", err
			}
		} else {
			return "", errSwarmExists
		}
	}

	if err := validateAndSanitizeInitRequest(&req); err != nil {
		return "", errdefs.InvalidParameter(err)
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		return "", errdefs.InvalidParameter(err)
	}

	advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
	if err != nil {
		return "", err
	}

	dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr)
	if err != nil {
		return "", err
	}

	localAddr := listenHost

	// If the local address is undetermined, the advertise address
	// will be used as local address, if it belongs to this system.
	// If the advertise address is not local, then we try to find
	// a system address to use as local address. If this fails,
	// we give up and ask the user to pass the listen address.
	if net.ParseIP(localAddr).IsUnspecified() {
		advertiseIP := net.ParseIP(advertiseHost)

		found := false
		for _, systemIP := range listSystemIPs() {
			if systemIP.Equal(advertiseIP) {
				localAddr = advertiseIP.String()
				found = true
				break
			}
		}

		if !found {
			ip, err := c.resolveSystemAddr()
			if err != nil {
				log.G(context.TODO()).Warnf("Could not find a local address: %v", err)
				return "", errMustSpecifyListenAddr
			}
			localAddr = ip.String()
		}
	}

	if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil {
		return "", err
	}

	port, err := getDataPathPort(req.DataPathPort)
	if err != nil {
		return "", err
	}

	nr, err := c.newNodeRunner(nodeStartConfig{
		forceNewCluster:    req.ForceNewCluster,
		autolock:           req.AutoLockManagers,
		LocalAddr:          localAddr,
		ListenAddr:         net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr:      net.JoinHostPort(advertiseHost, advertisePort),
		DataPathAddr:       dataPathAddr,
		DefaultAddressPool: req.DefaultAddrPool,
		SubnetSize:         req.SubnetSize,
		availability:       req.Availability,
		DataPathPort:       port,
	})
	if err != nil {
		return "", err
	}
	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	if err := <-nr.Ready(); err != nil {
		c.mu.Lock()
		c.nr = nil
		c.mu.Unlock()
		if !req.ForceNewCluster { // don't keep state from a failed first attempt
			if err := clearPersistentState(c.root); err != nil {
				return "", err
			}
		}
		return "", err
	}
	state := nr.State()
	if state.swarmNode == nil { // should never happen but protect from panic
		return "", errors.New("invalid cluster state for spec initialization")
	}
	if err := initClusterSpec(state.swarmNode, req.Spec); err != nil {
		return "", err
	}
	return state.NodeID(), nil
}
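
// A minimal usage sketch for Init. The addresses are illustrative
// assumptions, not defaults defined in this package:
//
//	nodeID, err := c.Init(types.InitRequest{
//		ListenAddr:    "0.0.0.0:2377",
//		AdvertiseAddr: "192.168.1.10:2377",
//	})
//	if err != nil {
//		// e.g. errSwarmExists when this node is already part of a swarm
//	}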

// Join makes the current Cluster part of an existing swarm cluster.
func (c *Cluster) Join(req types.JoinRequest) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()
	c.mu.Lock()
	if c.nr != nil {
		c.mu.Unlock()
		return errors.WithStack(errSwarmExists)
	}
	c.mu.Unlock()

	if err := validateAndSanitizeJoinRequest(&req); err != nil {
		return errdefs.InvalidParameter(err)
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		return err
	}

	var advertiseAddr string
	if req.AdvertiseAddr != "" {
		advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
		// For joining, we don't need to provide an advertise address,
		// since the remote side can detect it.
		if err == nil {
			advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
		}
	}

	dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr)
	if err != nil {
		return err
	}

	nr, err := c.newNodeRunner(nodeStartConfig{
		RemoteAddr:    req.RemoteAddrs[0],
		ListenAddr:    net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr: advertiseAddr,
		DataPathAddr:  dataPathAddr,
		joinAddr:      req.RemoteAddrs[0],
		joinToken:     req.JoinToken,
		availability:  req.Availability,
	})
	if err != nil {
		return err
	}
	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	timeout := time.NewTimer(swarmConnectTimeout)
	defer timeout.Stop()

	select {
	case <-timeout.C:
		return errSwarmJoinTimeoutReached
	case err := <-nr.Ready():
		if err != nil {
			c.mu.Lock()
			c.nr = nil
			c.mu.Unlock()
			if err := clearPersistentState(c.root); err != nil {
				return err
			}
		}
		return err
	}
}
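
// A minimal usage sketch for Join. The address and token are illustrative
// placeholders (a real token comes from `docker swarm join-token`):
//
//	err := c.Join(types.JoinRequest{
//		RemoteAddrs: []string{"192.168.1.10:2377"},
//		JoinToken:   "SWMTKN-1-...",
//	})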

// Inspect retrieves the configuration properties of a managed swarm cluster.
func (c *Cluster) Inspect() (types.Swarm, error) {
	var swarm types.Swarm
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		s, err := c.inspect(ctx, state)
		if err != nil {
			return err
		}
		swarm = s
		return nil
	}); err != nil {
		return types.Swarm{}, err
	}
	return swarm, nil
}

func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) {
	s, err := getSwarm(ctx, state.controlClient)
	if err != nil {
		return types.Swarm{}, err
	}
	return convert.SwarmFromGRPC(*s), nil
}
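
// Inspect (above), Update, and GetUnlockKey (below) all funnel through
// lockedManagerAction which, as defined elsewhere in this package, holds
// c.mu for reading and returns early when this node is not an active manager.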

// Update updates the configuration of a managed swarm cluster.
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		swarm, err := getSwarm(ctx, state.controlClient)
		if err != nil {
			return err
		}

		// Validate spec name.
		if spec.Annotations.Name == "" {
			spec.Annotations.Name = "default"
		} else if spec.Annotations.Name != "default" {
			return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`))
		}

		// In update, the client is expected to provide the complete spec of the
		// swarm, including Name and Labels. A field left as 0 or nil is sent to
		// swarmkit as-is; it is not merged with the existing spec, unlike Init.
		clusterSpec, err := convert.SwarmSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		_, err = state.controlClient.UpdateCluster(
			ctx,
			&swarmapi.UpdateClusterRequest{
				ClusterID: swarm.ID,
				Spec:      &clusterSpec,
				ClusterVersion: &swarmapi.Version{
					Index: version,
				},
				Rotation: swarmapi.KeyRotation{
					WorkerJoinToken:  flags.RotateWorkerToken,
					ManagerJoinToken: flags.RotateManagerToken,
					ManagerUnlockKey: flags.RotateManagerUnlockKey,
				},
			},
		)
		return err
	})
}
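
// The ClusterVersion index gives optimistic concurrency: swarmkit rejects the
// update if the cluster object changed since the caller read that version, so
// a caller would typically re-read the swarm and retry. A hedged sketch of
// that flow:
//
//	sw, _ := c.Inspect()
//	err := c.Update(sw.Version.Index, sw.Spec, types.UpdateFlags{})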

// GetUnlockKey returns the unlock key for the swarm.
func (c *Cluster) GetUnlockKey() (string, error) {
	var resp *swarmapi.GetUnlockKeyResponse
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		client := swarmapi.NewCAClient(state.grpcConn)

		r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
		if err != nil {
			return err
		}
		resp = r
		return nil
	}); err != nil {
		return "", err
	}
	if len(resp.UnlockKey) == 0 {
		// no key
		return "", nil
	}
	return encryption.HumanReadableKey(resp.UnlockKey), nil
}
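
// When autolock is enabled, the returned key is the human-readable
// "SWMKEY-1-..." string that `docker swarm unlock-key` prints; an empty
// string means the swarm is not configured with autolock.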

// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	c.mu.RLock()
	state := c.currentNodeState()

	if !state.IsActiveManager() {
		// If the manager is inactive for any reason other than the swarm
		// being locked, return that error.
		if err := c.errNoManager(state); err != errSwarmLocked {
			c.mu.RUnlock()
			return err
		}
	} else {
		// The manager is active, so the swarm cannot be locked.
		c.mu.RUnlock()
		return notLockedError{}
	}

	// Execution only reaches here when the swarm is locked.
	nr := c.nr
	c.mu.RUnlock()

	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
	if err != nil {
		return errdefs.InvalidParameter(err)
	}

	config := nr.config
	config.lockKey = key
	if err := nr.Stop(); err != nil {
		return err
	}
	nr, err = c.newNodeRunner(config)
	if err != nil {
		return err
	}

	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	if err := <-nr.Ready(); err != nil {
		if errors.Is(err, errSwarmLocked) {
			return invalidUnlockKey{}
		}
		return errors.Errorf("swarm component could not be started: %v", err)
	}
	return nil
}
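
// Illustrative call; the key value below is a made-up example of the format
// produced by GetUnlockKey, not a real key:
//
//	err := c.UnlockSwarm(types.UnlockRequest{
//		UnlockKey: "SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4",
//	})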

// Leave shuts down Cluster and removes current state.
func (c *Cluster) Leave(ctx context.Context, force bool) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	c.mu.Lock()
	nr := c.nr
	if nr == nil {
		c.mu.Unlock()
		return errors.WithStack(errNoSwarm)
	}

	state := c.currentNodeState()

	c.mu.Unlock()

	if errors.Is(state.err, errSwarmLocked) && !force {
		// Leaving a locked swarm without --force is not allowed.
		return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message."))
	}

	if state.IsManager() && !force {
		msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
		if state.IsActiveManager() {
			active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
			if err == nil {
				if active && removingManagerCausesLossOfQuorum(reachable, unreachable) {
					if isLastManager(reachable, unreachable) {
						msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
						return errors.WithStack(notAvailableError(msg))
					}
					msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable)
				}
			}
		} else {
			msg += "Doing so may lose the consensus of your cluster. "
		}

		msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
		return errors.WithStack(notAvailableError(msg))
	}

	// release readers in here
	if err := nr.Stop(); err != nil {
		log.G(ctx).Errorf("failed to shut down cluster node: %v", err)
		stack.Dump()
		return err
	}

	c.mu.Lock()
	c.nr = nil
	c.mu.Unlock()

	if nodeID := state.NodeID(); nodeID != "" {
		nodeContainers, err := c.listContainerForNode(ctx, nodeID)
		if err != nil {
			return err
		}
		for _, id := range nodeContainers {
			if err := c.config.Backend.ContainerRm(id, &backend.ContainerRmConfig{ForceRemove: true}); err != nil {
				log.G(ctx).Errorf("error removing %v: %v", id, err)
			}
		}
	}

	// TODO: make this cleanup optional?
	if err := clearPersistentState(c.root); err != nil {
		return err
	}
	c.config.Backend.DaemonLeavesCluster()
	return nil
}
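
// The quorum checks above rest on standard Raft arithmetic (a sketch of the
// assumed semantics, not this package's exact implementation): with n
// managers, a majority of n/2+1 must stay reachable. Going from 3 reachable
// managers to 2 out of 3 keeps quorum, while dropping to 1 out of 2 loses it.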

// Info returns information about the current cluster state.
func (c *Cluster) Info(ctx context.Context) types.Info {
	info := types.Info{
		NodeAddr: c.GetAdvertiseAddress(),
	}
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	info.LocalNodeState = state.status
	if state.err != nil {
		info.Error = state.err.Error()
	}

	ctx, cancel := c.getRequestContext(ctx)
	defer cancel()

	if state.IsActiveManager() {
		info.ControlAvailable = true
		swarm, err := c.inspect(ctx, state)
		if err != nil {
			info.Error = err.Error()
		}

		info.Cluster = &swarm.ClusterInfo

		if r, err := state.controlClient.ListNodes(
			ctx, &swarmapi.ListNodesRequest{},
			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
		); err != nil {
			info.Error = err.Error()
		} else {
			info.Nodes = len(r.Nodes)
			for _, n := range r.Nodes {
				if n.ManagerStatus != nil {
					info.Managers = info.Managers + 1
				}
			}
		}

		switch info.LocalNodeState {
		case types.LocalNodeStateInactive, types.LocalNodeStateLocked, types.LocalNodeStateError:
			// nothing to do
		default:
			if info.Managers == 2 {
				const warn string = `WARNING: Running Swarm in a two-manager configuration. This configuration provides
        no fault tolerance, and poses a high risk to lose control over the cluster.
        Refer to https://docs.docker.com/engine/swarm/admin_guide/ to configure the
        Swarm for fault-tolerance.`

				info.Warnings = append(info.Warnings, warn)
			}
		}
	}

	if state.swarmNode != nil {
		for _, r := range state.swarmNode.Remotes() {
			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
		}
		info.NodeID = state.swarmNode.NodeID()
	}

	return info
}
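
// The two-manager warning reflects the same quorum arithmetic: a majority of
// 2 managers is 2, so losing either one makes the swarm unmanageable, which
// is strictly worse than running a single manager.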

// Status returns a textual representation of the node's swarm status and role (manager/worker)
func (c *Cluster) Status() string {
	c.mu.RLock()
	s := c.currentNodeState()
	c.mu.RUnlock()

	state := string(s.status)
	if s.status == types.LocalNodeStateActive {
		if s.IsActiveManager() || s.IsManager() {
			state += "/manager"
		} else {
			state += "/worker"
		}
	}
	return state
}
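
// Example return values: "inactive", "active/worker", "active/manager".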

func validateAndSanitizeInitRequest(req *types.InitRequest) error {
	var err error
	req.ListenAddr, err = validateAddr(req.ListenAddr)
	if err != nil {
		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
	}

	if req.Spec.Annotations.Name == "" {
		req.Spec.Annotations.Name = "default"
	} else if req.Spec.Annotations.Name != "default" {
		return errors.New(`swarm spec must be named "default"`)
	}

	return nil
}

func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
	var err error
	req.ListenAddr, err = validateAddr(req.ListenAddr)
	if err != nil {
		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
	}
	if len(req.RemoteAddrs) == 0 {
		return errors.New("at least 1 RemoteAddr is required to join")
	}
	for i := range req.RemoteAddrs {
		req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i])
		if err != nil {
			return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
		}
	}
	return nil
}

func validateAddr(addr string) (string, error) {
	if addr == "" {
		return addr, errors.New("invalid empty address")
	}
	newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
	if err != nil {
		// TODO(thaJeztah) why are we ignoring the error here? Is this to allow "non-tcp" addresses?
		return addr, nil
	}
	return strings.TrimPrefix(newaddr, "tcp://"), nil
}
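
// For illustration (assuming opts.ParseTCPAddr fills missing parts from
// defaultAddr): validateAddr("192.168.1.10") normalizes the address and
// strips the "tcp://" scheme, while input that fails to parse is returned
// unchanged with a nil error (see the TODO above).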

func initClusterSpec(node *swarmnode.Node, spec types.Spec) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for conn := range node.ListenControlSocket(ctx) {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if conn != nil {
			client := swarmapi.NewControlClient(conn)
			var cluster *swarmapi.Cluster
			for i := 0; ; i++ {
				lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
				if err != nil {
					return fmt.Errorf("error on listing clusters: %v", err)
				}
				if len(lcr.Clusters) == 0 {
					if i < 10 {
						time.Sleep(200 * time.Millisecond)
						continue
					}
					return errors.New("empty list of clusters was returned")
				}
				cluster = lcr.Clusters[0]
				break
			}
			// In init, we take the initial default values from swarmkit and merge
			// any non-nil, non-zero value from spec into the GRPC spec, leaving
			// the defaults alone.
			// Note that this is different from Update(), where we expect the user
			// to specify the complete spec of the cluster (as they already know
			// the existing one and know which fields to update).
			clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
				ClusterID:      cluster.ID,
				ClusterVersion: &cluster.Meta.Version,
				Spec:           &clusterSpec,
			})
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			return nil
		}
	}
	return ctx.Err()
}
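
// The ListClusters loop above polls up to 10 times at 200ms intervals
// (roughly 2s) inside the 5s context deadline, giving the freshly started
// manager time to create its cluster object before the merged spec is applied.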

func (c *Cluster) listContainerForNode(ctx context.Context, nodeID string) ([]string, error) {
	var ids []string
	containers, err := c.config.Backend.Containers(ctx, &container.ListOptions{
		Filters: filters.NewArgs(filters.Arg("label", "com.docker.swarm.node.id="+nodeID)),
	})
	if err != nil {
		return []string{}, err
	}
	for _, c := range containers {
		ids = append(ids, c.ID)
	}
	return ids, nil
}
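
// The label filter above matches the com.docker.swarm.node.id label that the
// daemon sets on swarm task containers, so this is roughly equivalent to
// `docker ps --filter label=com.docker.swarm.node.id=<nodeID>`.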