@@ -18,9 +18,6 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	timetypes "github.com/docker/docker/api/types/time"
 	"github.com/docker/docker/daemon/cluster/convert"
-	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/stdcopy"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
 	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
@@ -303,56 +300,44 @@ func (c *Cluster) RemoveService(input string) error {
 }
 
 // ServiceLogs collects service logs and writes them back to `config.OutStream`
-func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *backend.ContainerLogsConfig, started chan struct{}) error {
+func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
 	c.mu.RLock()
-	defer func() {
-		select {
-		case <-started:
-			// if we've started streaming logs, we are no longer holding the
-			// lock and do not have to release it
-			return
-		default:
-			// before we start, though, we're holding this lock and it needs to
-			// be released
-			c.mu.RUnlock()
-		}
-	}()
+	defer c.mu.RUnlock()
+
 	state := c.currentNodeState()
 	if !state.IsActiveManager() {
-		return c.errNoManager(state)
+		return nil, c.errNoManager(state)
 	}
 
-	swarmSelector, tty, err := convertSelector(ctx, state.controlClient, selector)
+	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
 	if err != nil {
-		return errors.Wrap(err, "error making log selector")
-	}
-
-	// TODO(dperny) this goes away when we support TTY logs, which is in the works
-	if tty {
-		return errors.New("service logs not supported on tasks with a TTY attached")
+		return nil, errors.Wrap(err, "error making log selector")
 	}
 
 	// set the streams we'll use
 	stdStreams := []swarmapi.LogStream{}
-	if config.ContainerLogsOptions.ShowStdout {
+	if config.ShowStdout {
 		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
 	}
-	if config.ContainerLogsOptions.ShowStderr {
+	if config.ShowStderr {
 		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
 	}
 
 	// Get tail value squared away - the number of previous log lines we look at
 	var tail int64
+	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
+	// it to -1 (all). i don't agree with that, but i also think no tail value
+	// should be legitimate. if you don't pass tail, we assume you want "all"
 	if config.Tail == "all" || config.Tail == "" {
 		// tail of 0 means send all logs on the swarmkit side
 		tail = 0
 	} else {
 		t, err := strconv.Atoi(config.Tail)
 		if err != nil {
-			return errors.New("tail value must be a positive integer or \"all\"")
+			return nil, errors.New("tail value must be a positive integer or \"all\"")
 		}
 		if t < 0 {
-			return errors.New("negative tail values not supported")
+			return nil, errors.New("negative tail values not supported")
 		}
 		// we actually use negative tail in swarmkit to represent messages
 		// backwards starting from the beginning. also, -1 means no logs. so,
@@ -370,12 +355,12 @@ func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector
 	if config.Since != "" {
 		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
 		if err != nil {
-			return errors.Wrap(err, "could not parse since timestamp")
+			return nil, errors.Wrap(err, "could not parse since timestamp")
 		}
 		since := time.Unix(s, n)
 		sinceProto, err = gogotypes.TimestampProto(since)
 		if err != nil {
-			return errors.Wrap(err, "could not parse timestamp to proto")
+			return nil, errors.Wrap(err, "could not parse timestamp to proto")
 		}
 	}
 
@@ -389,106 +374,96 @@ func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector
 		},
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	wf := ioutils.NewWriteFlusher(config.OutStream)
-	defer wf.Close()
-
-	// Release the lock before starting the stream.
-	//
-	// this feels like it could be racy because we would double unlock if we
-	// somehow returned right after we unlocked but before we closed, but I do
-	// not think such a thing is possible. i wish it were possible to atomically
-	// close and unlock but that might be overkill. programming is hard.
-	c.mu.RUnlock()
-	close(started)
-
-	wf.Flush()
-
-	outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
-	errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)
-
-	for {
-		// Check the context before doing anything.
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-		}
-
-		subscribeMsg, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-
-		for _, msg := range subscribeMsg.Messages {
-			data := []byte{}
-
-			if config.Timestamps {
-				ts, err := gogotypes.TimestampFromProto(msg.Timestamp)
-				if err != nil {
-					return err
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		defer close(messageChan)
+		for {
+			// Check the context before doing anything.
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+			subscribeMsg, err := stream.Recv()
+			if err == io.EOF {
+				return
+			}
+			// if we're not io.EOF, push the message in and return
+			if err != nil {
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
 				}
-				data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
+				return
 			}
 
-			data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
-				contextPrefix, msg.Context.NodeID,
-				contextPrefix, msg.Context.ServiceID,
-				contextPrefix, msg.Context.TaskID,
-			))...)
-
-			data = append(data, msg.Data...)
-
-			switch msg.Stream {
-			case swarmapi.LogStreamStdout:
-				outStream.Write(data)
-			case swarmapi.LogStreamStderr:
-				errStream.Write(data)
+			for _, msg := range subscribeMsg.Messages {
+				// make a new message
+				m := new(backend.LogMessage)
+				m.Attrs = make(backend.LogAttributes)
+				// add the timestamp, adding the error if it fails
+				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
+				if err != nil {
+					m.Err = err
+				}
+				m.Attrs[contextPrefix+".node.id"] = msg.Context.NodeID
+				m.Attrs[contextPrefix+".service.id"] = msg.Context.ServiceID
+				m.Attrs[contextPrefix+".task.id"] = msg.Context.TaskID
+				switch msg.Stream {
+				case swarmapi.LogStreamStdout:
+					m.Source = "stdout"
+				case swarmapi.LogStreamStderr:
+					m.Source = "stderr"
+				}
+				m.Line = msg.Data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. we need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
+				select {
+				case <-ctx.Done():
+					return
+				case messageChan <- m:
+				}
 			}
 		}
-	}
+	}()
+	return messageChan, nil
 }
 
 // convertSelector takes a backend.LogSelector, which contains raw names that
 // may or may not be valid, and converts them to an api.LogSelector proto. It
-// also returns a boolean, true if any of the services use a TTY (false
-// otherwise) and an error if something fails
-func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, bool, error) {
-	// if ANY tasks use a TTY, don't mux streams
-	var tty bool
+// returns an error if something fails
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
 	// don't rely on swarmkit to resolve IDs, do it ourselves
 	swarmSelector := &swarmapi.LogSelector{}
 	for _, s := range selector.Services {
 		service, err := getService(ctx, cc, s)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		c := service.Spec.Task.GetContainer()
 		if c == nil {
-			return nil, false, errors.New("logs only supported on container tasks")
+			return nil, errors.New("logs only supported on container tasks")
 		}
-		// set TTY true if we have a TTY service, or if it's already true
-		tty = tty || c.TTY
 		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
 	}
	for _, t := range selector.Tasks {
 		task, err := getTask(ctx, cc, t)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		c := task.Spec.GetContainer()
 		if c == nil {
-			return nil, false, errors.New("logs only supported on container tasks")
+			return nil, errors.New("logs only supported on container tasks")
 		}
-		tty = tty || c.TTY
 		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
 	}
-	return swarmSelector, tty, nil
+	return swarmSelector, nil
 }
 
 // imageWithDigestString takes an image such as name or name:tag
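
Note on the new contract: after this patch, ServiceLogs no longer writes stdcopy-muxed streams to an output writer; it hands the caller a read-only channel, closes that channel from the producer goroutine on EOF or context cancellation, and delivers stream errors in-band via LogMessage.Err. A minimal consumer sketch, assuming it sits in the same daemon/cluster package so the patched types are in scope — drainServiceLogs, its parameters, and the output formatting are illustrative, not part of this patch:

// drainServiceLogs is a hypothetical helper showing how a caller might
// consume the channel returned by the new ServiceLogs signature.
func drainServiceLogs(ctx context.Context, c *Cluster, selector *backend.LogSelector, opts *apitypes.ContainerLogsOptions, w io.Writer) error {
	msgs, err := c.ServiceLogs(ctx, selector, opts)
	if err != nil {
		// setup errors (no manager, bad selector, bad tail/since) still
		// arrive synchronously, before any channel is returned
		return err
	}
	// the producer goroutine closes msgs, so ranging terminates cleanly
	for m := range msgs {
		if m.Err != nil {
			// stream errors arrive in-band rather than closing the channel early
			return m.Err
		}
		fmt.Fprintf(w, "%s %s %s", m.Timestamp.Format(time.RFC3339Nano), m.Source, m.Line)
	}
	return nil
}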
|