func checkDo(ctx context.Context, t *testing.T, task *api.Task, ctlr Controller, expected *api.TaskStatus, expectedErr ...error) *api.TaskStatus {
	status, err := Do(ctx, task, ctlr)
	if len(expectedErr) > 0 {
		assert.Equal(t, expectedErr[0], err)
	} else {
		assert.NoError(t, err)
	}

	if task.Status.Timestamp != nil {
		// crazy timestamp validation follows
		previous, err := ptypes.Timestamp(task.Status.Timestamp)
		assert.Nil(t, err)

		current, err := ptypes.Timestamp(status.Timestamp)
		assert.Nil(t, err)

		if current.Before(previous) {
			// ensure that the timestamp always proceeds forward
			t.Fatalf("timestamp must proceed forward: %v < %v", current, previous)
		}
	}

	// if the status and task.Status are different, make sure the new timestamp is greater
	copy := status.Copy()
	copy.Timestamp = nil // don't check against timestamp
	assert.Equal(t, expected, copy)

	return status
}
// ServiceFromGRPC converts a grpc Service to a Service.
func ServiceFromGRPC(s swarmapi.Service) types.Service {
	service := types.Service{
		ID:           s.ID,
		Spec:         *serviceSpecFromGRPC(&s.Spec),
		PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec),

		Endpoint: endpointFromGRPC(s.Endpoint),
	}

	// Meta
	service.Version.Index = s.Meta.Version.Index
	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)

	// UpdateStatus
	service.UpdateStatus = types.UpdateStatus{}
	if s.UpdateStatus != nil {
		switch s.UpdateStatus.State {
		case swarmapi.UpdateStatus_UPDATING:
			service.UpdateStatus.State = types.UpdateStateUpdating
		case swarmapi.UpdateStatus_PAUSED:
			service.UpdateStatus.State = types.UpdateStatePaused
		case swarmapi.UpdateStatus_COMPLETED:
			service.UpdateStatus.State = types.UpdateStateCompleted
		}

		service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt)
		service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt)
		service.UpdateStatus.Message = s.UpdateStatus.Message
	}

	return service
}
// TaskFromGRPC converts a grpc Task to a Task.
func TaskFromGRPC(t swarmapi.Task) types.Task {
	if t.Spec.GetAttachment() != nil {
		return types.Task{}
	}
	containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
	containerStatus := t.Status.GetContainer()
	networks := make([]types.NetworkAttachmentConfig, 0, len(t.Spec.Networks))
	for _, n := range t.Spec.Networks {
		networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	task := types.Task{
		ID: t.ID,
		Annotations: types.Annotations{
			Name:   t.Annotations.Name,
			Labels: t.Annotations.Labels,
		},
		ServiceID: t.ServiceID,
		Slot:      int(t.Slot),
		NodeID:    t.NodeID,
		Spec: types.TaskSpec{
			ContainerSpec: containerSpecFromGRPC(containerConfig),
			Resources:     resourcesFromGRPC(t.Spec.Resources),
			RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
			Placement:     placementFromGRPC(t.Spec.Placement),
			LogDriver:     driverFromGRPC(t.Spec.LogDriver),
			Networks:      networks,
		},
		Status: types.TaskStatus{
			State:   types.TaskState(strings.ToLower(t.Status.State.String())),
			Message: t.Status.Message,
			Err:     t.Status.Err,
		},
		DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
	}

	// Meta
	task.Version.Index = t.Meta.Version.Index
	task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
	task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)

	task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)

	if containerStatus != nil {
		task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
		task.Status.ContainerStatus.PID = int(containerStatus.PID)
		task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
	}

	// NetworksAttachments
	for _, na := range t.Networks {
		task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na))
	}

	return task
}
// ServiceFromGRPC converts a grpc Service to a Service.
func ServiceFromGRPC(s swarmapi.Service) types.Service {
	spec := s.Spec
	containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container

	networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
	for _, n := range spec.Networks {
		networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	service := types.Service{
		ID: s.ID,

		Spec: types.ServiceSpec{
			TaskTemplate: types.TaskSpec{
				ContainerSpec: containerSpecFromGRPC(containerConfig),
				Resources:     resourcesFromGRPC(s.Spec.Task.Resources),
				RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
				Placement:     placementFromGRPC(s.Spec.Task.Placement),
				LogDriver:     driverFromGRPC(s.Spec.Task.LogDriver),
			},
			Networks:     networks,
			EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
		},
		Endpoint: endpointFromGRPC(s.Endpoint),
	}

	// Meta
	service.Version.Index = s.Meta.Version.Index
	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)

	// Annotations
	service.Spec.Name = s.Spec.Annotations.Name
	service.Spec.Labels = s.Spec.Annotations.Labels

	// UpdateConfig
	if s.Spec.Update != nil {
		service.Spec.UpdateConfig = &types.UpdateConfig{
			Parallelism: s.Spec.Update.Parallelism,
		}

		service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)
	}

	// Mode
	switch t := s.Spec.GetMode().(type) {
	case *swarmapi.ServiceSpec_Global:
		service.Spec.Mode.Global = &types.GlobalService{}
	case *swarmapi.ServiceSpec_Replicated:
		service.Spec.Mode.Replicated = &types.ReplicatedService{
			Replicas: &t.Replicated.Replicas,
		}
	}

	return service
}
// Less sorts secrets in descending order of creation time (newest first).
func (k secretSorter) Less(i, j int) bool {
	iTime, err := ptypes.Timestamp(k[i].Meta.CreatedAt)
	if err != nil {
		panic(err)
	}
	jTime, err := ptypes.Timestamp(k[j].Meta.CreatedAt)
	if err != nil {
		panic(err)
	}
	return jTime.Before(iTime)
}
// NodeFromGRPC converts a grpc Node to a Node.
func NodeFromGRPC(n swarmapi.Node) types.Node {
	node := types.Node{
		ID: n.ID,
		Spec: types.NodeSpec{
			Role:         types.NodeRole(strings.ToLower(n.Spec.Role.String())),
			Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
		},
		Status: types.NodeStatus{
			State:   types.NodeState(strings.ToLower(n.Status.State.String())),
			Message: n.Status.Message,
			Addr:    n.Status.Addr,
		},
	}

	// Meta
	node.Version.Index = n.Meta.Version.Index
	node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
	node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)

	// Annotations
	node.Spec.Name = n.Spec.Annotations.Name
	node.Spec.Labels = n.Spec.Annotations.Labels

	// Description
	if n.Description != nil {
		node.Description.Hostname = n.Description.Hostname
		if n.Description.Platform != nil {
			node.Description.Platform.Architecture = n.Description.Platform.Architecture
			node.Description.Platform.OS = n.Description.Platform.OS
		}
		if n.Description.Resources != nil {
			node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
			node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
		}
		if n.Description.Engine != nil {
			node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
			node.Description.Engine.Labels = n.Description.Engine.Labels
			for _, plugin := range n.Description.Engine.Plugins {
				node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
			}
		}
	}

	// Manager
	if n.ManagerStatus != nil {
		node.ManagerStatus = &types.ManagerStatus{
			Leader:       n.ManagerStatus.Leader,
			Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())),
			Addr:         n.ManagerStatus.Addr,
		}
	}

	return node
}
// SwarmFromGRPC converts a grpc Cluster to a Swarm.
func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
	swarm := types.Swarm{
		ID: c.ID,
		Spec: types.Spec{
			Orchestration: types.OrchestrationConfig{
				TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
			},
			Raft: types.RaftConfig{
				SnapshotInterval:           c.Spec.Raft.SnapshotInterval,
				KeepOldSnapshots:           c.Spec.Raft.KeepOldSnapshots,
				LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
				HeartbeatTick:              c.Spec.Raft.HeartbeatTick,
				ElectionTick:               c.Spec.Raft.ElectionTick,
			},
		},
	}

	heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
	swarm.Spec.Dispatcher.HeartbeatPeriod = uint64(heartbeatPeriod)

	swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)

	for _, ca := range c.Spec.CAConfig.ExternalCAs {
		swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
			Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())),
			URL:      ca.URL,
			Options:  ca.Options,
		})
	}

	// Meta
	swarm.Version.Index = c.Meta.Version.Index
	swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
	swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)

	// Annotations
	swarm.Spec.Name = c.Spec.Annotations.Name
	swarm.Spec.Labels = c.Spec.Annotations.Labels

	for _, policy := range c.Spec.AcceptancePolicy.Policies {
		p := types.Policy{
			Role:       types.NodeRole(strings.ToLower(policy.Role.String())),
			Autoaccept: policy.Autoaccept,
		}
		if policy.Secret != nil {
			secret := string(policy.Secret.Data)
			p.Secret = &secret
		}
		swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
	}

	return swarm
}
// SwarmFromGRPC converts a grpc Cluster to a Swarm.
func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
	swarm := types.Swarm{
		ClusterInfo: types.ClusterInfo{
			ID: c.ID,
			Spec: types.Spec{
				Orchestration: types.OrchestrationConfig{
					TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit,
				},
				Raft: types.RaftConfig{
					SnapshotInterval:           c.Spec.Raft.SnapshotInterval,
					KeepOldSnapshots:           &c.Spec.Raft.KeepOldSnapshots,
					LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
					HeartbeatTick:              int(c.Spec.Raft.HeartbeatTick),
					ElectionTick:               int(c.Spec.Raft.ElectionTick),
				},
				EncryptionConfig: types.EncryptionConfig{
					AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers,
				},
			},
		},
		JoinTokens: types.JoinTokens{
			Worker:  c.RootCA.JoinTokens.Worker,
			Manager: c.RootCA.JoinTokens.Manager,
		},
	}

	heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
	swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod

	swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)

	for _, ca := range c.Spec.CAConfig.ExternalCAs {
		swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
			Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())),
			URL:      ca.URL,
			Options:  ca.Options,
		})
	}

	// Meta
	swarm.Version.Index = c.Meta.Version.Index
	swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
	swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)

	// Annotations
	swarm.Spec.Name = c.Spec.Annotations.Name
	swarm.Spec.Labels = c.Spec.Annotations.Labels

	return swarm
}
func (t tasksBySlot) Less(i, j int) bool {
	// Sort by slot.
	if t[i].Slot != t[j].Slot {
		return t[i].Slot < t[j].Slot
	}

	// If same slot, sort by most recent.
	it, err := ptypes.Timestamp(t[i].Meta.CreatedAt)
	if err != nil {
		panic(err)
	}
	jt, err := ptypes.Timestamp(t[j].Meta.CreatedAt)
	if err != nil {
		panic(err)
	}
	return jt.Before(it)
}
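// A minimal usage sketch for the sorter above. It assumes tasksBySlot is
// declared elsewhere as `type tasksBySlot []*api.Task` and also implements
// Len and Swap so it satisfies sort.Interface; sortTasksBySlot itself is a
// hypothetical helper added here only for illustration.
func sortTasksBySlot(tasks []*api.Task) []*api.Task {
	// Ascending by slot; within a slot, the most recently created task first.
	sort.Sort(tasksBySlot(tasks))
	return tasks
}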
// TaskFromGRPC converts a grpc Task to a Task.
func TaskFromGRPC(t swarmapi.Task) types.Task {
	containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
	containerStatus := t.Status.GetContainer()

	task := types.Task{
		ID:        t.ID,
		ServiceID: t.ServiceID,
		Slot:      int(t.Slot),
		NodeID:    t.NodeID,
		Spec: types.TaskSpec{
			ContainerSpec: containerSpecFromGRPC(containerConfig),
			Resources:     resourcesFromGRPC(t.Spec.Resources),
			RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
			Placement:     placementFromGRPC(t.Spec.Placement),
			LogDriver:     driverFromGRPC(t.Spec.LogDriver),
		},
		Status: types.TaskStatus{
			State:   types.TaskState(strings.ToLower(t.Status.State.String())),
			Message: t.Status.Message,
			Err:     t.Status.Err,
		},
		DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
	}

	// Meta
	task.Version.Index = t.Meta.Version.Index
	task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
	task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)

	task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)

	if containerStatus != nil {
		task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
		task.Status.ContainerStatus.PID = int(containerStatus.PID)
		task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
	}

	// NetworksAttachments
	for _, na := range t.Networks {
		task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na))
	}

	return task
}
// TimestampAgo returns a relative time string from a timestamp (e.g. "12 seconds ago").
func TimestampAgo(ts *tspb.Timestamp) string {
	if ts == nil {
		return ""
	}
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	return humanize.Time(t)
}
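// A hedged sketch of the conversion behind TimestampAgo: build a protobuf
// timestamp from a time.Time and render it. The reverse helper name
// (ptypes.TimestampProto) is an assumption about the ptypes package in use,
// and exampleTimestampAgo is a hypothetical function for illustration only.
func exampleTimestampAgo() (string, error) {
	created, err := ptypes.TimestampProto(time.Now().Add(-12 * time.Second)) // assumed reverse conversion
	if err != nil {
		return "", err
	}
	return TimestampAgo(created), nil // e.g. "12 seconds ago"
}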
func networkFromGRPC(n *swarmapi.Network) types.Network {
	if n != nil {
		network := types.Network{
			ID: n.ID,
			Spec: types.NetworkSpec{
				IPv6Enabled: n.Spec.Ipv6Enabled,
				Internal:    n.Spec.Internal,
				Attachable:  n.Spec.Attachable,
				IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
			},
			IPAMOptions: ipamFromGRPC(n.IPAM),
		}

		// Meta
		network.Version.Index = n.Meta.Version.Index
		network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
		network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)

		// Annotations
		network.Spec.Name = n.Spec.Annotations.Name
		network.Spec.Labels = n.Spec.Annotations.Labels

		// DriverConfiguration
		if n.Spec.DriverConfig != nil {
			network.Spec.DriverConfiguration = &types.Driver{
				Name:    n.Spec.DriverConfig.Name,
				Options: n.Spec.DriverConfig.Options,
			}
		}

		// DriverState
		if n.DriverState != nil {
			network.DriverState = types.Driver{
				Name:    n.DriverState.Name,
				Options: n.DriverState.Options,
			}
		}

		return network
	}
	return types.Network{}
}
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) {
	reader, writer := io.Pipe()

	apiOptions := &backend.ContainerLogsConfig{
		ContainerLogsOptions: types.ContainerLogsOptions{
			Follow: options.Follow,

			// TODO(stevvooe): Parse timestamp out of message. This
			// absolutely needs to be done before going to production with
			// this, as it is completely redundant.
			Timestamps: true,
			Details:    false, // no clue what to do with this, let's just deprecate it.
		},
		OutStream: writer,
	}

	if options.Since != nil {
		since, err := ptypes.Timestamp(options.Since)
		if err != nil {
			return nil, err
		}
		apiOptions.Since = since.Format(time.RFC3339Nano)
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, fmt.Errorf("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}

	chStarted := make(chan struct{})
	go func() {
		defer writer.Close()
		c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted)
	}()

	return reader, nil
}
// SecretFromGRPC converts a grpc Secret to a Secret.
func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
	secret := swarmtypes.Secret{
		ID:         s.ID,
		Digest:     s.Digest,
		SecretSize: s.SecretSize,
		Spec: swarmtypes.SecretSpec{
			Annotations: swarmtypes.Annotations{
				Name:   s.Spec.Annotations.Name,
				Labels: s.Spec.Annotations.Labels,
			},
			Data: s.Spec.Data,
		},
	}

	secret.Version.Index = s.Meta.Version.Index

	// Meta
	secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
	secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)

	return secret
}
// expireBlacklistedCerts removes blacklisted certificates whose expiry is
// more than expiredCertGrace in the past.
func expireBlacklistedCerts(cluster *api.Cluster) {
	nowMinusGrace := time.Now().Add(-expiredCertGrace)

	for cn, blacklistedCert := range cluster.BlacklistedCertificates {
		if blacklistedCert.Expiry == nil {
			continue
		}

		expiry, err := ptypes.Timestamp(blacklistedCert.Expiry)
		if err == nil && nowMinusGrace.After(expiry) {
			delete(cluster.BlacklistedCertificates, cn)
		}
	}
}
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) {
	apiOptions := types.ContainerLogsOptions{
		Follow:     options.Follow,
		Timestamps: true,
		Details:    false,
	}

	if options.Since != nil {
		since, err := ptypes.Timestamp(options.Since)
		if err != nil {
			return nil, err
		}
		apiOptions.Since = since.Format(time.RFC3339Nano)
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, fmt.Errorf("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}

	return c.client.ContainerLogs(ctx, c.container.name(), apiOptions)
}
// ServiceFromGRPC converts a grpc Service to a Service.
func ServiceFromGRPC(s swarmapi.Service) types.Service {
	spec := s.Spec
	containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container

	serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
	for _, n := range spec.Networks {
		serviceNetworks = append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Task.Networks))
	for _, n := range spec.Task.Networks {
		taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	service := types.Service{
		ID: s.ID,

		Spec: types.ServiceSpec{
			TaskTemplate: types.TaskSpec{
				ContainerSpec: containerSpecFromGRPC(containerConfig),
				Resources:     resourcesFromGRPC(s.Spec.Task.Resources),
				RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
				Placement:     placementFromGRPC(s.Spec.Task.Placement),
				LogDriver:     driverFromGRPC(s.Spec.Task.LogDriver),
				Networks:      taskNetworks,
			},

			Networks:     serviceNetworks,
			EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
		},
		Endpoint: endpointFromGRPC(s.Endpoint),
	}

	// Meta
	service.Version.Index = s.Meta.Version.Index
	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)

	// Annotations
	service.Spec.Name = s.Spec.Annotations.Name
	service.Spec.Labels = s.Spec.Annotations.Labels

	// UpdateConfig
	if s.Spec.Update != nil {
		service.Spec.UpdateConfig = &types.UpdateConfig{
			Parallelism: s.Spec.Update.Parallelism,
		}

		service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)

		switch s.Spec.Update.FailureAction {
		case swarmapi.UpdateConfig_PAUSE:
			service.Spec.UpdateConfig.FailureAction = types.UpdateFailureActionPause
		case swarmapi.UpdateConfig_CONTINUE:
			service.Spec.UpdateConfig.FailureAction = types.UpdateFailureActionContinue
		}
	}

	// Mode
	switch t := s.Spec.GetMode().(type) {
	case *swarmapi.ServiceSpec_Global:
		service.Spec.Mode.Global = &types.GlobalService{}
	case *swarmapi.ServiceSpec_Replicated:
		service.Spec.Mode.Replicated = &types.ReplicatedService{
			Replicas: &t.Replicated.Replicas,
		}
	}

	// UpdateStatus
	service.UpdateStatus = types.UpdateStatus{}
	if s.UpdateStatus != nil {
		switch s.UpdateStatus.State {
		case swarmapi.UpdateStatus_UPDATING:
			service.UpdateStatus.State = types.UpdateStateUpdating
		case swarmapi.UpdateStatus_PAUSED:
			service.UpdateStatus.State = types.UpdateStatePaused
		case swarmapi.UpdateStatus_COMPLETED:
			service.UpdateStatus.State = types.UpdateStateCompleted
		}

		service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt)
		service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt)
		service.UpdateStatus.Message = s.UpdateStatus.Message
	}

	return service
}
func printLogMessages(msgs ...api.LogMessage) {
	for _, msg := range msgs {
		ts, _ := ptypes.Timestamp(msg.Timestamp)
		fmt.Printf("%v %v %s\n", msg.Context, ts, string(msg.Data))
	}
}
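// A hedged sketch of constructing input for printLogMessages, based on the
// LogMessage fields used elsewhere in this collection (Context, Timestamp,
// Data). The IDs are placeholders, and ptypes.MustTimestampProto is assumed
// to be available as the reverse of ptypes.Timestamp.
func printLogMessageNow(data string) {
	msg := api.LogMessage{
		Context: api.LogContext{
			NodeID:    "node-example",    // placeholder
			ServiceID: "service-example", // placeholder
			TaskID:    "task-example",    // placeholder
		},
		Timestamp: ptypes.MustTimestampProto(time.Now()), // assumed helper
		Data:      []byte(data),
	}
	printLogMessages(msg)
}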
// ServiceLogs collects service logs and writes them back to `config.OutStream`
func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	c.RLock()
	if !c.isActiveManager() {
		c.RUnlock()
		return c.errNoManager()
	}

	service, err := getService(ctx, c.client, input)
	if err != nil {
		c.RUnlock()
		return err
	}

	stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: &swarmapi.LogSelector{
			ServiceIDs: []string{service.ID},
		},
		Options: &swarmapi.LogSubscriptionOptions{
			Follow: true,
		},
	})
	if err != nil {
		c.RUnlock()
		return err
	}

	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	close(started)
	wf.Flush()

	outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
	errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)

	// Release the lock before starting the stream.
	c.RUnlock()
	for {
		// Check the context before doing anything.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		subscribeMsg, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		for _, msg := range subscribeMsg.Messages {
			data := []byte{}

			if config.Timestamps {
				ts, err := ptypes.Timestamp(msg.Timestamp)
				if err != nil {
					return err
				}
				data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
			}

			data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
				contextPrefix, msg.Context.NodeID,
				contextPrefix, msg.Context.ServiceID,
				contextPrefix, msg.Context.TaskID,
			))...)

			data = append(data, msg.Data...)

			switch msg.Stream {
			case swarmapi.LogStreamStdout:
				outStream.Write(data)
			case swarmapi.LogStreamStderr:
				errStream.Write(data)
			}
		}
	}
}
resp, err := client.ListSecrets(common.Context(cmd), &api.ListSecretsRequest{})
if err != nil {
	return err
}

var output func(*api.Secret)

if !quiet {
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 4, ' ', 0)
	defer func() {
		// Ignore flushing errors - there's nothing we can do.
		_ = w.Flush()
	}()
	common.PrintHeader(w, "ID", "Name", "Created")
	output = func(s *api.Secret) {
		created, err := ptypes.Timestamp(s.Meta.CreatedAt)
		if err != nil {
			panic(err)
		}
		fmt.Fprintf(w, "%s\t%s\t%s\n",
			s.ID,
			s.Spec.Annotations.Name,
			humanize.Time(created),
		)
	}
} else {
	output = func(s *api.Secret) { fmt.Println(s.ID) }
}

sorted := secretSorter(resp.Secrets)
func printServiceSummary(service *api.Service, running int) {
	w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0)
	defer w.Flush()

	task := service.Spec.Task

	common.FprintfIfNotEmpty(w, "ID\t: %s\n", service.ID)
	common.FprintfIfNotEmpty(w, "Name\t: %s\n", service.Spec.Annotations.Name)
	if len(service.Spec.Annotations.Labels) > 0 {
		fmt.Fprintln(w, "Labels\t")
		for k, v := range service.Spec.Annotations.Labels {
			fmt.Fprintf(w, " %s\t: %s\n", k, v)
		}
	}
	common.FprintfIfNotEmpty(w, "Replicas\t: %s\n", getServiceReplicasTxt(service, running))

	if service.UpdateStatus != nil {
		fmt.Fprintln(w, "Update Status\t")
		fmt.Fprintln(w, " State\t:", service.UpdateStatus.State)
		started, err := ptypes.Timestamp(service.UpdateStatus.StartedAt)
		if err == nil {
			fmt.Fprintln(w, " Started\t:", humanize.Time(started))
		}
		if service.UpdateStatus.State == api.UpdateStatus_COMPLETED {
			completed, err := ptypes.Timestamp(service.UpdateStatus.CompletedAt)
			if err == nil {
				fmt.Fprintln(w, " Completed\t:", humanize.Time(completed))
			}
		}
		fmt.Fprintln(w, " Message\t:", service.UpdateStatus.Message)
	}

	fmt.Fprintln(w, "Template\t")
	fmt.Fprintln(w, " Container\t")
	ctr := service.Spec.Task.GetContainer()
	common.FprintfIfNotEmpty(w, " Image\t: %s\n", ctr.Image)
	common.FprintfIfNotEmpty(w, " Command\t: %q\n", strings.Join(ctr.Command, " "))
	common.FprintfIfNotEmpty(w, " Args\t: [%s]\n", strings.Join(ctr.Args, ", "))
	common.FprintfIfNotEmpty(w, " Env\t: [%s]\n", strings.Join(ctr.Env, ", "))

	if task.Placement != nil {
		common.FprintfIfNotEmpty(w, " Constraints\t: %s\n", strings.Join(task.Placement.Constraints, ", "))
	}

	if task.Resources != nil {
		res := task.Resources
		fmt.Fprintln(w, " Resources\t")
		printResources := func(w io.Writer, r *api.Resources) {
			if r.NanoCPUs != 0 {
				fmt.Fprintf(w, "  CPU\t: %g\n", float64(r.NanoCPUs)/1e9)
			}
			if r.MemoryBytes != 0 {
				fmt.Fprintf(w, "  Memory\t: %s\n", humanize.IBytes(uint64(r.MemoryBytes)))
			}
		}
		if res.Reservations != nil {
			fmt.Fprintln(w, " Reservations:\t")
			printResources(w, res.Reservations)
		}
		if res.Limits != nil {
			fmt.Fprintln(w, " Limits:\t")
			printResources(w, res.Limits)
		}
	}

	if len(service.Spec.Task.Networks) > 0 {
		fmt.Fprintf(w, " Networks:\t")
		for _, n := range service.Spec.Task.Networks {
			fmt.Fprintf(w, " %s", n.Target)
		}
	}

	if service.Endpoint != nil && len(service.Endpoint.Ports) > 0 {
		fmt.Fprintln(w, "\nPorts:")
		for _, port := range service.Endpoint.Ports {
			fmt.Fprintf(w, " - Name\t= %s\n", port.Name)
			fmt.Fprintf(w, "   Protocol\t= %s\n", port.Protocol)
			fmt.Fprintf(w, "   Port\t= %d\n", port.TargetPort)
			fmt.Fprintf(w, "   SwarmPort\t= %d\n", port.PublishedPort)
		}
	}

	if len(ctr.Mounts) > 0 {
		fmt.Fprintln(w, " Mounts:")
		for _, v := range ctr.Mounts {
			fmt.Fprintf(w, "  - target = %s\n", v.Target)
			fmt.Fprintf(w, "    source = %s\n", v.Source)
			fmt.Fprintf(w, "    readonly = %v\n", v.ReadOnly)
			fmt.Fprintf(w, "    type = %v\n", strings.ToLower(v.Type.String()))
		}
	}

	if len(ctr.Secrets) > 0 {
		fmt.Fprintln(w, " Secrets:")
		for _, sr := range ctr.Secrets {
			var targetName, mode string
			if sr.GetFile() != nil {
				targetName = sr.GetFile().Name
				mode = "FILE"
			}
			fmt.Fprintf(w, "  [%s] %s@%s:%s\n", mode, sr.SecretName, sr.SecretID, targetName)
		}
	}

	if task.LogDriver != nil {
		fmt.Fprintf(w, " LogDriver\t: %s\n", task.LogDriver.Name)
		var keys []string

		if task.LogDriver.Options != nil {
			for k := range task.LogDriver.Options {
				keys = append(keys, k)
			}
			sort.Strings(keys)

			for _, k := range keys {
				v := task.LogDriver.Options[k]
				if v != "" {
					fmt.Fprintf(w, "  %s\t: %s\n", k, v)
				} else {
					fmt.Fprintf(w, "  %s\t\n", k)
				}
			}
		}
	}
}
func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error {
	tasks, err := store.FindTasks(readTx, store.All)
	if err != nil {
		return err
	}
	for _, t := range tasks {
		if t.NodeID != "" {
			n := store.GetNode(readTx, t.NodeID)
			if invalidNode(n) && t.Status.State <= api.TaskStateRunning && t.DesiredState <= api.TaskStateRunning {
				r.restartTasks[t.ID] = struct{}{}
			}
		}
	}

	_, err = r.store.Batch(func(batch *store.Batch) error {
		for _, t := range tasks {
			if t.ServiceID == "" {
				continue
			}

			// TODO(aluzzardi): We should NOT retrieve the service here.
			service := store.GetService(readTx, t.ServiceID)
			if service == nil {
				// Service was deleted
				err := batch.Update(func(tx store.Tx) error {
					return store.DeleteTask(tx, t.ID)
				})
				if err != nil {
					log.G(ctx).WithError(err).Error("failed to set task desired state to dead")
				}
				continue
			}
			// TODO(aluzzardi): This is shady. We should have a more generic condition.
			if t.DesiredState != api.TaskStateReady || !orchestrator.IsReplicatedService(service) {
				continue
			}
			restartDelay := orchestrator.DefaultRestartDelay
			if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
				var err error
				restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
				if err != nil {
					log.G(ctx).WithError(err).Error("invalid restart delay")
					restartDelay = orchestrator.DefaultRestartDelay
				}
			}
			if restartDelay != 0 {
				timestamp, err := ptypes.Timestamp(t.Status.Timestamp)
				if err == nil {
					restartTime := timestamp.Add(restartDelay)
					calculatedRestartDelay := restartTime.Sub(time.Now())
					if calculatedRestartDelay < restartDelay {
						restartDelay = calculatedRestartDelay
					}
					if restartDelay > 0 {
						_ = batch.Update(func(tx store.Tx) error {
							t := store.GetTask(tx, t.ID)
							// TODO(aluzzardi): This is shady as well. We should have a more generic condition.
							if t == nil || t.DesiredState != api.TaskStateReady {
								return nil
							}
							r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true)
							return nil
						})
						continue
					}
				} else {
					log.G(ctx).WithError(err).Error("invalid status timestamp")
				}
			}

			// Start now
			err := batch.Update(func(tx store.Tx) error {
				return r.restarts.StartNow(tx, t.ID)
			})
			if err != nil {
				log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
			}
		}
		return nil
	})

	return err
}