// pullCommand pulls all images referenced by the compose manifest.
func pullCommand(ctx *cli.Context) {
	ansibleResp := initAnsubleResp(ctx)

	fatalf := func(err error) {
		if ansibleResp != nil {
			ansibleResp.Error(err).WriteTo(os.Stdout)
		}
		log.Fatal(err)
	}

	initLogs(ctx)

	dockerCli := initDockerClient(ctx)
	config := initComposeConfig(ctx, dockerCli)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Manifest: config,
		Docker:   dockerCli,
		DryRun:   ctx.Bool("dry"),
		Auth:     auth,
	})
	if err != nil {
		fatalf(err)
	}

	if err := compose.PullAction(); err != nil {
		fatalf(err)
	}

	if ansibleResp != nil {
		// ansibleResp.Success("done hehe").WriteTo(os.Stdout)
		compose.WritePlan(ansibleResp).WriteTo(os.Stdout)
	}
}
// doRemove removes the containers described by the manifest;
// it is used by runCommand when --force is given.
func doRemove(ctx *cli.Context, config *config.Config, dockerCli *docker.Client, auth *compose.AuthConfig) error {
	compose, err := compose.New(&compose.Config{
		Manifest: config,
		Docker:   dockerCli,
		DryRun:   ctx.Bool("dry"),
		Remove:   true,
		Auth:     auth,
	})
	if err != nil {
		return err
	}
	return compose.RunAction()
}
// runCommand runs the compose run action for the given manifest.
func runCommand(ctx *cli.Context) {
	ansibleResp := initAnsubleResp(ctx)

	// TODO: fatalf is duplicated in run(), pull() and clean();
	// a possible refactoring is sketched in makeFatalf below
	fatalf := func(err error) {
		if ansibleResp != nil {
			ansibleResp.Error(err).WriteTo(os.Stdout)
		}
		log.Fatal(err)
	}

	initLogs(ctx)

	dockerCli := initDockerClient(ctx)
	config := initComposeConfig(ctx, dockerCli)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Manifest: config,
		Docker:   dockerCli,
		Force:    ctx.Bool("force"),
		DryRun:   ctx.Bool("dry"),
		Attach:   ctx.Bool("attach"),
		Wait:     ctx.Duration("wait"),
		Pull:     ctx.Bool("pull"),
		Auth:     auth,
	})
	if err != nil {
		fatalf(err)
	}

	// if --force is given, first remove all existing containers
	if ctx.Bool("force") {
		if err := doRemove(ctx, config, dockerCli, auth); err != nil {
			fatalf(err)
		}
	}

	if err := compose.RunAction(); err != nil {
		fatalf(err)
	}

	if ansibleResp != nil {
		// ansibleResp.Success("done hehe").WriteTo(os.Stdout)
		compose.WritePlan(ansibleResp).WriteTo(os.Stdout)
	}
}
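// makeFatalf is a hypothetical helper sketching the refactoring suggested by
// the TODO in runCommand above: the same fatalf closure is repeated verbatim
// in pullCommand, runCommand and cleanCommand. To avoid assuming the concrete
// type returned by initAnsubleResp, it takes an optional writeError callback.
// A caller would build it along these lines (names here are illustrative):
//
//	var writeError func(error)
//	if ansibleResp != nil {
//		writeError = func(err error) { ansibleResp.Error(err).WriteTo(os.Stdout) }
//	}
//	fatalf := makeFatalf(writeError)
//
// This sketch is not wired in anywhere in this file.
func makeFatalf(writeError func(error)) func(error) {
	return func(err error) {
		if writeError != nil {
			writeError(err)
		}
		log.Fatal(err)
	}
}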
// recoverCommand runs the compose recover action.
func recoverCommand(ctx *cli.Context) {
	initLogs(ctx)

	dockerCli := initDockerClient(ctx)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Docker:  dockerCli,
		DryRun:  ctx.Bool("dry"),
		Wait:    ctx.Duration("wait"),
		Recover: true,
		Auth:    auth,
	})
	if err != nil {
		log.Fatal(err)
	}

	if err := compose.RecoverAction(); err != nil {
		log.Fatal(err)
	}
}
// pinCommand resolves (pins) image versions via the compose pin action and
// writes the resulting template variables to stdout or to the --output file,
// as YAML or JSON.
func pinCommand(ctx *cli.Context) {
	initLogs(ctx)

	var (
		vars   template.Vars
		data   []byte
		output = ctx.String("output")
		format = ctx.String("type")
		local  = ctx.BoolT("local")
		hub    = ctx.BoolT("hub")
		fd     = os.Stdout
	)

	if output == "-" && !ctx.GlobalIsSet("verbose") {
		log.SetLevel(log.WarnLevel)
	}

	dockerCli := initDockerClient(ctx)
	config := initComposeConfig(ctx, dockerCli)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Manifest: config,
		Docker:   dockerCli,
		Auth:     auth,
	})
	if err != nil {
		log.Fatal(err)
	}

	if vars, err = compose.PinAction(local, hub); err != nil {
		log.Fatal(err)
	}

	if output != "-" {
		if fd, err = os.Create(output); err != nil {
			log.Fatal(err)
		}
		defer fd.Close()

		// infer JSON output from the file extension unless --type is set explicitly
		if ext := filepath.Ext(output); !ctx.IsSet("type") && ext == ".json" {
			format = "json"
		}
	}

	switch format {
	case "yaml":
		if data, err = yaml.Marshal(vars); err != nil {
			log.Fatal(err)
		}
	case "json":
		if data, err = json.Marshal(vars); err != nil {
			log.Fatal(err)
		}
	default:
		log.Fatalf("Possible types are `yaml` and `json`, unknown type `%s`", format)
	}

	if _, err := io.Copy(fd, bytes.NewReader(data)); err != nil {
		log.Fatal(err)
	}
}