// ProjectPull pulls images for services. func ProjectPull(p project.APIProject, c *cli.Context) error { err := p.Pull(context.Background(), c.Args()...) if err != nil && !c.Bool("ignore-pull-failures") { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectDelete deletes services. func ProjectDelete(p project.APIProject, c *cli.Context) error { options := options.Delete{ RemoveVolume: c.Bool("v"), } if !c.Bool("force") { stoppedContainers, err := p.Containers(context.Background(), project.Filter{ State: project.Stopped, }, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } if len(stoppedContainers) == 0 { fmt.Println("No stopped containers") return nil } fmt.Printf("Going to remove %v\nAre you sure? [yN]\n", strings.Join(stoppedContainers, ", ")) var answer string _, err = fmt.Scanln(&answer) if err != nil { return cli.NewExitError(err.Error(), 1) } if answer != "y" && answer != "Y" { return nil } } err := p.Delete(context.Background(), options, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectUnpause unpauses service containers. func ProjectUnpause(p project.APIProject, c *cli.Context) error { err := p.Unpause(context.Background(), c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectStop stops all services. func ProjectStop(p project.APIProject, c *cli.Context) error { err := p.Stop(context.Background(), c.Int("timeout"), c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectLog gets services logs. func ProjectLog(p project.APIProject, c *cli.Context) error { err := p.Log(context.Background(), c.Bool("follow"), c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectKill forces stop service containers. func ProjectKill(p project.APIProject, c *cli.Context) error { err := p.Kill(context.Background(), c.String("signal"), c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectPs lists the containers. func ProjectPs(p project.APIProject, c *cli.Context) error { qFlag := c.Bool("q") allInfo, err := p.Ps(context.Background(), qFlag, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } os.Stdout.WriteString(allInfo.String(!qFlag)) return nil }
// ProjectDelete deletes services. func ProjectDelete(p project.APIProject, c *cli.Context) error { options := options.Delete{ RemoveVolume: c.Bool("v"), } err := p.Delete(context.Background(), options, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectConfig validates and print the compose file. func ProjectConfig(p project.APIProject, c *cli.Context) error { yaml, err := p.Config() if err != nil { return cli.NewExitError(err.Error(), 1) } if !c.Bool("quiet") { fmt.Println(yaml) } return nil }
func ProjectCreate(p project.APIProject, c *cli.Context) error { if err := p.Create(context.Background(), options.Create{}, c.Args()...); err != nil { return err } // This is to fix circular links... What!? It works. if err := p.Create(context.Background(), options.Create{}, c.Args()...); err != nil { return err } return nil }
// ProjectBuild builds or rebuilds services. func ProjectBuild(p project.APIProject, c *cli.Context) error { config := options.Build{ NoCache: c.Bool("no-cache"), ForceRemove: c.Bool("force-rm"), Pull: c.Bool("pull"), } err := p.Build(context.Background(), config, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectCreate creates all services but do not start them. func ProjectCreate(p project.APIProject, c *cli.Context) error { options := options.Create{ NoRecreate: c.Bool("no-recreate"), ForceRecreate: c.Bool("force-recreate"), NoBuild: c.Bool("no-build"), } err := p.Create(context.Background(), options, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectDown brings all services down (stops and clean containers). func ProjectDown(p project.APIProject, c *cli.Context) error { options := options.Down{ RemoveVolume: c.Bool("volumes"), RemoveImages: options.ImageType(c.String("rmi")), RemoveOrphans: c.Bool("remove-orphans"), } err := p.Down(context.Background(), options, c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } return nil }
// ProjectPs lists the containers. func ProjectPs(p project.APIProject, c *cli.Context) error { qFlag := c.Bool("q") allInfo, err := p.Ps(context.Background(), c.Args()...) if err != nil { return cli.NewExitError(err.Error(), 1) } columns := []string{"Name", "Command", "State", "Ports"} if qFlag { columns = []string{"Id"} } os.Stdout.WriteString(allInfo.String(columns, !qFlag)) return nil }
// ProjectPort prints the public port for a port binding. func ProjectPort(p project.APIProject, c *cli.Context) error { if len(c.Args()) != 2 { return cli.NewExitError("Please pass arguments in the form: SERVICE PORT", 1) } index := c.Int("index") protocol := c.String("protocol") serviceName := c.Args()[0] privatePort := c.Args()[1] port, err := p.Port(context.Background(), index, protocol, serviceName, privatePort) if err != nil { return cli.NewExitError(err.Error(), 1) } fmt.Println(port) return nil }
// ProjectEvents listen for real-time events of containers. func ProjectEvents(p project.APIProject, c *cli.Context) error { evts, err := p.Events(context.Background(), c.Args()...) if err != nil { return err } var printfn func(events.ContainerEvent) if c.Bool("json") { printfn = printJSON } else { printfn = printStd } for event := range evts { printfn(event) } return nil }
// ProjectRun runs a given command within a service's container. func ProjectRun(p project.APIProject, c *cli.Context) error { if len(c.Args()) == 0 { logrus.Fatal("No service specified") } serviceName := c.Args()[0] commandParts := c.Args()[1:] options := options.Run{ Detached: c.Bool("d"), } exitCode, err := p.Run(context.Background(), serviceName, commandParts, options) if err != nil { return cli.NewExitError(err.Error(), 1) } return cli.NewExitError("", exitCode) }
// ProjectUp brings all services up.
// When not detached ("-d"), it streams logs in the background and blocks
// until either SIGINT/SIGTERM arrives (then services are stopped) or the
// log stream ends.
func ProjectUp(p project.APIProject, c *cli.Context) error {
	options := options.Up{
		Create: options.Create{
			NoRecreate:    c.Bool("no-recreate"),
			ForceRecreate: c.Bool("force-recreate"),
			NoBuild:       c.Bool("no-build"),
			ForceBuild:    c.Bool("build"),
		},
	}
	// Cancelable context so the signal handler can stop the log stream.
	ctx, cancelFun := context.WithCancel(context.Background())
	err := p.Up(ctx, options, c.Args()...)
	if err != nil {
		return cli.NewExitError(err.Error(), 1)
	}
	if !c.Bool("d") {
		signalChan := make(chan os.Signal, 1)
		cleanupDone := make(chan bool)
		signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
		errChan := make(chan error)
		// Follow logs in the background; the channel carries the terminal error (or nil).
		go func() {
			errChan <- p.Log(ctx, true, c.Args()...)
		}()
		// Wait for whichever happens first: an interrupt or log completion.
		go func() {
			select {
			case <-signalChan:
				fmt.Printf("\nGracefully stopping...\n")
				cancelFun()
				// Best effort: ProjectStop's error is intentionally ignored here.
				ProjectStop(p, c)
				cleanupDone <- true
			case err := <-errChan:
				if err != nil {
					logrus.Fatal(err)
				}
				cleanupDone <- true
			}
		}()
		<-cleanupDone
		return nil
	}
	return nil
}
func upgradeInfo(up bool, p project.APIProject, from, to string, opts UpgradeOpts) (*client.Service, *client.Service, *client.RancherClient, error) { fromService, err := p.CreateService(from) if err != nil { return nil, nil, nil, err } toService, err := p.CreateService(to) if err != nil { return nil, nil, nil, err } rFromService, ok := fromService.(*rancher.RancherService) if !ok { return nil, nil, nil, fmt.Errorf("%s is not a Rancher service", from) } rToService, ok := toService.(*rancher.RancherService) if !ok { return nil, nil, nil, fmt.Errorf("%s is not a Rancher service", to) } if up { if err := rToService.Up(context.Background(), options.Up{}); err != nil { return nil, nil, nil, err } } source, err := rFromService.RancherService() if err != nil { return nil, nil, nil, err } dest, err := rToService.RancherService() if err != nil { return nil, nil, nil, err } return source, dest, rFromService.Client(), nil }
// ProjectScale scales services. func ProjectScale(p project.APIProject, c *cli.Context) error { servicesScale := map[string]int{} for _, arg := range c.Args() { kv := strings.SplitN(arg, "=", 2) if len(kv) != 2 { return cli.NewExitError(fmt.Sprintf("Invalid scale parameter: %s", arg), 2) } name := kv[0] count, err := strconv.Atoi(kv[1]) if err != nil { return cli.NewExitError(fmt.Sprintf("Invalid scale parameter: %v", err), 2) } servicesScale[name] = count } err := p.Scale(context.Background(), c.Int("timeout"), servicesScale) if err != nil { return cli.NewExitError(err.Error(), 0) } return nil }
func ProjectUp(p project.APIProject, c *cli.Context) error { if err := p.Create(context.Background(), options.Create{}, c.Args()...); err != nil { return err } if err := p.Up(context.Background(), options.Up{}, c.Args()...); err != nil { return err } if !c.Bool("d") { p.Log(context.Background(), true) // wait forever <-make(chan interface{}) } return nil }
// createShipment maps a ComposeShipment (built from the compose files) onto
// a brand-new NewShipmentEnvironment, pushes it to harbor, and triggers the
// shipment, printing the trigger messages.
//
// NOTE(review): Login() reassigns the token parameter (and introduces
// userName), but SaveNewShipmentEnvironment below is called with the
// original username parameter and the Login() token — confirm which
// credentials are intended for each field/call.
func createShipment(username string, token string, shipmentName string, dockerCompose DockerCompose, shipment ComposeShipment, dockerComposeProject project.APIProject) {
	userName, token, _ := Login()

	//map a ComposeShipment object (based on compose files) into
	//a new NewShipmentEnvironment object

	if Verbose {
		log.Println("creating shipment environment")
	}

	//create object used to create a new shipment environment from scratch
	newShipment := NewShipmentEnvironment{
		Username: userName,
		Token:    token,
		Info: NewShipmentInfo{
			Name:  shipmentName,
			Group: shipment.Group,
		},
	}

	//add shipment-level env vars
	newShipment.Info.Vars = make([]EnvVarPayload, 0)
	newShipment.Info.Vars = append(newShipment.Info.Vars, envVar("CUSTOMER", shipment.Group))
	newShipment.Info.Vars = append(newShipment.Info.Vars, envVar("PROPERTY", shipment.Property))
	newShipment.Info.Vars = append(newShipment.Info.Vars, envVar("PROJECT", shipment.Project))
	newShipment.Info.Vars = append(newShipment.Info.Vars, envVar("PRODUCT", shipment.Product))

	//create environment
	newShipment.Environment = NewEnvironment{
		Name: shipment.Env,
		Vars: make([]EnvVarPayload, 0),
	}

	//add environment-level env vars
	for name, value := range shipment.Environment {
		newShipment.Environment.Vars = append(newShipment.Environment.Vars, envVar(name, value))
	}

	//containers
	//iterate defined containers and apply container level updates
	newShipment.Containers = make([]NewContainer, 0)
	for containerIndex, container := range shipment.Containers {
		if Verbose {
			log.Printf("processing container: %v", container)
		}

		//lookup the container in the list of services in the docker-compose file
		dockerService := dockerCompose.Services[container]
		if dockerService.Image == "" {
			log.Fatalln("'image' is required in docker compose file")
		}

		//parse image:tag and map to name/version
		// NOTE(review): assumes the image reference always contains a ":tag";
		// parsedImage[1] would panic on an untagged image — confirm inputs.
		parsedImage := strings.Split(dockerService.Image, ":")

		newContainer := NewContainer{
			Name:    container,
			Image:   dockerService.Image,
			Version: parsedImage[1],
			Vars:    make([]EnvVarPayload, 0),
			Ports:   make([]PortPayload, 0),
		}

		//container-level env vars
		serviceConfig, success := dockerComposeProject.GetServiceConfig(newContainer.Name)
		if !success {
			log.Fatal("error getting service config")
		}
		envVarMap := serviceConfig.Environment.ToMap()
		for name, value := range envVarMap {
			if name != "" {
				if Verbose {
					log.Println("processing " + name)
				}
				newContainer.Vars = append(newContainer.Vars, envVar(name, value))
			}
		}

		//map the docker compose service ports to harbor ports
		if len(dockerService.Ports) == 0 {
			log.Fatalln("At least one port mapping is required in docker compose file.")
		}
		// Only the first port mapping is used; expected form "external:internal".
		parsedPort := strings.Split(dockerService.Ports[0], ":")

		//validate health check
		healthCheck := dockerService.Environment["HEALTHCHECK"]
		if healthCheck == "" {
			log.Fatalln("A container-level 'HEALTHCHECK' environment variable is required")
		}

		//map first port in docker compose to default primary HTTP "PORT"
		external, err := strconv.Atoi(parsedPort[0])
		if err != nil {
			log.Fatalln("invalid port")
		}
		internal, err := strconv.Atoi(parsedPort[1])
		if err != nil {
			log.Fatalln("invalid port")
		}
		primaryPort := PortPayload{
			Name:        "PORT",
			Value:       internal,
			PublicPort:  external,
			Primary:     (containerIndex == 0),
			Protocol:    "http",
			External:    false,
			Healthcheck: healthCheck,
		}
		newContainer.Ports = append(newContainer.Ports, primaryPort)

		//TODO: once Container/Port construct is added to harbor-compose.yml,
		//they should override these defaults

		//add container to list
		newShipment.Containers = append(newShipment.Containers, newContainer)
	}

	if shipment.Barge == "" {
		log.Fatalln("barge is required for a shipment")
	}

	//add default ec2 provider
	provider := NewProvider{
		Name:     "ec2",
		Barge:    shipment.Barge,
		Replicas: shipment.Replicas,
		Vars:     make([]EnvVarPayload, 0),
	}

	//add provider
	newShipment.Providers = append(newShipment.Providers, provider)

	//push the new shipment/environment up to harbor
	SaveNewShipmentEnvironment(username, token, newShipment)

	//trigger shipment
	success, messages := Trigger(shipmentName, shipment.Env)
	for _, msg := range messages {
		fmt.Println(msg)
	}
	if success && shipment.Replicas > 0 {
		fmt.Println("Please allow up to 5 minutes for DNS changes to take effect.")
	}
}
// updateShipment maps a ComposeShipment (built from the compose files) onto
// a series of API calls that update an existing shipment environment:
// container images, container-level env vars, environment-level env vars,
// shipment-level configuration, and finally a trigger of the shipment.
func updateShipment(username string, token string, currentShipment *ShipmentEnvironment, shipmentName string, dockerCompose DockerCompose, shipment ComposeShipment, dockerComposeProject project.APIProject) {
	//map a ComposeShipment object (based on compose files) into
	//a series of API call to update a shipment

	//iterate defined containers and apply container level updates
	for _, container := range shipment.Containers {
		if Verbose {
			log.Printf("processing container: %v", container)
		}

		//lookup the container in the list of services in the docker-compose file
		dockerService := dockerCompose.Services[container]

		//update the shipment/container with the new image
		if !shipment.IgnoreImageVersion {
			UpdateContainerImage(username, token, shipmentName, shipment, container, dockerService)
		}

		serviceConfig, success := dockerComposeProject.GetServiceConfig(container)
		if !success {
			log.Fatal("error getting service config")
		}
		envVarMap := serviceConfig.Environment.ToMap()
		for evName, evValue := range envVarMap {
			if evName != "" {
				if Verbose {
					log.Println("processing " + evName)
				}
				//create an envvar object
				envVarPayload := envVar(evName, evValue)
				//save the envvar
				SaveEnvVar(username, token, shipmentName, shipment, envVarPayload, container)
			}
		}
	}

	//convert the specified barge into an env var
	if len(shipment.Barge) > 0 {
		//initialize the environment map if it doesn't exist
		if shipment.Environment == nil {
			shipment.Environment = make(map[string]string)
		}
		//set the BARGE env var
		shipment.Environment["BARGE"] = shipment.Barge
	}

	//update shipment/environment-level envvars
	for evName, evValue := range shipment.Environment {
		if Verbose {
			log.Println("processing " + evName)
		}
		//create an envvar object
		envVarPayload := envVar(evName, evValue)
		// Empty container name targets the environment level rather than a container.
		//save the envvar
		SaveEnvVar(username, token, shipmentName, shipment, envVarPayload, "")
	} //envvars

	//update shipment level configuration
	UpdateShipment(username, token, shipmentName, shipment)

	//trigger shipment
	_, messages := Trigger(shipmentName, shipment.Env)
	for _, msg := range messages {
		fmt.Println(msg)
	}

	//if replicas is changing from 0, then show wait messages
	// NOTE(review): this checks the provider's *current* replica count == 0,
	// not that the new replica count is > 0 — confirm intended condition.
	if ec2Provider(currentShipment.Providers).Replicas == 0 {
		fmt.Println("Please allow up to 5 minutes for DNS changes to take effect.")
	}
}
// Upgrade upgrades the Rancher service named from to the service named to
// using Rancher's to-service upgrade strategy. If the destination service
// does not exist yet it is created and scaled to 0 before being brought up.
// Services labeled io.rancher.scheduler.global=true are rejected. With
// opts.Wait or opts.CleanUp set, the call blocks on the upgrade; with
// opts.CleanUp, the source service is deleted once its scale reaches 0.
func Upgrade(p project.APIProject, from, to string, opts UpgradeOpts) error {
	fromService, err := p.CreateService(from)
	if err != nil {
		return err
	}
	toService, err := p.CreateService(to)
	if err != nil {
		return err
	}
	rFromService, ok := fromService.(*rancher.RancherService)
	if !ok {
		return fmt.Errorf("%s is not a Rancher service", from)
	}
	source, err := rFromService.RancherService()
	if err != nil {
		return err
	}
	if source == nil {
		return fmt.Errorf("Failed to find service %s", from)
	}
	if source.LaunchConfig.Labels["io.rancher.scheduler.global"] == "true" {
		return fmt.Errorf("Upgrade is not supported for global services")
	}
	rToService, ok := toService.(*rancher.RancherService)
	if !ok {
		return fmt.Errorf("%s is not a Rancher service", to)
	}
	if service, err := rToService.RancherService(); err != nil {
		return err
	} else if service == nil {
		// Destination doesn't exist yet: create it, then pin scale to 0 so
		// the upgrade strategy controls how containers come up.
		if err := rToService.Create(context.Background(), options.Create{}); err != nil {
			return err
		}
		// TODO timeout shouldn't really be an argument here
		// it's ignored in our implementation anyways
		if err := rToService.Scale(context.Background(), 0, -1); err != nil {
			return err
		}
	}
	if err := rToService.Up(context.Background(), options.Up{}); err != nil {
		return err
	}
	dest, err := rToService.RancherService()
	if err != nil {
		return err
	}
	if dest == nil {
		return fmt.Errorf("Failed to find service %s", to)
	}
	if dest.LaunchConfig.Labels["io.rancher.scheduler.global"] == "true" {
		return fmt.Errorf("Upgrade is not supported for global services")
	}
	upgradeOpts := &client.ServiceUpgrade{
		ToServiceStrategy: &client.ToServiceUpgradeStrategy{
			UpdateLinks:    opts.UpdateLinks,
			FinalScale:     int64(opts.FinalScale),
			BatchSize:      int64(opts.BatchSize),
			IntervalMillis: int64(opts.IntervalMillis),
			ToServiceId:    dest.Id,
		},
	}
	// A FinalScale of -1 means "match the source's current scale".
	if upgradeOpts.ToServiceStrategy.FinalScale == -1 {
		upgradeOpts.ToServiceStrategy.FinalScale = source.Scale
	}
	client := rFromService.Client()
	if opts.Pull {
		if err := rToService.Pull(context.Background()); err != nil {
			return err
		}
	}
	logrus.Infof("Upgrading %s to %s, scale=%d", from, to, upgradeOpts.ToServiceStrategy.FinalScale)
	service, err := client.Service.ActionUpgrade(source, upgradeOpts)
	if err != nil {
		return err
	}
	if opts.Wait || opts.CleanUp {
		if err := rFromService.Wait(service); err != nil {
			return err
		}
	}
	if opts.CleanUp {
		// Reload source to check scale
		source, err = rFromService.RancherService()
		if err != nil {
			return err
		}
		if source.Scale == 0 {
			if err := rFromService.Delete(context.Background(), options.Delete{}); err != nil {
				return err
			}
		} else {
			logrus.Warnf("Not deleting service %s, scale is not 0 but %d", source.Name, source.Scale)
		}
	}
	return nil
}