// Run executes the create operation against every target, creating
// instance containers for each node that supports creation.
func (operation *CreateOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: create")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        instances, hasInstances := target.Instances()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            // don't touch the node value here, as it may be empty
            nodeLogger.Warning("No node [" + targetID + "]")
        } else if !node.Can("create") {
            nodeLogger.Info("Node doesn't create [" + node.MachineName() + ":" + node.Type() + "]")
        } else if !hasInstances {
            nodeLogger.Info("No valid instances specified in target list [" + node.MachineName() + "]")
        } else {
            nodeLogger.Message("Creating instance containers")
            for _, id := range instances.InstancesOrder() {
                instance, _ := instances.Instance(id)
                instance.Client().Create(logger, []string{}, operation.force)
            }
        }
    }

    return true
}
// Run executes the build operation against every target, building the
// image for each node that supports building.
func (operation *BuildOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: build")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Info("No node [" + targetID + "]")
        } else if !node.Can("build") {
            nodeLogger.Info("Node doesn't build [" + node.MachineName() + "]")
        } else {
            nodeLogger.Message("Building node")
            node.Client().Build(nodeLogger, operation.force)
        }
    }

    return true
}
// ScaleDownNumber scales a node down by stopping (and optionally
// removing) up to `number` running instances, newest first.
func (operation *ScaleOperation) ScaleDownNumber(logger log.Log, instances libs.Instances, number int) int {
    count := 0

    // reverse the instance order, so that the most recently started
    // instances are stopped first
    instancesOrder := []string{}
    for _, instanceId := range instances.InstancesOrder() {
        instancesOrder = append([]string{instanceId}, instancesOrder...)
    }

    for _, instanceId := range instancesOrder {
        if instance, ok := instances.Instance(instanceId); ok {
            client := instance.Client()
            if !client.IsRunning() {
                continue
            }

            logger.Info("Node Scaling down. Stopping instance :" + instanceId)
            client.Stop(logger, operation.force, operation.timeout)
            if operation.removeStopped {
                client.Remove(logger, operation.force)
            }

            count++
            if count >= number {
                return count
            }
        }
    }

    return count
}
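// Aside: the prepend loop above is how ScaleDownNumber reverses the
// instance order so that the newest instances are stopped first. Below is
// a minimal standalone sketch of that pattern (illustrative names only,
// not part of the coach API). Prepending is O(n^2); an index-swapping
// reversal would avoid that, but the list of instances is small.
package main

import "fmt"

// reverseOrder returns a copy of ids in reverse order by prepending,
// mirroring the loop used in ScaleDownNumber.
func reverseOrder(ids []string) []string {
    reversed := []string{}
    for _, id := range ids {
        reversed = append([]string{id}, reversed...)
    }
    return reversed
}

func main() {
    // the newest instance ("3") comes first, so it would be stopped first
    fmt.Println(reverseOrder([]string{"1", "2", "3"})) // [3 2 1]
}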
// ScaleUpNumber scales a node up by starting up to `number` instances
// that are not already running, creating containers where needed.
func (operation *ScaleOperation) ScaleUpNumber(logger log.Log, instances libs.Instances, number int) int {
    count := 0

    for _, instanceId := range instances.InstancesOrder() {
        if instance, ok := instances.Instance(instanceId); ok {
            client := instance.Client()
            if client.IsRunning() {
                continue
            } else if !client.HasContainer() {
                // create a new container for this instance
                client.Create(logger, []string{}, false)
            }

            logger.Info("Node Scaling up. Starting instance :" + instanceId)
            client.Start(logger, false)

            count++
            if count >= number {
                return count
            }
        }
    }

    return count
}
// Run executes the init-generate operation, writing the generated init
// output either to the logger or to a file.
func (operation *InitGenerateOperation) Run(logger log.Log) bool {
    logger.Info("running init operation:" + operation.output)

    var writer io.Writer
    switch operation.output {
    case "logger":
        fallthrough
    case "":
        writer = logger
    default:
        // allow a trailing "." to mean "name the file after the handler"
        if strings.HasSuffix(operation.output, ".") {
            operation.output = operation.output + operation.handler
        }

        fileWriter, err := os.Create(operation.output)
        if err != nil {
            logger.Error("Could not open output file to write init to:" + operation.output)
            return false
        }
        defer fileWriter.Close()

        // don't include the output file itself in the generated init
        operation.skip = append(operation.skip, operation.output)
        writer = fileWriter
        logger.Message("Opening file for init generation output: " + operation.output)
    }

    initialize.Init_Generate(logger.MakeChild("init-generate"), operation.handler, operation.root, operation.skip, operation.sizeLimit, writer)

    return true
}
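// Aside: a minimal standalone sketch of the writer-selection pattern used
// above: a special name selects a fallback io.Writer, anything else is
// treated as a file path. The helper name and signature are assumptions
// for illustration, not the coach API.
package main

import (
    "fmt"
    "io"
    "os"
)

// pickWriter chooses an output destination and returns a cleanup func,
// so the caller can defer closing without caring which case was taken.
func pickWriter(output string, fallback io.Writer) (io.Writer, func(), error) {
    switch output {
    case "", "logger":
        return fallback, func() {}, nil
    default:
        f, err := os.Create(output)
        if err != nil {
            return nil, nil, err
        }
        return f, func() { f.Close() }, nil
    }
}

func main() {
    w, closeFn, err := pickWriter("", os.Stdout)
    if err != nil {
        panic(err)
    }
    defer closeFn()
    fmt.Fprintln(w, "generated init output")
}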
// Pull pulls the node image from the registry, unless the image is
// already present (and force is not set) or was pulled in this run.
func (client *FSouza_NodeClient) Pull(logger log.Log, force bool) bool {
    image, tag := client.GetImageName()
    actionCacheTag := "pull:" + image + ":" + tag

    // skip images that have already been pulled during this run
    if pulled, ok := actionCache[actionCacheTag]; ok && pulled {
        logger.Message("Node image [" + image + ":" + tag + "] was just pulled, so not pulling it again.")
        return true
    }

    if !force && client.HasImage() {
        logger.Info("Node already has an image [" + image + ":" + tag + "], so not pulling it again. You can force this operation if you want to pull this image.")
        return false
    }

    options := docker.PullImageOptions{
        Repository:    image,
        OutputStream:  logger,
        RawJSONStream: false,
    }
    if tag != "" {
        options.Tag = tag
    }

    var auth docker.AuthConfiguration
    // var ok bool
    // options.Registry = "https://index.docker.io/v1/"
    // auths, _ := docker.NewAuthConfigurationsFromDockerCfg()
    // if auth, ok = auths.Configs[registry]; ok {
    //     options.Registry = registry
    // } else {
    //     node.log.Warning("You have no local login credentials for any repo. Defaulting to no login.")
    auth = docker.AuthConfiguration{}
    options.Registry = "https://index.docker.io/v1/"
    // }

    logger.Message("Pulling node image [" + image + ":" + tag + "] from server [" + options.Registry + "] using auth [" + auth.Username + "]")
    logger.Debug(log.VERBOSITY_DEBUG_LOTS, "AUTH USED: ", map[string]string{"Username": auth.Username, "Password": auth.Password, "Email": auth.Email, "ServerAddress": auth.ServerAddress})

    // ask the docker client to pull the image
    err := client.backend.PullImage(options, auth)
    if err != nil {
        logger.Error("Node image not pulled : " + image + " => " + err.Error())
        actionCache[actionCacheTag] = false
        return false
    }

    client.backend.Refresh(true, false)
    logger.Message("Node image pulled: " + image + ":" + tag)
    actionCache[actionCacheTag] = true
    return true
}
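// Aside: a standalone sketch of the per-run action cache used by Pull
// above, so repeated targets sharing an image don't pull it twice. The
// real cache is a package-level map keyed by "pull:image:tag"; the
// function and image names here are illustrative only.
package main

import "fmt"

var actionCache = map[string]bool{}

// pullOnce records a successful pull for the rest of the run, and
// short-circuits if the image was already pulled.
func pullOnce(image string) bool {
    key := "pull:" + image
    if pulled, seen := actionCache[key]; seen && pulled {
        fmt.Println("already pulled:", image)
        return true
    }
    fmt.Println("pulling:", image)
    actionCache[key] = true // record success for the rest of the run
    return true
}

func main() {
    pullOnce("nginx:latest")
    pullOnce("nginx:latest") // second call is a cache hit
}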
// Run executes the scale operation, scaling each target node up or
// down by the requested number of instances.
func (operation *ScaleOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: scale")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    if operation.scale == 0 {
        logger.Warning("scale operation was told to scale to 0")
        return false
    }

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Info("No node [" + targetID + "]")
        } else if !node.Can("scale") {
            nodeLogger.Info("Node doesn't Scale [" + node.MachineName() + "]")
        } else {
            nodeLogger.Message("Scaling node " + node.Id())

            if operation.scale > 0 {
                count := operation.ScaleUpNumber(nodeLogger, node.Instances(), operation.scale)
                if count == 0 {
                    nodeLogger.Warning("Scale operation could not scale up any new instances of node")
                } else if count < operation.scale {
                    nodeLogger.Warning("Scale operation could not scale up all of the requested instances of node. " + strconv.FormatInt(int64(count), 10) + " started.")
                } else {
                    nodeLogger.Message("Scale operation scaled up " + strconv.FormatInt(int64(count), 10) + " instances")
                }
            } else {
                count := operation.ScaleDownNumber(nodeLogger, node.Instances(), -operation.scale)
                if count == 0 {
                    nodeLogger.Warning("Scale operation could not scale down any instances of node")
                } else if count < -operation.scale {
                    nodeLogger.Warning("Scale operation could not scale down all of the requested instances of node. " + strconv.FormatInt(int64(count), 10) + " stopped.")
                } else {
                    nodeLogger.Message("Scale operation scaled down " + strconv.FormatInt(int64(count), 10) + " instances")
                }
            }
        }
    }

    return true
}
// copyFileRecursive copies a file, or recursively copies a directory,
// from the source root into the destination root.
func (task *InitTaskFileBase) copyFileRecursive(logger log.Log, destinationRootPath string, sourceRootPath string, sourcePath string) bool {
    fullPath := sourceRootPath
    if sourcePath != "" {
        fullPath = path.Join(fullPath, sourcePath)
    }

    // get properties of the source
    info, err := os.Stat(fullPath)
    if err != nil {
        logger.Warning("File does not exist :" + fullPath)
        return false
    }

    if info.Mode().IsDir() {
        directory, err := os.Open(fullPath)
        if err != nil {
            logger.Warning("Could not open directory :" + fullPath)
            return false
        }
        defer directory.Close()

        objects, err := directory.Readdir(-1)
        if err != nil {
            logger.Warning("Could not read directory :" + fullPath)
            return false
        }

        for _, obj := range objects {
            childSourcePath := path.Join(sourcePath, obj.Name())
            if !task.copyFileRecursive(logger, destinationRootPath, sourceRootPath, childSourcePath) {
                logger.Warning("Recursive copy failed")
            }
        }
        return true
    }

    // copy a single file
    destinationPath := path.Join(destinationRootPath, sourcePath)
    if task.CopyFile(logger, destinationPath, sourceRootPath) {
        logger.Info("--> Copied file (recursively): " + sourcePath + " [from " + sourceRootPath + "]")
        return true
    }
    logger.Warning("--> Failed to copy file: " + sourcePath + " [from " + sourceRootPath + "]")
    return false
}
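// Aside: a standalone sketch of the same recursion using only the
// standard library: directories are walked and recreated, files are
// copied byte-for-byte. This is not the coach implementation, and the
// paths in main are placeholders.
package main

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
)

func copyTree(dst, src string) error {
    info, err := os.Stat(src)
    if err != nil {
        return err
    }

    if info.IsDir() {
        // recreate the directory, then recurse into its entries
        if err := os.MkdirAll(dst, info.Mode().Perm()); err != nil {
            return err
        }
        entries, err := os.ReadDir(src)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            if err := copyTree(filepath.Join(dst, entry.Name()), filepath.Join(src, entry.Name())); err != nil {
                return err
            }
        }
        return nil
    }

    // plain file: copy the contents
    in, err := os.Open(src)
    if err != nil {
        return err
    }
    defer in.Close()

    out, err := os.Create(dst)
    if err != nil {
        return err
    }
    defer out.Close()

    _, err = io.Copy(out, in)
    return err
}

func main() {
    if err := copyTree("/tmp/coach-copy-dst", "/tmp/coach-copy-src"); err != nil {
        fmt.Println("copy failed:", err)
    }
}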
// Run executes the pause operation, pausing every running instance of
// each target node that supports pausing.
func (operation *PauseOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: pause")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        instances, hasInstances := target.Instances()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Warning("No node [" + targetID + "]")
        } else if !node.Can("pause") {
            nodeLogger.Info("Node doesn't Pause [" + node.MachineName() + ":" + node.Type() + "]")
        } else if !hasInstances {
            nodeLogger.Info("No valid instances specified in target list [" + node.MachineName() + "]")
        } else {
            nodeLogger.Message("Pausing instances")

            if !instances.IsFiltered() {
                nodeLogger.Info("Switching to using all instances")
                instances.UseAll()
            }

            for _, id := range instances.InstancesOrder() {
                instance, _ := instances.Instance(id)
                if !instance.IsRunning() {
                    if !instance.IsReady() {
                        nodeLogger.Info("Instance will not be paused as it is not ready :" + id)
                    } else {
                        nodeLogger.Info("Instance will not be paused as it is not running :" + id)
                    }
                } else {
                    instance.Client().Pause(logger)
                }
            }
        }
    }

    return true
}
// Build builds the node image from the configured build path.
func (client *FSouza_NodeClient) Build(logger log.Log, force bool) bool {
    image, tag := client.GetImageName()

    if client.settings.BuildPath == "" {
        logger.Warning("Node image [" + image + ":" + tag + "] not built as an empty path was provided. You must point Build: to a path inside .coach")
        return false
    }

    if !force && client.HasImage() {
        logger.Info("Node image [" + image + ":" + tag + "] not built as an image already exists. You can force this operation to build this image")
        return false
    }

    // determine an absolute buildPath to the build, for Docker to use.
    buildPath := ""
    for _, confBuildPath := range client.conf.Paths.GetConfSubPaths(client.settings.BuildPath) {
        logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Looking for Build: "+confBuildPath)
        if _, err := os.Stat(confBuildPath); !os.IsNotExist(err) {
            buildPath = confBuildPath
            break
        }
    }
    if buildPath == "" {
        logger.Error("No matching build path could be found [" + client.settings.BuildPath + "]")
        return false
    }

    options := docker.BuildImageOptions{
        Name:           image + ":" + tag,
        ContextDir:     buildPath,
        RmTmpContainer: true,
        OutputStream:   logger,
    }

    logger.Info("Building node image [" + image + ":" + tag + "] From build path [" + buildPath + "]")

    // ask the docker client to build the image
    err := client.backend.BuildImage(options)
    if err != nil {
        logger.Error("Node build failed [" + client.node.MachineName() + "] in build path [" + buildPath + "] => " + err.Error())
        return false
    }

    client.backend.Refresh(true, false)
    logger.Message("Node successfully built image [" + image + ":" + tag + "] From path [" + buildPath + "]")
    return true
}
// Get some base configuration for the project conf based on
// searching for a .coach path
func (project *Project) from_DefaultPaths(logger log.Log, workingDir string) {
    logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Creating default Project")

    homeDir := "."
    if currentUser, err := user.Current(); err == nil {
        homeDir = currentUser.HomeDir
    } else {
        homeDir = os.Getenv("HOME")
    }

    // walk up from the working directory until a .coach folder is found,
    // stopping at the home directory or the filesystem root
    projectRootDirectory := workingDir
    _, err := os.Stat(path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER))
RootSearch:
    for err != nil {
        projectRootDirectory = path.Dir(projectRootDirectory)
        if projectRootDirectory == homeDir || projectRootDirectory == "." || projectRootDirectory == "/" {
            logger.Info("Could not find a project folder, coach will assume that this project is not initialized.")
            projectRootDirectory = workingDir
            break RootSearch
        }
        _, err = os.Stat(path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER))
    }

    /**
     * Set up some frequently used paths
     */
    project.SetPath("user-home", homeDir, true)
    project.SetPath("user-coach", path.Join(homeDir, COACH_PROJECT_CONF_FOLDER), true)
    project.SetPath("project-root", projectRootDirectory, true)
    project.SetPath("project-coach", path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER), true)

    /**
     * @Note that it is advisable not to test whether a path exists, as
     * that can cause race conditions, and can produce an invalid test,
     * as the path could be created between the test and the use of
     * the path.
     */
}
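// Aside: a standalone sketch of the same upward search: climb parent
// directories until one contains the marker folder (".coach" in coach's
// case), or give up and fall back to the starting directory. The helper
// name and the exact stopping conditions are illustrative assumptions.
package main

import (
    "fmt"
    "os"
    "path"
)

func findProjectRoot(workingDir, homeDir, marker string) string {
    dir := workingDir
    for {
        if _, err := os.Stat(path.Join(dir, marker)); err == nil {
            return dir // found the marker folder
        }
        parent := path.Dir(dir)
        if parent == dir || parent == homeDir || parent == "/" || parent == "." {
            return workingDir // not initialized; fall back to where we started
        }
        dir = parent
    }
}

func main() {
    home, _ := os.UserHomeDir()
    wd, _ := os.Getwd()
    fmt.Println(findProjectRoot(wd, home, ".coach"))
}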
// Run executes the help operation, rendering either a help topic or
// the help output of a matching operation.
func (operation *HelpOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: help")

    helpTopicName := "help"
    helpTopicFlags := []string{}
    if len(operation.flags) > 0 {
        helpTopicName = operation.flags[0]
    }
    if len(operation.flags) > 1 {
        helpTopicFlags = operation.flags[1:]
    }

    Helper := operation.getHelpObject()
    if topic, ok := Helper.Topic(helpTopicName, helpTopicFlags); ok {
        logger.Message(topic)
        return true
    }

    // no direct topic matched; see if the topic names an operation,
    // and if so let that operation print its own help
    for _, operationName := range ListOperations() {
        if helpTopicName == operationName || strings.HasPrefix(helpTopicName, operationName+":") {
            if helpOperations := MakeOperation(logger, operation.conf, operationName, operation.flags, &libs.Targets{}); len(helpOperations.operationsList) > 0 {
                for _, helpOperation := range helpOperations.operationsList {
                    helpOperation.Help(append([]string{helpTopicName}, helpTopicFlags...))
                }
                return true
            }
        }
    }

    logger.Warning("Unknown help topic")
    return false
}
// Run executes the run operation, running the configured command in
// each target instance (or once with no instance id if none are set).
func (operation *RunOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: run")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        instances, _ := target.Instances()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Info("No node [" + targetID + "]")
        } else if !node.Can("run") {
            nodeLogger.Info("Node doesn't Run [" + node.MachineName() + "]")
        } else {
            // with no explicit instances, run once against an empty id
            instanceIds := instances.InstancesOrder()
            if len(instanceIds) == 0 {
                instanceIds = []string{""}
            }
            for _, id := range instanceIds {
                if instance, ok := instances.Instance(id); ok {
                    instance.Client().Run(logger, false, operation.cmd)
                }
            }
        }
    }

    return true
}
// Run executes the clean operation, stopping and removing instance
// containers, and optionally wiping the built node image.
func (operation *CleanOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: clean")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        instances, hasInstances := target.Instances()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Info("No node [" + targetID + "]")
        } else if !node.Can("clean") {
            nodeLogger.Info("Node doesn't Clean [" + node.MachineName() + "]")
        } else {
            nodeLogger.Message("Cleaning node [" + node.Id() + "]")

            if hasInstances {
                if !(operation.defaultOnly || instances.IsFiltered()) {
                    nodeLogger.Info("Switching to using all instances")
                    instances.UseAll()
                }

                instanceIds := instances.InstancesOrder()
                if len(instanceIds) > 0 {
                    for _, id := range instanceIds {
                        instance, _ := instances.Instance(id)
                        instanceClient := instance.Client()

                        if instanceClient.HasContainer() {
                            // stop the container before removing it
                            if instanceClient.IsRunning() {
                                instanceClient.Stop(logger, operation.force, operation.timeout)
                            }
                            instanceClient.Remove(logger, operation.force)
                            nodeLogger.Message("Cleaning node instance [" + id + "]")
                        } else {
                            nodeLogger.Info("Node instance has no container to clean [" + id + "]")
                        }
                    }
                } else {
                    nodeLogger.Info("Node has no instances to clean")
                }
            }

            if operation.wipe {
                if node.Can("build") {
                    nodeClient := node.Client()
                    nodeClient.Destroy(nodeLogger, operation.force)
                    nodeLogger.Message("Node build cleaned")
                } else {
                    nodeLogger.Message("Node was not built, so will not be removed")
                }
            }
        }
    }

    return true
}
// Run runs a command in a disposable container for the instance:
// create the container if needed, start it, attach to it, and (unless
// persistent) remove it afterwards.
func (client *FSouza_InstanceClient) Run(logger log.Log, persistent bool, cmd []string) bool {
    // a quiet logger for the noisy supporting operations
    hushedLogger := logger.MakeChild("RunSupport")
    hushedLogger.Hush()

    instance := client.instance

    // Set up some additional settings for TTY commands
    if client.settings.Config.Tty {
        // set a default hostname to make a prettier prompt
        if client.settings.Config.Hostname == "" {
            client.settings.Config.Hostname = instance.Id()
        }
        // make sure that all tty runs have openstdin
        client.settings.Config.OpenStdin = true
    }
    client.settings.Config.AttachStdin = true
    client.settings.Config.AttachStdout = true
    client.settings.Config.AttachStderr = true

    // 1. get the container for the instance (create it if needed)
    hasContainer := client.HasContainer()
    if !hasContainer {
        logger.Info("Creating new disposable RUN container")

        if hasContainer = client.Create(hushedLogger, cmd, false); hasContainer {
            logger.Debug(log.VERBOSITY_DEBUG, "Created disposable run container")
            if !persistent {
                // 4. [DEFERRED] remove the container (if not instructed to keep it)
                defer func(client *FSouza_InstanceClient, hushedLogger log.Log) {
                    client.backend.Refresh(false, true)
                    if client.IsRunning() {
                        client.Stop(hushedLogger, true, 0)
                    }
                    client.Remove(hushedLogger, true)
                }(client, hushedLogger)
            }
        } else {
            logger.Error("Failed to create disposable run container")
        }
    } else {
        logger.Info("Run container already exists")
    }

    if hasContainer {
        // 2. start the container
        logger.Info("Starting RUN container")
        ok := client.Start(hushedLogger, false)

        // 3. attach to the container
        if ok {
            logger.Info("Attaching to disposable RUN container")
            client.Attach(logger)
            return true
        }
        logger.Error("Could not start RUN container")
        return false
    }

    logger.Error("Could not create RUN container")
    return false
}
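// Aside: a standalone sketch of the defer-based cleanup ordering used
// above: cleanup is registered as soon as creation succeeds, so the
// container is removed on every exit path. Print statements stand in
// for the real client calls; nothing here is the coach API.
package main

import "fmt"

func runDisposable(persistent bool) {
    fmt.Println("create container")

    if !persistent {
        defer fmt.Println("cleanup: stop and remove container")
    }

    fmt.Println("start and attach")
    // any return from here on still triggers the deferred cleanup
}

func main() {
    runDisposable(false)
}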
// Run executes the init operation, creating a new coach project from
// the chosen handler (user, demo, git, yaml, or default).
func (operation *InitOperation) Run(logger log.Log) bool {
    logger.Info("running init operation")

    var err error
    var ok bool
    var targetPath, coachPath string

    targetPath = operation.root
    if targetPath == "" {
        targetPath, ok = operation.conf.Path("project-root")
        if !ok || targetPath == "" {
            targetPath, err = os.Getwd()
            if err != nil {
                logger.Error("No path suggested for new project init")
                return false
            }
        }
    }

    _, err = os.Stat(targetPath)
    if err != nil {
        logger.Error("Invalid path suggested for new project init : [" + targetPath + "] => " + err.Error())
        return false
    }

    coachPath, _ = operation.conf.Paths.Path("coach-root")

    logger.Message("Preparing INIT operation [" + operation.handler + ":" + operation.source + "] in path : " + targetPath)

    _, err = os.Stat(coachPath)
    if !operation.force && err == nil {
        logger.Error("cannot create new project folder, as one already exists")
        return false
    }

    logger = logger.MakeChild(strings.ToUpper(operation.handler))

    tasks := initialize.InitTasks{}
    tasks.Init(logger.MakeChild("TASKS"), operation.conf, targetPath)

    ok = true
    switch operation.handler {
    case "user":
        ok = tasks.Init_User_Run(logger, operation.source)
    case "demo":
        ok = tasks.Init_Demo_Run(logger, operation.source)
    case "git":
        ok = tasks.Init_Git_Run(logger, operation.source)
    case "yaml":
        ok = tasks.Init_Yaml_Run(logger, operation.source)
    case "default":
        ok = tasks.Init_Default_Run(logger, operation.source)
    default:
        logger.Error("Unknown init handler " + operation.handler)
        ok = false
    }

    if ok {
        logger.Info("Running init tasks")
        tasks.RunTasks(logger)
        return true
    }

    logger.Warning("No init tasks were defined.")
    return false
}
// Create creates a container for the instance from the node image.
func (client *FSouza_InstanceClient) Create(logger log.Log, overrideCmd []string, force bool) bool {
    instance := client.instance

    if !force && client.HasContainer() {
        logger.Info("[" + instance.MachineName() + "]: Skipping node instance, which already has a container")
        return false
    }

    /**
     * Transform node data into a format that can be used
     * for the actual Docker call. This involves transforming
     * the node keys into docker container ids, for things like
     * the name, Links, VolumesFrom etc
     */
    name := instance.MachineName()
    Config := client.settings.Config
    Host := client.settings.Host

    image, tag := client.GetImageName()
    if tag != "" && tag != "latest" {
        image += ":" + tag
    }
    Config.Image = image

    if len(overrideCmd) > 0 {
        Config.Cmd = overrideCmd
    }

    // ask the docker client to create a container for this instance
    options := docker.CreateContainerOptions{
        Name:       name,
        Config:     &Config,
        HostConfig: &Host,
    }

    container, err := client.backend.CreateContainer(options)
    client.backend.Refresh(false, true)

    if err != nil {
        logger.Debug(log.VERBOSITY_DEBUG, "CREATE FAIL CONTAINERS: ", err)

        /**
         * There is a weird bug with the library, where sometimes it
         * reports a missing image error, and yet it still creates the
         * container. It is not clear if this failure occurs in the
         * remote API, or in the dockerclient library.
         */
        if err.Error() == "no such image" && client.HasContainer() {
            // note: the returned container is not usable on this path
            logger.Message("Created instance container [" + name + " FROM " + Config.Image + "]")
            logger.Warning("Docker created the container, but reported an error due to a 'missing image'. This is a known bug, that can be ignored")
            return true
        }

        logger.Error("Failed to create instance container [" + name + " FROM " + Config.Image + "] => " + err.Error())
        return false
    }

    logger.Message("Created instance container [" + name + "] => " + container.ID[:12])
    return true
}
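// Aside: a minimal standalone sketch of the same go-dockerclient create
// call, assuming a reachable Docker daemon and a locally available
// image; the container name and image are placeholders, and this is not
// the coach wiring.
package main

import (
    "fmt"

    docker "github.com/fsouza/go-dockerclient"
)

func main() {
    client, err := docker.NewClientFromEnv()
    if err != nil {
        fmt.Println("could not connect to docker:", err)
        return
    }

    container, err := client.CreateContainer(docker.CreateContainerOptions{
        Name: "coach-sketch",
        Config: &docker.Config{
            Image: "busybox:latest",
            Cmd:   []string{"echo", "hello"},
        },
        HostConfig: &docker.HostConfig{},
    })
    if err != nil {
        fmt.Println("create failed:", err)
        return
    }
    fmt.Println("created container:", container.ID[:12])
}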
// Run executes the up operation: build or pull the node image as
// needed, then create and start instance containers.
func (operation *UpOperation) Run(logger log.Log) bool {
    logger.Info("Running operation: up")
    logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

    for _, targetID := range operation.targets.TargetOrder() {
        target, targetExists := operation.targets.Target(targetID)
        if !targetExists {
            // this is strange
            logger.Warning("Internal target error, was told to use a target that doesn't exist")
            continue
        }

        node, hasNode := target.Node()
        instances, hasInstances := target.Instances()
        nodeLogger := logger.MakeChild(targetID)

        if !hasNode {
            nodeLogger.Info("No node [" + targetID + "]")
        } else if !node.Can("Up") {
            nodeLogger.Info("Node doesn't Up [" + node.MachineName() + "]")
        } else {
            // only query capabilities once we know the node exists
            build := node.Can("build")
            pull := node.Can("pull")
            create := node.Can("create")
            start := node.Can("start")

            nodeLogger.Message("Bringing node up")
            nodeClient := node.Client()

            if build {
                if operation.force || !nodeClient.HasImage() {
                    nodeLogger.Message("Building node image")
                    nodeClient.Build(nodeLogger, operation.force)
                } else {
                    nodeLogger.Info("Node already has an image built")
                }
            }

            if pull {
                if operation.force || !nodeClient.HasImage() {
                    nodeLogger.Message("Pulling node image")
                    nodeClient.Pull(nodeLogger, operation.force)
                } else {
                    nodeLogger.Info("Node already has an image pulled")
                }
            }

            if hasInstances && (create || start) {
                for _, id := range instances.InstancesOrder() {
                    instance, _ := instances.Instance(id)
                    instanceClient := instance.Client()

                    if create {
                        if operation.force || !instanceClient.HasContainer() {
                            nodeLogger.Message("Creating node instance container : " + id)
                            instanceClient.Create(nodeLogger, []string{}, operation.force)
                        } else {
                            nodeLogger.Info("Instance already has a container created : " + id)
                        }
                    }

                    if start {
                        if operation.force || !instanceClient.IsRunning() {
                            nodeLogger.Message("Starting node instance container : " + id)
                            instanceClient.Start(nodeLogger, operation.force)
                        } else {
                            nodeLogger.Info("Instance already has a container running : " + id)
                        }
                    }
                }
            }
        }
    }

    return true
}