func (operation *BuildOperation) Run(logger log.Log) bool {
	logger.Info("Running operation: build")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		node, hasNode := target.Node()
		nodeLogger := logger.MakeChild(targetID)

		if !hasNode {
			// node is not set here, so report the target ID rather than dereferencing it
			nodeLogger.Info("No node [" + targetID + "]")
		} else if !node.Can("build") {
			nodeLogger.Info("Node doesn't build [" + node.MachineName() + "]")
		} else {
			nodeLogger.Message("Building node")
			node.Client().Build(nodeLogger, operation.force)
		}
	}
	return true
}
/**
 * This is a fallback client builder, which builds the default
 * coach client. The default coach client is currently the
 * FSouza Docker client, configured to use ENV settings, or
 * a local socket.
 */
func (clientFactories *ClientFactories) from_Default(logger log.Log, project *conf.Project) {
	clientFactorySettings := &FSouza_ClientFactorySettings{}
	clientType := "fsouza"

	if DockerHost := os.Getenv("DOCKER_HOST"); DockerHost == "" {
		logger.Debug(log.VERBOSITY_DEBUG, "No local environment DOCKER settings found, assuming a locally running docker client will be found.")
		clientFactorySettings.Host = "unix:///var/run/docker.sock"
	} else {
		clientFactorySettings.Host = DockerHost
	}

	// if a cert path is set in the environment, pass it on to the client factory
	if DockerCertPath := os.Getenv("DOCKER_CERT_PATH"); DockerCertPath != "" {
		clientFactorySettings.CertPath = DockerCertPath
	}

	factory := FSouza_ClientFactory{}
	if !factory.Init(logger, project, ClientFactorySettings(clientFactorySettings)) {
		logger.Error("Failed to initialize FSouza factory from client factory configuration")
	}

	// Add this factory to the factory list
	logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Client Factory Created [Client_DockerFSouzaFactory]", factory)
	clientFactories.AddClientFactory(clientType, ClientFactory(&factory))
}
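// A minimal environment sketch (values hypothetical): with neither variable
// set, the fallback above targets the local unix socket; exporting the
// standard Docker variables redirects it to a remote daemon.
//
//	export DOCKER_HOST=tcp://192.168.99.100:2376
//	export DOCKER_CERT_PATH=$HOME/.docker/machine/machines/default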
func (operation *InfoOperation) Run(logger log.Log) bool {
	logger.Message("RUNNING INFO OPERATION")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		// only inspect the target after it is known to exist
		node, hasNode := target.Node()
		_, hasInstances := target.Instances()
		nodeLogger := logger.MakeChild(targetID)

		if hasNode {
			nodeLogger.Message(targetID + " Information")
			node.Client().NodeInfo(nodeLogger)
		} else {
			// node is not set here, so report the target ID and skip the instances check
			nodeLogger.Message("No node [" + targetID + "]")
			continue
		}

		if hasInstances {
			node.Instances().Client().InstancesInfo(nodeLogger)
		} else {
			nodeLogger.Message("|-- No instances [" + node.MachineName() + "]")
		}
	}
	return true
}
func (operation *CreateOperation) Run(logger log.Log) bool {
	logger.Info("Running operation: create")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		node, hasNode := target.Node()
		instances, hasInstances := target.Instances()
		nodeLogger := logger.MakeChild(targetID)

		if !hasNode {
			// node is not set here, so report the target ID rather than dereferencing it
			nodeLogger.Warning("No node [" + targetID + "]")
		} else if !node.Can("create") {
			nodeLogger.Info("Node doesn't create [" + node.MachineName() + ":" + node.Type() + "]")
		} else if !hasInstances {
			nodeLogger.Info("No valid instances specified in target list [" + node.MachineName() + "]")
		} else {
			nodeLogger.Message("Creating instance containers")
			for _, id := range instances.InstancesOrder() {
				instance, _ := instances.Instance(id)
				instance.Client().Create(logger, []string{}, operation.force)
			}
		}
	}
	return true
}
// Constructor for BaseNode
func (node *BaseNode) Init(logger log.Log, name string, project *conf.Project, client Client, instancesSettings InstancesSettings) bool {
	node.log = logger
	node.conf = project
	node.name = name
	node.client = client
	node.manualDependencies = []string{}

	// choose an Instances implementation based on the settings type
	instancesMachineName := node.MachineName()
	settingsInterface := instancesSettings.Settings()
	switch settingsInterface.(type) {
	case FixedInstancesSettings:
		node.instances = Instances(&FixedInstances{})
		instancesMachineName += "_fixed_"
	case TemporaryInstancesSettings:
		node.instances = Instances(&TemporaryInstances{})
		instancesMachineName += "_temp_"
	case ScaledInstancesSettings:
		node.instances = Instances(&ScaledInstances{})
		instancesMachineName += "_scaled_"
	case SingleInstancesSettings:
		node.instances = Instances(&SingleInstances{})
	default:
		node.instances = Instances(&SingleInstances{})
	}
	node.instances.Init(logger, instancesMachineName, client, instancesSettings)

	logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Built new node:", node.client)
	return true
}
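// For reference, the settings-to-implementation mapping applied by Init above:
//
//	FixedInstancesSettings     -> FixedInstances     (machine name suffix "_fixed_")
//	TemporaryInstancesSettings -> TemporaryInstances (machine name suffix "_temp_")
//	ScaledInstancesSettings    -> ScaledInstances    (machine name suffix "_scaled_")
//	SingleInstancesSettings    -> SingleInstances    (no suffix; also the fallback default)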
func (clientFactory *FSouza_ClientFactory) Init(logger log.Log, project *conf.Project, settings ClientFactorySettings) bool {
	clientFactory.log = logger
	clientFactory.conf = project

	// make sure that the settings that were given were of the proper "FSouza_ClientFactorySettings" type
	typedSettings := settings.Settings()
	switch asserted := typedSettings.(type) {
	case *FSouza_ClientFactorySettings:
		clientFactory.settings = *asserted
	default:
		logger.Error("Invalid settings type passed to Fsouza Factory")
		logger.Debug(log.VERBOSITY_DEBUG, "Settings passed:", asserted)
	}

	// if we haven't made an actual fsouza docker client, then do it now
	if clientFactory.client == nil {
		if client, ok := clientFactory.makeFsouzaClientWrapper(logger.MakeChild("fsouza")); ok {
			clientFactory.client = client
			return true
		} else {
			logger.Error("Failed to create actual FSouza Docker client from client factory configuration")
			return false
		}
	}
	return true
}
func (node *PullNode) Init(logger log.Log, name string, project *conf.Project, client Client, instancesSettings InstancesSettings) bool {
	node.BaseNode.Init(logger, name, project, client, instancesSettings)

	// a pull node only ever uses the null instance, so warn about any other configuration
	settingsInterface := instancesSettings.Settings()
	switch settingsInterface.(type) {
	case FixedInstancesSettings:
		logger.Warning("Pull node cannot be configured to use fixed instances. Using null instance instead.")
		node.defaultInstances(logger, client, instancesSettings)
	case ScaledInstancesSettings:
		logger.Warning("Pull node cannot be configured to use scaled instances. Using null instance instead.")
		node.defaultInstances(logger, client, instancesSettings)
	case SingleInstancesSettings:
		logger.Warning("Pull node cannot be configured to use single instances. Using null instance instead.")
		node.defaultInstances(logger, client, instancesSettings)
	case TemporaryInstancesSettings:
		logger.Warning("Pull node cannot be configured to use disposable instances. Using null instance instead.")
		node.defaultInstances(logger, client, instancesSettings)
	default:
		node.defaultInstances(logger, client, instancesSettings)
	}
	node.instances.Init(logger, node.MachineName(), client, instancesSettings)

	logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Built new node:", node.client)
	return true
}
func (operation *StatusOperation) Run(logger log.Log) bool {
	logger.Message("RUNNING Status OPERATION")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		// only inspect the target after it is known to exist
		node, hasNode := target.Node()
		instances, hasInstances := target.Instances()
		nodeLogger := logger.MakeChild(targetID)

		status := []string{}
		if hasNode {
			status = append(status, operation.NodeStatus(nodeLogger, node)...)
		} else {
			status = append(status, "No node for target")
		}
		if hasInstances {
			status = append(status, operation.InstancesStatus(nodeLogger, instances)...)
		} else {
			status = append(status, "No instances for target")
		}

		nodeLogger.Message("[" + strings.Join(status, "][") + "]")
	}
	return true
}
// Use the secrets yaml object to configure a project
func (secrets *secrets_Yaml) configureProject(logger log.Log, project *Project) bool {
	for key, value := range secrets.Secrets {
		project.SetToken(key, value)
	}
	logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Configured project from YAML secrets")
	return true
}
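// Illustrative only: a secrets YAML shape that configureProject above would
// turn into project tokens. The top-level key assumes the default yaml
// mapping of the Secrets field (exact casing depends on the struct's yaml
// tags); the keys and values are hypothetical.
//
//	secrets:
//	  DB_PASSWORD: "not-a-real-password"
//	  API_KEY: "not-a-real-key"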
// Try to configure a project by parsing yaml from a byte stream
func (project *Project) from_ConfYamlBytes(logger log.Log, yamlBytes []byte) bool {
	// parse the config file contents as a conf_Yaml object
	source := new(conf_Yaml)
	if err := yaml.Unmarshal(yamlBytes, source); err != nil {
		logger.Warning("YAML parsing error : " + err.Error())
		return false
	}
	logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "YAML source:", *source)

	return source.configureProject(logger, project)
}
func (instances *SingleInstances) Prepare(logger log.Log, client Client, nodes *Nodes, node Node) bool {
	instances.log = logger
	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Prepare: Single Instances")

	instances.instance = SingleInstance{}
	instances.instance.Init(logger, INSTANCE_SINGLE_ID, instances.MachineName(), client, true)

	instances.log.Debug(log.VERBOSITY_DEBUG_WOAH, "Created single instance", instances.instance)
	return true
}
func (client *FSouza_NodeClient) Pull(logger log.Log, force bool) bool {
	image, tag := client.GetImageName()
	actionCacheTag := "pull:" + image + ":" + tag
	// only skip the pull if a previous pull for this image actually succeeded
	if pulled, ok := actionCache[actionCacheTag]; ok && pulled {
		logger.Message("Node image [" + image + ":" + tag + "] was just pulled, so not pulling it again.")
		return true
	}

	if !force && client.HasImage() {
		logger.Info("Node already has an image [" + image + ":" + tag + "], so not pulling it again. You can force this operation if you want to pull this image.")
		return false
	}

	options := docker.PullImageOptions{
		Repository:    image,
		OutputStream:  logger,
		RawJSONStream: false,
	}
	if tag != "" {
		options.Tag = tag
	}

	var auth docker.AuthConfiguration
	// var ok bool
	// options.Registry = "https://index.docker.io/v1/"
	// auths, _ := docker.NewAuthConfigurationsFromDockerCfg()
	// if auth, ok = auths.Configs[registry]; ok {
	// 	options.Registry = registry
	// } else {
	// 	node.log.Warning("You have no local login credentials for any repo. Defaulting to no login.")
	auth = docker.AuthConfiguration{}
	options.Registry = "https://index.docker.io/v1/"
	// }

	logger.Message("Pulling node image [" + image + ":" + tag + "] from server [" + options.Registry + "] using auth [" + auth.Username + "]")
	logger.Debug(log.VERBOSITY_DEBUG_LOTS, "AUTH USED: ", map[string]string{"Username": auth.Username, "Password": auth.Password, "Email": auth.Email, "ServerAddress": auth.ServerAddress})

	// ask the docker client to pull the image
	err := client.backend.PullImage(options, auth)
	if err != nil {
		logger.Error("Node image not pulled : " + image + " => " + err.Error())
		actionCache[actionCacheTag] = false
		return false
	} else {
		client.backend.Refresh(true, false)
		logger.Message("Node image pulled: " + image + ":" + tag)
		// record the successful pull so that repeated targets skip it
		actionCache[actionCacheTag] = true
		return true
	}
}
func (operation *ScaleOperation) Run(logger log.Log) bool {
	logger.Info("Running operation: scale")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	if operation.scale == 0 {
		logger.Warning("scale operation was told to scale to 0")
		return false
	}

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		node, hasNode := target.Node()
		nodeLogger := logger.MakeChild(targetID)

		if !hasNode {
			nodeLogger.Info("No node [" + targetID + "]")
		} else if !node.Can("scale") {
			nodeLogger.Info("Node doesn't Scale [" + node.MachineName() + "]")
		} else {
			nodeLogger.Message("Scaling node " + node.Id())
			if operation.scale > 0 {
				count := operation.ScaleUpNumber(nodeLogger, node.Instances(), operation.scale)
				if count == 0 {
					nodeLogger.Warning("Scale operation could not scale up any new instances of node")
				} else if count < operation.scale {
					nodeLogger.Warning("Scale operation could not scale up all of the requested instances of node. " + strconv.FormatInt(int64(count), 10) + " started.")
				} else {
					nodeLogger.Message("Scale operation scaled up " + strconv.FormatInt(int64(count), 10) + " instances")
				}
			} else {
				count := operation.ScaleDownNumber(nodeLogger, node.Instances(), -operation.scale)
				if count == 0 {
					nodeLogger.Warning("Scale operation could not scale down any instances of node")
				} else if count < (-operation.scale) {
					nodeLogger.Warning("Scale operation could not scale down all of the requested instances of node. " + strconv.FormatInt(int64(count), 10) + " stopped.")
				} else {
					nodeLogger.Message("Scale operation scaled down " + strconv.FormatInt(int64(count), 10) + " instances")
				}
			}
		}
	}
	return true
}
// Try to configure nodes by parsing yaml from a conf file
func (nodes *Nodes) from_NodesYamlFilePath(logger log.Log, project *conf.Project, clientFactories *ClientFactories, yamlFilePath string, overwrite bool) bool {
	// read the config file
	yamlFile, err := ioutil.ReadFile(yamlFilePath)
	if err != nil {
		logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Could not read a YAML file: "+err.Error())
		return false
	}

	// err is nil at this point, so the failure message must not reference it
	if !nodes.from_NodesYamlBytes(logger.MakeChild(yamlFilePath), project, clientFactories, yamlFile, overwrite) {
		logger.Warning("YAML marshalling of the YAML nodes file failed [" + yamlFilePath + "]")
		return false
	}
	return true
}
// Try to configure a project by parsing yaml from a conf file
func (project *Project) from_ConfYamlFilePath(logger log.Log, yamlFilePath string) bool {
	// read the config file
	yamlFile, err := ioutil.ReadFile(yamlFilePath)
	if err != nil {
		logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Could not read a YAML file: "+err.Error())
		return false
	}

	// err is nil at this point, so the failure message must not reference it
	if !project.from_ConfYamlBytes(logger.MakeChild(yamlFilePath), yamlFile) {
		logger.Warning("YAML marshalling of the YAML conf file failed [" + yamlFilePath + "]")
		return false
	}
	return true
}
// Try to configure factories by parsing yaml from a byte stream
func (clientFactories *ClientFactories) from_ClientFactoriesYamlBytes(logger log.Log, project *conf.Project, yamlBytes []byte) bool {
	if project != nil {
		// token replace
		tokens := &project.Tokens
		yamlBytes = []byte(tokens.TokenReplace(string(yamlBytes)))
	}

	var yaml_clients map[string]map[string]interface{}
	err := yaml.Unmarshal(yamlBytes, &yaml_clients)
	if err != nil {
		logger.Warning("YAML parsing error : " + err.Error())
		return false
	}
	logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "YAML source:", yaml_clients)

	for name, client_struct := range yaml_clients {
		clientType := ""
		client_json, _ := json.Marshal(client_struct)
		logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Single client JSON:", string(client_json))

		// the "Type" key selects the factory implementation; it defaults to the client name
		if clientType_struct, ok := client_struct["Type"]; ok {
			clientType, _ = clientType_struct.(string)
		} else {
			clientType = name
		}

		switch strings.ToLower(clientType) {
		case "docker":
			fallthrough
		case "fsouza":
			clientFactorySettings := &FSouza_ClientFactorySettings{}
			if err := json.Unmarshal(client_json, clientFactorySettings); err != nil {
				logger.Warning("Factory definition failed to configure client factory :" + err.Error())
				logger.Debug(log.VERBOSITY_DEBUG, "Factory configuration json: ", string(client_json), clientFactorySettings)
				continue
			}

			factory := FSouza_ClientFactory{}
			if !factory.Init(logger.MakeChild(clientType), project, ClientFactorySettings(clientFactorySettings)) {
				logger.Error("Failed to initialize FSouza factory from client factory configuration")
				continue
			}

			// Add this factory to the factory list
			logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Client Factory Created [Client_DockerFSouzaFactory]", factory)
			clientFactories.AddClientFactory(clientType, ClientFactory(&factory))

		case "":
			logger.Warning("Client registration failure, client has a bad value for 'Type'")
		default:
			logger.Warning("Client registration failure, client has an unknown value for 'Type' :" + clientType)
		}
	}
	return true
}
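// Illustrative only: a clients YAML that the parser above would accept. The
// top-level key names the factory, "Type" picks the implementation (falling
// back to the key name when omitted), and the remaining keys are unmarshalled
// into FSouza_ClientFactorySettings. Host and CertPath are the settings used
// elsewhere in this package; the endpoint values are hypothetical.
//
//	local:
//	  Type: fsouza
//	  Host: "unix:///var/run/docker.sock"
//	remote:
//	  Type: docker
//	  Host: "tcp://10.0.0.10:2376"
//	  CertPath: "/home/user/.docker/certs"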
func (operation *PauseOperation) Run(logger log.Log) bool {
	logger.Info("Running operation: pause")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		node, hasNode := target.Node()
		instances, hasInstances := target.Instances()
		nodeLogger := logger.MakeChild(targetID)

		if !hasNode {
			// node is not set here, so report the target ID rather than dereferencing it
			nodeLogger.Warning("No node [" + targetID + "]")
		} else if !node.Can("pause") {
			nodeLogger.Info("Node doesn't Pause [" + node.MachineName() + ":" + node.Type() + "]")
		} else if !hasInstances {
			nodeLogger.Info("No valid instances specified in target list [" + node.MachineName() + "]")
		} else {
			nodeLogger.Message("Pausing instances")
			if !instances.IsFiltered() {
				nodeLogger.Info("Switching to using all instances")
				instances.UseAll()
			}
			for _, id := range instances.InstancesOrder() {
				instance, _ := instances.Instance(id)
				if !instance.IsRunning() {
					if !instance.IsReady() {
						nodeLogger.Info("Instance will not be paused as it is not ready :" + id)
					} else {
						nodeLogger.Info("Instance will not be paused as it is not running :" + id)
					}
				} else {
					instance.Client().Pause(logger)
				}
			}
		}
	}
	return true
}
// Try to configure help by parsing yaml from a byte stream
func (help *Help) from_HelpYamlBytes(logger log.Log, project *conf.Project, yamlBytes []byte) bool {
	if project != nil {
		// token replace
		tokens := &project.Tokens
		yamlBytes = []byte(tokens.TokenReplace(string(yamlBytes)))
		logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Tokenized Bytes", string(yamlBytes))
	}

	if err := yaml.Unmarshal(yamlBytes, help); err != nil {
		logger.Warning("YAML parsing error : " + err.Error())
		// logger.Debug(log.VERBOSITY_DEBUG, "YAML parsing error : "+err.Error(), string(yamlBytes))
		return false
	}
	return true
}
func (instances *FixedInstances) Prepare(logger log.Log, client Client, nodes *Nodes, node Node) bool {
	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Prepare: Fixed Instances")

	for _, name := range instances.settings.Names {
		machineName := instances.MachineName() + "_" + name
		instance := Instance(&FixedInstance{})
		if instance.Init(logger.MakeChild(name), name, machineName, client, true) {
			instances.instancesMap[name] = instance
			instances.instancesOrder = append(instances.instancesOrder, name)
		}
	}
	return true
}
func (client *FSouza_NodeClient) Build(logger log.Log, force bool) bool {
	image, tag := client.GetImageName()
	if client.settings.BuildPath == "" {
		logger.Warning("Node image [" + image + ":" + tag + "] not built as an empty path was provided. You must point Build: to a path inside .coach")
		return false
	}

	if !force && client.HasImage() {
		logger.Info("Node image [" + image + ":" + tag + "] not built as an image already exists. You can force this operation to build this image")
		return false
	}

	// determine an absolute buildPath to the build, for Docker to use.
	buildPath := ""
	for _, confBuildPath := range client.conf.Paths.GetConfSubPaths(client.settings.BuildPath) {
		logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Looking for Build: "+confBuildPath)
		if _, err := os.Stat(confBuildPath); !os.IsNotExist(err) {
			buildPath = confBuildPath
			break
		}
	}
	if buildPath == "" {
		// without a context dir the build cannot proceed
		logger.Error("No matching build path could be found [" + client.settings.BuildPath + "]")
		return false
	}

	options := docker.BuildImageOptions{
		Name:           image + ":" + tag,
		ContextDir:     buildPath,
		RmTmpContainer: true,
		OutputStream:   logger,
	}
	logger.Info("Building node image [" + image + ":" + tag + "] From build path [" + buildPath + "]")

	// ask the docker client to build the image
	err := client.backend.BuildImage(options)
	if err != nil {
		logger.Error("Node build failed [" + client.node.MachineName() + "] in build path [" + buildPath + "] => " + err.Error())
		return false
	} else {
		client.backend.Refresh(true, false)
		logger.Message("Node successfully built image [" + image + ":" + tag + "] From path [" + buildPath + "]")
		return true
	}
}
func (node *node_yaml_v2) GetClient(logger log.Log, clientFactories *ClientFactories) (Client, bool) {
	// if a docker client was configured then try to take it.
	// if !(node.Docker.Config.Image == "" && node.Docker.BuildPath == "") {
	if factory, ok := clientFactories.MatchClientFactory(FactoryMatchRequirements{Type: "docker"}); ok {
		if client, ok := factory.MakeClient(logger, ClientSettings(&node.Docker)); ok {
			return client, true
		}
	} else {
		logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Failed to match client factory:", factory)
	}
	// }

	logger.Warning("Invalid YAML node settings: improper client configuration")
	return nil, false
}
func (instances *ScaledInstances) Prepare(logger log.Log, client Client, nodes *Nodes, node Node) bool {
	instances.log = logger
	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Prepare: Scaled Instances")

	for i := 0; i <= instances.settings.Maximum; i++ {
		name := strconv.Itoa(i)
		machineName := instances.MakeId(i)
		isDefault := (i <= instances.settings.Initial)

		instance := Instance(&ScaledInstance{})
		if instance.Init(logger.MakeChild(name), name, machineName, client, isDefault) {
			instances.instancesMap[name] = instance
			instances.instancesOrder = append(instances.instancesOrder, name)
		}
	}
	return true
}
// Look for project configurations inside the environment conf paths
func (project *Project) from_EnvironmentsPath(logger log.Log) {
	for _, yamlEnvironmentPath := range project.Paths.GetConfSubPaths(COACH_CONF_ENVIRONMENTS_SUBPATH) {
		logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Looking for Environment subpath: "+yamlEnvironmentPath)
		yamlEnvironmentPath = path.Join(yamlEnvironmentPath, project.Environment)

		if project.CheckFileExists(yamlEnvironmentPath) {
			logger.Debug(log.VERBOSITY_DEBUG, "ADDING Environment subpath: "+yamlEnvironmentPath)
			environmentIncrement++
			envid := COACH_CONF_ENVIRONMENTS_SUBPATH + "-" + strconv.Itoa(environmentIncrement)
			project.SetPath(envid, yamlEnvironmentPath, true)
			project.setConfPath(envid)
		}
	}
}
// Make a Yaml Conf apply configuration to a project object
func (conf *conf_Yaml) configureProject(logger log.Log, project *Project) bool {
	// set a project name
	if conf.Project != "" {
		project.Name = conf.Project
	}
	// set an author name
	if conf.Author != "" {
		project.Author = conf.Author
	}
	// set an environment string
	if conf.Environment != "" {
		project.Environment = conf.Environment
	}

	// set any paths
	for key, keyPath := range conf.Paths {
		project.SetPath(key, keyPath, true)
	}
	// set any tokens
	for key, value := range conf.Tokens {
		project.SetToken(key, value)
	}

	/**
	 * Yaml Settings set Project Flags
	 */
	for key, value := range conf.Settings {
		switch key {
		case "UsePathsAsTokens":
			project.UsePathsAsTokens = conf.SettingStringToFlag(value)
		case "UseEnvVariablesAsTokens":
			project.UseEnvVariablesAsTokens = conf.SettingStringToFlag(value)
		}
	}

	logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Configured project from YAML conf", project)
	return true
}
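// Illustrative only: a conf YAML that configureProject above would apply to a
// Project. The keys mirror the conf_Yaml fields read in that method (exact
// casing depends on the struct's yaml tags); all values are hypothetical.
//
//	Project: "my-app"
//	Author: "dev@example.com"
//	Environment: "local"
//	Paths:
//	  custom-templates: "/srv/coach/templates"
//	Tokens:
//	  DB_IMAGE: "mysql:5.7"
//	Settings:
//	  UsePathsAsTokens: "true"
//	  UseEnvVariablesAsTokens: "true"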
// Get some base configuration for the project conf based on
// searching for a .coach path
func (project *Project) from_DefaultPaths(logger log.Log, workingDir string) {
	logger.Debug(log.VERBOSITY_DEBUG_LOTS, "Creating default Project")

	homeDir := "."
	if currentUser, err := user.Current(); err == nil {
		homeDir = currentUser.HomeDir
	} else {
		homeDir = os.Getenv("HOME")
	}

	// walk up from the working directory until a conf folder is found
	projectRootDirectory := workingDir
	_, err := os.Stat(path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER))
RootSearch:
	for err != nil {
		projectRootDirectory = path.Dir(projectRootDirectory)
		if projectRootDirectory == homeDir || projectRootDirectory == "." || projectRootDirectory == "/" {
			logger.Info("Could not find a project folder, coach will assume that this project is not initialized.")
			projectRootDirectory = workingDir
			break RootSearch
		}
		_, err = os.Stat(path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER))
	}

	/**
	 * Set up some frequently used paths
	 */
	project.SetPath("user-home", homeDir, true)
	project.SetPath("user-coach", path.Join(homeDir, COACH_PROJECT_CONF_FOLDER), true)
	project.SetPath("project-root", projectRootDirectory, true)
	project.SetPath("project-coach", path.Join(projectRootDirectory, COACH_PROJECT_CONF_FOLDER), true)

	/**
	 * @Note that it is advisable to not test if a path exists, as
	 * that can cause race conditions, and can produce an invalid test
	 * as the path could be created between the test, and the use of
	 * the path.
	 */
}
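// For reference, the paths registered above for a hypothetical layout where
// workingDir is /home/user/projects/app/web and the project root holds the
// conf folder (named by COACH_PROJECT_CONF_FOLDER, e.g. ".coach"):
//
//	user-home     /home/user
//	user-coach    /home/user/.coach
//	project-root  /home/user/projects/app
//	project-coach /home/user/projects/app/.coach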
// Generate a factory set from a project
func MakeClientFactories(logger log.Log, project *conf.Project) *ClientFactories {
	factories := &ClientFactories{
		log:                    logger,
		orderedClientFactories: []ClientFactory{},
	}

	/**
	 * Build Factories from YAML if possible
	 */
	factories.from_ClientFactoriesYaml(logger.MakeChild("fromyaml"), project)

	/**
	 * If no factory is set up then assume that we
	 * should use the docker fsouza library with a
	 * local docker implementation
	 */
	if !factories.HasFactories() {
		logger.Debug(log.VERBOSITY_DEBUG, "No defined clients, retrieving default client")
		factories.from_Default(logger.MakeChild("default"), project)
	}

	return factories
}
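// A minimal usage sketch: build the factory set for a project, then match a
// factory the way the node configuration code does elsewhere in this package
// (the settings value is a stand-in for a real ClientSettings).
//
//	factories := MakeClientFactories(logger, project)
//	if factory, ok := factories.MatchClientFactory(FactoryMatchRequirements{Type: "docker"}); ok {
//		client, _ := factory.MakeClient(logger, settings)
//	}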
func (operation *RunOperation) Run(logger log.Log) bool {
	logger.Info("Running operation: run")
	logger.Debug(log.VERBOSITY_DEBUG, "Run:Targets", operation.targets.TargetOrder())

	for _, targetID := range operation.targets.TargetOrder() {
		target, targetExists := operation.targets.Target(targetID)
		if !targetExists {
			// this is strange
			logger.Warning("Internal target error, was told to use a target that doesn't exist")
			continue
		}

		node, hasNode := target.Node()
		instances, _ := target.Instances()
		nodeLogger := logger.MakeChild(targetID)

		if !hasNode {
			// node is not set here, so report the target ID rather than dereferencing it
			nodeLogger.Info("No node [" + targetID + "]")
		} else if !node.Can("run") {
			nodeLogger.Info("Node doesn't Run [" + node.MachineName() + "]")
		} else {
			// with no explicit instances, run once against the default (empty) instance id
			instanceIds := instances.InstancesOrder()
			if len(instanceIds) == 0 {
				instanceIds = []string{""}
			}
			for _, id := range instanceIds {
				if instance, ok := instances.Instance(id); ok {
					instance.Client().Run(logger, false, operation.cmd)
				}
			}
		}
	}
	return true
}
// Init constructor for the client wrapper
func (wrapper *FSouza_Wrapper) Init(logger log.Log, settings FSouza_ClientFactorySettings) bool {
	var client *docker.Client
	var err error
	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Docker client conf: ", settings)

	if strings.HasPrefix(settings.Host, "tcp://") {
		// use a distinct variable for the stat result, so that the client
		// creation error below is not shadowed and silently lost
		if _, statErr := os.Stat(settings.CertPath); statErr == nil {
			// TCP DOCKER CLIENT WITH CERTS
			client, err = docker.NewTLSClient(
				settings.Host,
				path.Join(settings.CertPath, "cert.pem"),
				path.Join(settings.CertPath, "key.pem"),
				path.Join(settings.CertPath, "ca.pem"),
			)
		} else {
			// TCP DOCKER CLIENT WITHOUT CERTS
			client, err = docker.NewClient(settings.Host)
		}
	} else if strings.HasPrefix(settings.Host, "unix://") {
		// UNIX SOCKET DOCKER CLIENT
		client, err = docker.NewClient(settings.Host)
	} else {
		err = errors.New("Unknown client host :" + settings.Host)
	}

	if err == nil {
		logger.Debug(log.VERBOSITY_DEBUG_WOAH, "FSouza Docker client created:", client)
		wrapper.Client = client
		return true
	} else {
		logger.Error(err.Error())
		return false
	}
}
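// Host values this wrapper understands (the cert file names are fixed inside
// CertPath; the endpoint values here are hypothetical):
//
//	tcp://192.168.99.100:2376  + readable CertPath -> TLS client (cert.pem, key.pem, ca.pem)
//	tcp://192.168.99.100:2375  (no CertPath)       -> plain TCP client
//	unix:///var/run/docker.sock                    -> local socket client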
// Post initialization preparation
func (node *BaseNode) Prepare(logger log.Log, nodes *Nodes) (success bool) {
	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Prepare: Base Node")
	success = true
	node.log = logger

	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Preparing Client", nil)
	success = success && node.client.Prepare(logger.MakeChild("client"), nodes, node)

	logger.Debug(log.VERBOSITY_DEBUG_WOAH, "Preparing Instances", nil)
	success = success && node.instances.Prepare(logger.MakeChild("instances"), node.client, nodes, node)

	return success
}
// Look for project configurations inside the project confpaths
func (project *Project) from_ConfYaml(logger log.Log) {
	for _, yamlConfFilePath := range project.Paths.GetConfSubPaths(COACH_CONF_YAMLFILE) {
		logger.Debug(log.VERBOSITY_DEBUG_STAAAP, "Looking for YAML conf file: "+yamlConfFilePath)
		project.from_ConfYamlFilePath(logger, yamlConfFilePath)
	}
}