// Run waits for the Docker daemon on the new node to come up, starts the bs
// container on it, and only then registers the node in the cluster. On any
// failure after the machine exists, the backing machine is destroyed.
func (t runBs) Run(job monsterqueue.Job) {
    params := job.Parameters()
    dockerEndpoint := params["endpoint"].(string)
    machineID := params["machine"].(string)
    err := t.waitDocker(dockerEndpoint)
    if err != nil {
        job.Error(err)
        return
    }
    err = t.createBsContainer(dockerEndpoint)
    if err != nil {
        job.Error(err)
        t.destroyMachine(machineID)
        return
    }
    rawMetadata := params["metadata"].(monsterqueue.JobParams)
    metadata := make(map[string]string, len(rawMetadata))
    for key, value := range rawMetadata {
        metadata[key] = value.(string)
    }
    _, err = mainDockerProvisioner.getCluster().Register(dockerEndpoint, metadata)
    if err != nil {
        job.Error(err)
        t.destroyMachine(machineID)
        return
    }
    job.Success(nil)
}
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) node := cluster.Node{Address: dockerEndpoint} err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) return } node.CreationStatus = cluster.NodeCreationStatusCreated rawMetadata := params["metadata"].(monsterqueue.JobParams) metadata := make(map[string]string, len(rawMetadata)) for key, value := range rawMetadata { metadata[key] = value.(string) } err = createContainer(dockerEndpoint, metadata["pool"], t.provisioner, true) if err != nil { t.provisioner.Cluster().UpdateNode(node) job.Error(err) return } node.Metadata = map[string]string{"LastSuccess": time.Now().Format(time.RFC3339)} _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) return } job.Success(nil) }
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) machineID := params["machine"].(string) node := cluster.Node{Address: dockerEndpoint} err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) t.destroyMachine(machineID) return } rawMetadata := params["metadata"].(monsterqueue.JobParams) metadata := make(map[string]string, len(rawMetadata)) for key, value := range rawMetadata { metadata[key] = value.(string) } err = CreateContainer(dockerEndpoint, metadata["pool"], t.provisioner, true) if err != nil { node.CreationStatus = cluster.NodeCreationStatusError node.Metadata = map[string]string{"creationError": err.Error()} t.provisioner.Cluster().UpdateNode(node) job.Error(err) t.destroyMachine(machineID) return } node.CreationStatus = cluster.NodeCreationStatusCreated _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) t.destroyMachine(machineID) return } job.Success(nil) }
// AddNode registers the node with a pending creation status and enqueues the
// node-container task to finish the setup. When WaitTO is set, it blocks on
// the job result instead of returning right after enqueueing.
func (p *dockerProvisioner) AddNode(opts provision.AddNodeOptions) error {
    node := cluster.Node{
        Address:        opts.Address,
        Metadata:       opts.Metadata,
        CreationStatus: cluster.NodeCreationStatusPending,
        CaCert:         opts.CaCert,
        ClientCert:     opts.ClientCert,
        ClientKey:      opts.ClientKey,
    }
    if len(opts.CaCert) == 0 && len(p.caCert) > 0 {
        node.CaCert = p.caCert
        node.ClientCert = p.clientCert
        node.ClientKey = p.clientKey
    }
    err := p.Cluster().Register(node)
    if err != nil {
        return err
    }
    q, err := queue.Queue()
    if err != nil {
        return err
    }
    jobParams := monsterqueue.JobParams{"endpoint": opts.Address, "metadata": opts.Metadata}
    var job monsterqueue.Job
    if opts.WaitTO != 0 {
        job, err = q.EnqueueWait(internalNodeContainer.QueueTaskName, jobParams, opts.WaitTO)
    } else {
        _, err = q.Enqueue(internalNodeContainer.QueueTaskName, jobParams)
    }
    if err == nil && job != nil {
        _, err = job.Result()
    }
    return err
}
// addNode creates a new machine through the IaaS configured in the model
// nodes' metadata, registers it as pending, and waits for the bs task to
// finish the setup. Both the machine and the registration are rolled back if
// the task fails.
func (a *autoScaleConfig) addNode(event *autoScaleEvent, modelNodes []*cluster.Node) (*cluster.Node, error) {
    metadata, err := chooseMetadataFromNodes(modelNodes)
    if err != nil {
        return nil, err
    }
    _, hasIaas := metadata["iaas"]
    if !hasIaas {
        return nil, fmt.Errorf("no IaaS information in nodes metadata: %#v", metadata)
    }
    machine, err := iaas.CreateMachineForIaaS(metadata["iaas"], metadata)
    if err != nil {
        return nil, fmt.Errorf("unable to create machine: %s", err.Error())
    }
    newAddr := machine.FormatNodeAddress()
    event.logMsg("new machine created: %s - Waiting for docker to start...", newAddr)
    createdNode := cluster.Node{
        Address:        newAddr,
        Metadata:       metadata,
        CreationStatus: cluster.NodeCreationStatusPending,
    }
    err = a.provisioner.Cluster().Register(createdNode)
    if err != nil {
        machine.Destroy()
        return nil, fmt.Errorf("error registering new node %s: %s", newAddr, err.Error())
    }
    q, err := queue.Queue()
    if err == nil {
        jobParams := monsterqueue.JobParams{
            "endpoint": createdNode.Address,
            "machine":  machine.Id,
            "metadata": createdNode.Metadata,
        }
        var job monsterqueue.Job
        job, err = q.EnqueueWait(bs.QueueTaskName, jobParams, a.WaitTimeNewMachine)
        if err == nil {
            _, err = job.Result()
        }
    }
    if err != nil {
        machine.Destroy()
        a.provisioner.Cluster().Unregister(newAddr)
        return nil, fmt.Errorf("error running bs task: %s", err)
    }
    event.logMsg("new machine created: %s - started!", newAddr)
    return &createdNode, nil
}
// Run retries the routes rebuild until it succeeds, sleeping between
// attempts. Invalid parameters fail the job immediately.
func (t *routesRebuildTask) Run(job monsterqueue.Job) {
    params := job.Parameters()
    appName, ok := params["appName"].(string)
    if !ok {
        job.Error(errors.New("invalid parameters, expected appName"))
        return
    }
    for !runRoutesRebuildOnce(appName, true) {
        time.Sleep(routesRebuildRetryTime)
    }
    job.Success(nil)
}
func (t *machineCreate) Run(job monsterqueue.Job) { params := job.Parameters() jobId := params["jobId"].(string) vmId := params["vmId"].(string) projectId := params["projectId"].(string) ip, err := t.iaas.waitVMIsCreated(jobId, vmId, projectId) if err != nil { _, qErr := job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if qErr != nil { job.Error(fmt.Errorf("error trying to enqueue deletion: %s caused by: %s", qErr, err)) return } job.Error(err) return } if tags, ok := params["tags"]; ok { var cloudTags []*cloudstackTag tagList := strings.Split(tags.(string), ",") cloudTags = make([]*cloudstackTag, 0, len(tagList)) for _, tag := range tagList { if strings.Contains(tag, ":") { parts := strings.SplitN(tag, ":", 2) cloudTags = append(cloudTags, &cloudstackTag{ Key: string(parts[0]), Value: string(parts[1]), }) } } if len(cloudTags) > 0 { param := make(map[string]string) param["resourceids"] = vmId param["resourcetype"] = "UserVm" for index, tag := range cloudTags { param["tags["+strconv.Itoa(index+1)+"].key"] = tag.Key param["tags["+strconv.Itoa(index+1)+"].value"] = tag.Value } param["projectId"] = projectId var result CreateTagsResponse err = t.iaas.do("createTags", param, &result) if err != nil { job.Error(err) return } } } notified, _ := job.Success(ip) if !notified { _, err = job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if err != nil { log.Errorf("could not enqueue delete unnotified vm: %s", err) return } } }
// Run lists the VM's volumes up front, destroys the VM, and then cleans up:
// each data disk is detached and deleted once the destroy job completes.
func (t *machineDelete) Run(job monsterqueue.Job) {
    params := job.Parameters()
    vmId := params["vmId"].(string)
    projectId := params["projectId"].(string)
    var volumesRsp ListVolumesResponse
    err := t.iaas.do("listVolumes", ApiParams{
        "virtualmachineid": vmId,
        "projectid":        projectId,
    }, &volumesRsp)
    if err != nil {
        job.Error(err)
        return
    }
    var destroyData DestroyVirtualMachineResponse
    err = t.iaas.do("destroyVirtualMachine", ApiParams{
        "id": vmId,
    }, &destroyData)
    if err != nil {
        job.Error(err)
        return
    }
    _, err = t.iaas.waitForAsyncJob(destroyData.DestroyVirtualMachineResponse.JobID)
    if err != nil {
        job.Error(err)
        return
    }
    for _, vol := range volumesRsp.ListVolumesResponse.Volume {
        if vol.Type != diskDataDisk {
            continue
        }
        var detachRsp DetachVolumeResponse
        err = t.iaas.do("detachVolume", ApiParams{"id": vol.ID}, &detachRsp)
        if err != nil {
            job.Error(err)
            return
        }
        _, err = t.iaas.waitForAsyncJob(detachRsp.DetachVolumeResponse.JobID)
        if err != nil {
            job.Error(err)
            return
        }
        err = t.iaas.do("deleteVolume", ApiParams{"id": vol.ID}, nil)
        if err != nil {
            job.Error(err)
            return
        }
    }
    job.Success(nil)
}
func (t *machineCreate) Run(job monsterqueue.Job) { params := job.Parameters() jobId := params["jobId"].(string) vmId := params["vmId"].(string) projectId := params["projectId"].(string) ip, err := t.iaas.waitVMIsCreated(jobId, vmId, projectId) if err != nil { _, qErr := job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if qErr != nil { job.Error(fmt.Errorf("error trying to enqueue deletion: %s caused by: %s", qErr, err)) return } job.Error(err) return } notified, _ := job.Success(ip) if !notified { _, err = job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if err != nil { log.Errorf("could not enqueue delete unnotified vm: %s", err) return } } }
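// A minimal sketch of the waiting side of the notified handshake above,
// assuming a monsterqueue.Queue obtained elsewhere; waitForMachine and its
// 5-minute timeout are hypothetical, not actual tsuru call sites. If the
// caller gives up before machineCreate calls job.Success, the task sees
// notified == false and enqueues machineDelete to reap the VM.
func waitForMachine(q monsterqueue.Queue, taskName string, params monsterqueue.JobParams) (string, error) {
    job, err := q.EnqueueWait(taskName, params, 5*time.Minute)
    if err != nil {
        return "", err
    }
    result, err := job.Result()
    if err != nil {
        return "", err
    }
    // machineCreate passes the IP to job.Success, so the result is a string.
    ip, _ := result.(string)
    return ip, nil
}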
// Run polls DescribeInstances until the machine has a DNS name (public, or
// the private name of the requested network interface), failing with a hard
// timeout at twice the configured limit. If no waiting client was notified
// of the result, the instance is terminated.
func (t *ec2WaitTask) Run(job monsterqueue.Job) {
    params := job.Parameters()
    regionOrEndpoint := getRegionOrEndpoint(map[string]string{
        "region":   params["region"].(string),
        "endpoint": params["endpoint"].(string),
    }, true)
    machineId := params["machineId"].(string)
    var timeout int
    switch val := params["timeout"].(type) {
    case int:
        timeout = val
    case float64:
        timeout = int(val)
    }
    networkIdx := -1
    if idx, ok := params["networkIndex"]; ok {
        switch val := idx.(type) {
        case int:
            networkIdx = val
        case float64:
            networkIdx = int(val)
        }
    }
    ec2Inst, err := t.iaas.createEC2Handler(regionOrEndpoint)
    if err != nil {
        job.Error(err)
        return
    }
    var dnsName string
    var notifiedSuccess bool
    t0 := time.Now()
    for {
        log.Debugf("ec2: waiting for dnsname for instance %s", machineId)
        input := ec2.DescribeInstancesInput{
            InstanceIds: []*string{aws.String(machineId)},
        }
        resp, err := ec2Inst.DescribeInstances(&input)
        if err != nil {
            job.Error(err)
            break
        }
        if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
            job.Error(errors.New("instance not found in describe response"))
            break
        }
        instance := resp.Reservations[0].Instances[0]
        if networkIdx < 0 {
            dnsName = aws.StringValue(instance.PublicDnsName)
        } else {
            if len(instance.NetworkInterfaces) <= networkIdx {
                job.Error(errors.New("invalid network-index"))
                break
            }
            dnsName = aws.StringValue(instance.NetworkInterfaces[networkIdx].PrivateDnsName)
        }
        if dnsName != "" {
            notifiedSuccess, _ = job.Success(dnsName)
            break
        }
        if time.Since(t0) > time.Duration(2*timeout)*time.Second {
            job.Error(errors.New("hard timeout"))
            break
        }
        time.Sleep(500 * time.Millisecond)
    }
    if !notifiedSuccess {
        input := ec2.TerminateInstancesInput{
            InstanceIds: []*string{aws.String(machineId)},
        }
        ec2Inst.TerminateInstances(&input)
    }
}
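// Job parameters are serialized when the queue persists them, so a numeric
// value enqueued as an int may come back as a float64; that is why
// ec2WaitTask type-switches on both forms for timeout and networkIndex. A
// small sketch of that coercion as a reusable helper (intParam is
// hypothetical, not part of the source):
func intParam(params monsterqueue.JobParams, key string, def int) int {
    switch val := params[key].(type) {
    case int:
        return val
    case float64:
        return int(val)
    }
    return def
}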
func (t *testTask) Run(j monsterqueue.Job) {
    t.callCount++
    j.Success("result")
}
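// How a task like testTask plugs into the queue, as a hedged sketch: this
// assumes monsterqueue's Queue also exposes RegisterTask, that the task type
// implements Name() alongside Run(), and that a worker is consuming jobs
// (e.g. via the queue's process loop) so EnqueueWait can complete.
func runTestTask(q monsterqueue.Queue) (monsterqueue.JobResult, error) {
    task := &testTask{}
    if err := q.RegisterTask(task); err != nil {
        return nil, err
    }
    job, err := q.EnqueueWait(task.Name(), monsterqueue.JobParams{}, time.Minute)
    if err != nil {
        return nil, err
    }
    return job.Result()
}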
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) return } node, err := t.provisioner.Cluster().GetNode(dockerEndpoint) if err != nil { job.Error(err) return } node.CreationStatus = cluster.NodeCreationStatusCreated err = RecreateContainers(t.provisioner, nil, node) if err != nil { t.provisioner.Cluster().UpdateNode(node) job.Error(err) return } node.Metadata["LastSuccess"] = time.Now().Format(time.RFC3339) _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) return } job.Success(nil) }