func (t runBs) Run(job monsterqueue.Job) {
	params := job.Parameters()
	dockerEndpoint := params["endpoint"].(string)
	machineID := params["machine"].(string)
	err := t.waitDocker(dockerEndpoint)
	if err != nil {
		job.Error(err)
		return
	}
	err = t.createBsContainer(dockerEndpoint)
	if err != nil {
		job.Error(err)
		t.destroyMachine(machineID)
		return
	}
	rawMetadata := params["metadata"].(monsterqueue.JobParams)
	metadata := make(map[string]string, len(rawMetadata))
	for key, value := range rawMetadata {
		metadata[key] = value.(string)
	}
	_, err = mainDockerProvisioner.getCluster().Register(dockerEndpoint, metadata)
	if err != nil {
		job.Error(err)
		t.destroyMachine(machineID)
		return
	}
	job.Success(nil)
}
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) machineID := params["machine"].(string) node := cluster.Node{Address: dockerEndpoint} err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) t.destroyMachine(machineID) return } rawMetadata := params["metadata"].(monsterqueue.JobParams) metadata := make(map[string]string, len(rawMetadata)) for key, value := range rawMetadata { metadata[key] = value.(string) } err = CreateContainer(dockerEndpoint, metadata["pool"], t.provisioner, true) if err != nil { node.CreationStatus = cluster.NodeCreationStatusError node.Metadata = map[string]string{"creationError": err.Error()} t.provisioner.Cluster().UpdateNode(node) job.Error(err) t.destroyMachine(machineID) return } node.CreationStatus = cluster.NodeCreationStatusCreated _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) t.destroyMachine(machineID) return } job.Success(nil) }
func (t *machineCreate) Run(job monsterqueue.Job) { params := job.Parameters() jobId := params["jobId"].(string) vmId := params["vmId"].(string) projectId := params["projectId"].(string) ip, err := t.iaas.waitVMIsCreated(jobId, vmId, projectId) if err != nil { _, qErr := job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if qErr != nil { job.Error(fmt.Errorf("error trying to enqueue deletion: %s caused by: %s", qErr, err)) return } job.Error(err) return } if tags, ok := params["tags"]; ok { var cloudTags []*cloudstackTag tagList := strings.Split(tags.(string), ",") cloudTags = make([]*cloudstackTag, 0, len(tagList)) for _, tag := range tagList { if strings.Contains(tag, ":") { parts := strings.SplitN(tag, ":", 2) cloudTags = append(cloudTags, &cloudstackTag{ Key: string(parts[0]), Value: string(parts[1]), }) } } if len(cloudTags) > 0 { param := make(map[string]string) param["resourceids"] = vmId param["resourcetype"] = "UserVm" for index, tag := range cloudTags { param["tags["+strconv.Itoa(index+1)+"].key"] = tag.Key param["tags["+strconv.Itoa(index+1)+"].value"] = tag.Value } param["projectId"] = projectId var result CreateTagsResponse err = t.iaas.do("createTags", param, &result) if err != nil { job.Error(err) return } } } notified, _ := job.Success(ip) if !notified { _, err = job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if err != nil { log.Errorf("could not enqueue delete unnotified vm: %s", err) return } } }
func (t *machineCreate) Run(job monsterqueue.Job) { params := job.Parameters() jobId := params["jobId"].(string) vmId := params["vmId"].(string) projectId := params["projectId"].(string) ip, err := t.iaas.waitVMIsCreated(jobId, vmId, projectId) if err != nil { _, qErr := job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if qErr != nil { job.Error(fmt.Errorf("error trying to enqueue deletion: %s caused by: %s", qErr, err)) return } job.Error(err) return } notified, _ := job.Success(ip) if !notified { _, err = job.Queue().Enqueue(t.iaas.taskName(machineDeleteTaskName), monsterqueue.JobParams{ "vmId": vmId, "projectId": projectId, }) if err != nil { log.Errorf("could not enqueue delete unnotified vm: %s", err) return } } }
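// Illustrative note, not part of the original code: job.Success(ip) above
// reports notified == true only when some caller is still waiting on the
// job's result; otherwise the task enqueues deletion of the VM it just
// created. A minimal sketch of that waiting side, assuming monsterqueue's
// EnqueueWait and Job.Result methods (github.com/tsuru/monsterqueue); the
// helper name and timeout value are assumptions for this example.
func waitForMachineIP(q monsterqueue.Queue, taskName, jobId, vmId, projectId string) (string, error) {
	job, err := q.EnqueueWait(taskName, monsterqueue.JobParams{
		"jobId":     jobId,
		"vmId":      vmId,
		"projectId": projectId,
	}, 5*time.Minute)
	if err != nil {
		return "", err
	}
	result, err := job.Result()
	if err != nil {
		return "", err
	}
	// The task stored the IP via job.Success(ip), so the result is a string.
	return result.(string), nil
}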
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) node := cluster.Node{Address: dockerEndpoint} err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) return } node.CreationStatus = cluster.NodeCreationStatusCreated rawMetadata := params["metadata"].(monsterqueue.JobParams) metadata := make(map[string]string, len(rawMetadata)) for key, value := range rawMetadata { metadata[key] = value.(string) } err = createContainer(dockerEndpoint, metadata["pool"], t.provisioner, true) if err != nil { t.provisioner.Cluster().UpdateNode(node) job.Error(err) return } node.Metadata = map[string]string{"LastSuccess": time.Now().Format(time.RFC3339)} _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) return } job.Success(nil) }
func (t *runBs) Run(job monsterqueue.Job) { params := job.Parameters() dockerEndpoint := params["endpoint"].(string) err := t.waitDocker(dockerEndpoint) if err != nil { job.Error(err) return } node, err := t.provisioner.Cluster().GetNode(dockerEndpoint) if err != nil { job.Error(err) return } node.CreationStatus = cluster.NodeCreationStatusCreated err = RecreateContainers(t.provisioner, nil, node) if err != nil { t.provisioner.Cluster().UpdateNode(node) job.Error(err) return } node.Metadata["LastSuccess"] = time.Now().Format(time.RFC3339) _, err = t.provisioner.Cluster().UpdateNode(node) if err != nil { job.Error(err) return } job.Success(nil) }
func (t *ec2WaitTask) Run(job monsterqueue.Job) { params := job.Parameters() regionOrEndpoint := getRegionOrEndpoint(map[string]string{ "region": params["region"].(string), "endpoint": params["endpoint"].(string), }, true) machineId := params["machineId"].(string) var timeout int switch val := params["timeout"].(type) { case int: timeout = val case float64: timeout = int(val) } ec2Inst, err := t.iaas.createEC2Handler(regionOrEndpoint) if err != nil { job.Error(err) return } var dnsName string var notifiedSuccess bool t0 := time.Now() for { log.Debugf("ec2: waiting for dnsname for instance %s", machineId) input := ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(machineId)}, } resp, err := ec2Inst.DescribeInstances(&input) if err != nil { job.Error(err) break } if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 { job.Error(err) break } instance := resp.Reservations[0].Instances[0] if instance.PublicDnsName != nil { dnsName = *instance.PublicDnsName } if dnsName != "" { notifiedSuccess, _ = job.Success(dnsName) break } if time.Now().Sub(t0) > time.Duration(2*timeout)*time.Second { job.Error(errors.New("hard timeout")) break } time.Sleep(500 * time.Millisecond) } if !notifiedSuccess { input := ec2.TerminateInstancesInput{ InstanceIds: []*string{aws.String(machineId)}, } ec2Inst.TerminateInstances(&input) } }
func (t *routesRebuildTask) Run(job monsterqueue.Job) {
	params := job.Parameters()
	appName, ok := params["appName"].(string)
	if !ok {
		job.Error(errors.New("invalid parameters, expected appName"))
		return
	}
	for !runRoutesRebuildOnce(appName, true) {
		time.Sleep(routesRebuildRetryTime)
	}
	job.Success(nil)
}
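// Illustrative sketch, not from the original code: how a task like
// routesRebuildTask might be registered and enqueued, assuming monsterqueue's
// RegisterTask/Enqueue API and that the task implements Name() as the Task
// interface requires. The helper name is an assumption for this example.
func enqueueRoutesRebuild(q monsterqueue.Queue, appName string) error {
	task := &routesRebuildTask{}
	// Tasks must be registered under their Name() before jobs can be enqueued.
	if err := q.RegisterTask(task); err != nil {
		return err
	}
	// Enqueue is asynchronous; Run above keeps retrying until the rebuild
	// succeeds, so the caller does not block on it.
	_, err := q.Enqueue(task.Name(), monsterqueue.JobParams{"appName": appName})
	return err
}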
func (t *machineDelete) Run(job monsterqueue.Job) {
	params := job.Parameters()
	vmId := params["vmId"].(string)
	projectId := params["projectId"].(string)
	var volumesRsp ListVolumesResponse
	err := t.iaas.do("listVolumes", ApiParams{
		"virtualmachineid": vmId,
		"projectid":        projectId,
	}, &volumesRsp)
	if err != nil {
		job.Error(err)
		return
	}
	var destroyData DestroyVirtualMachineResponse
	err = t.iaas.do("destroyVirtualMachine", ApiParams{
		"id": vmId,
	}, &destroyData)
	if err != nil {
		job.Error(err)
		return
	}
	_, err = t.iaas.waitForAsyncJob(destroyData.DestroyVirtualMachineResponse.JobID)
	if err != nil {
		job.Error(err)
		return
	}
	for _, vol := range volumesRsp.ListVolumesResponse.Volume {
		if vol.Type != diskDataDisk {
			continue
		}
		var detachRsp DetachVolumeResponse
		err = t.iaas.do("detachVolume", ApiParams{"id": vol.ID}, &detachRsp)
		if err != nil {
			job.Error(err)
			return
		}
		_, err = t.iaas.waitForAsyncJob(detachRsp.DetachVolumeResponse.JobID)
		if err != nil {
			job.Error(err)
			return
		}
		err = t.iaas.do("deleteVolume", ApiParams{"id": vol.ID}, nil)
		if err != nil {
			job.Error(err)
			return
		}
	}
	job.Success(nil)
}
func (t *ec2WaitTask) Run(job monsterqueue.Job) { params := job.Parameters() regionOrEndpoint := getRegionOrEndpoint(map[string]string{ "region": params["region"].(string), "endpoint": params["endpoint"].(string), }, true) machineId := params["machineId"].(string) var timeout int switch val := params["timeout"].(type) { case int: timeout = val case float64: timeout = int(val) } networkIdx := -1 if idx, ok := params["networkIndex"]; ok { switch val := idx.(type) { case int: networkIdx = val case float64: networkIdx = int(val) } } ec2Inst, err := t.iaas.createEC2Handler(regionOrEndpoint) if err != nil { job.Error(err) return } var dnsName string var notifiedSuccess bool t0 := time.Now() for { if time.Since(t0) > time.Duration(2*timeout)*time.Second { job.Error(errors.New("hard timeout")) break } log.Debugf("ec2: waiting for dnsname for instance %s", machineId) input := ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(machineId)}, } resp, err := ec2Inst.DescribeInstances(&input) if err != nil { log.Debug("ec2: api error") time.Sleep(1000 * time.Millisecond) continue } if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 { job.Error(err) break } instance := resp.Reservations[0].Instances[0] if networkIdx < 0 { dnsName = aws.StringValue(instance.PublicDnsName) } else { if len(instance.NetworkInterfaces) < networkIdx { job.Error(errors.New("invalid network-index. ")) break } dnsName = aws.StringValue(instance.NetworkInterfaces[networkIdx].PrivateDnsName) } if dnsName != "" { notifiedSuccess, _ = job.Success(dnsName) break } time.Sleep(500 * time.Millisecond) } if !notifiedSuccess { input := ec2.TerminateInstancesInput{ InstanceIds: []*string{aws.String(machineId)}, } ec2Inst.TerminateInstances(&input) } }
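// Illustrative refactoring, not part of the original code: both ec2WaitTask
// variants above share the same poll-until-deadline shape. A generic helper
// using only the standard library could factor that pattern out; the helper
// name is an assumption for this sketch.
func pollUntil(deadline, interval time.Duration, check func() (done bool, err error)) error {
	t0 := time.Now()
	for {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Since(t0) > deadline {
			return errors.New("hard timeout")
		}
		time.Sleep(interval)
	}
}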