Example #1
File: create.go  Project: pulcy/quark
// Create an entire cluster
func (vp *scalewayProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider providers.DnsProvider) error {
	wg := sync.WaitGroup{}
	errors := make(chan error, options.InstanceCount)
	instanceDatas := make(chan instanceData, options.InstanceCount)
	for i := 1; i <= options.InstanceCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			time.Sleep(time.Duration(i-1) * 10 * time.Second)
			isCore := (i <= 3)
			isLB := (i <= 2)
			instanceOptions, err := options.NewCreateInstanceOptions(isCore, isLB, i)
			if err != nil {
				errors <- maskAny(err)
				return
			}
			instance, err := vp.createInstance(log, instanceOptions, dnsProvider, nil)
			if err != nil {
				errors <- maskAny(err)
			} else {
				instanceDatas <- instanceData{
					CreateInstanceOptions: instanceOptions,
					ClusterInstance:       instance,
					FleetMetadata:         instanceOptions.CreateFleetMetadata(i),
				}
			}
		}(i)
	}
	wg.Wait()
	close(errors)
	close(instanceDatas)
	err := <-errors
	if err != nil {
		return maskAny(err)
	}

	instances := []instanceData{}
	instanceList := providers.ClusterInstanceList{}
	for data := range instanceDatas {
		instances = append(instances, data)
		instanceList = append(instanceList, data.ClusterInstance)
	}

	clusterMembers, err := instanceList.AsClusterMemberList(log, nil)
	if err != nil {
		return maskAny(err)
	}

	// Create tinc network config
	if err := instanceList.ReconfigureTincCluster(vp.Logger, instanceList); err != nil {
		return maskAny(err)
	}

	if err := vp.setupInstances(log, instances, clusterMembers); err != nil {
		return maskAny(err)
	}

	return nil
}
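
Note: both CreateCluster implementations in these examples use the same fan-out pattern: one goroutine per instance, error and result channels buffered to options.InstanceCount so no sender can block, and only the first error read after the channels are closed (a receive from a closed, empty channel yields nil). The standalone sketch below illustrates that pattern with made-up names (createOne, results); it is not part of pulcy/quark.

package main

import (
	"fmt"
	"sync"
)

// createOne stands in for the per-instance work (createInstance in the
// examples above); name and behaviour are illustrative only.
func createOne(i int) (string, error) {
	return fmt.Sprintf("instance-%d", i), nil
}

func main() {
	const count = 4
	var wg sync.WaitGroup
	// Buffered to count so every goroutine can send without blocking.
	errs := make(chan error, count)
	results := make(chan string, count)

	for i := 1; i <= count; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r, err := createOne(i)
			if err != nil {
				errs <- err
				return
			}
			results <- r
		}(i)
	}
	wg.Wait()
	close(errs)
	close(results)

	// A receive from a closed, empty channel yields the zero value (nil),
	// so only the first error, if any, is reported, as in CreateCluster.
	if err := <-errs; err != nil {
		fmt.Println("failed:", err)
		return
	}
	for r := range results {
		fmt.Println("created", r)
	}
}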
Example #2
File: create.go  Project: pulcy/quark
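// CreateCluster creates an entire cluster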
func (dp *doProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider providers.DnsProvider) error {
	wg := sync.WaitGroup{}
	errors := make(chan error, options.InstanceCount)
	instanceDatas := make(chan instanceData, options.InstanceCount)
	for i := 1; i <= options.InstanceCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			isCore := true
			isLB := true
			instanceOptions, err := options.NewCreateInstanceOptions(isCore, isLB, i)
			if err != nil {
				errors <- maskAny(err)
				return
			}
			instance, err := dp.CreateInstance(log, instanceOptions, dnsProvider)
			if err != nil {
				errors <- maskAny(err)
			} else {
				instanceDatas <- instanceData{
					CreateInstanceOptions: instanceOptions,
					ClusterInstance:       instance,
					FleetMetadata:         instanceOptions.CreateFleetMetadata(i),
				}
			}
		}(i)
	}
	wg.Wait()
	close(errors)
	close(instanceDatas)
	err := <-errors
	if err != nil {
		return maskAny(err)
	}

	instances := []instanceData{}
	instanceList := providers.ClusterInstanceList{}
	for data := range instanceDatas {
		instances = append(instances, data)
		instanceList = append(instanceList, data.ClusterInstance)
	}

	clusterMembers, err := instanceList.AsClusterMemberList(log, nil)
	if err != nil {
		return maskAny(err)
	}

	if err := dp.setupInstances(log, instances, clusterMembers); err != nil {
		return maskAny(err)
	}

	return nil
}
Example #3
File: create.go  Project: pulcy/quark
// createInstance creates a new instance, runs the bootstrap script and registers the instance
// in DNS.
func (vp *scalewayProvider) createInstance(log *logging.Logger, options providers.CreateInstanceOptions, dnsProvider providers.DnsProvider, existingInstances providers.ClusterInstanceList) (providers.ClusterInstance, error) {
	if options.RegionID != vp.Region {
		return providers.ClusterInstance{}, maskAny(fmt.Errorf("Cannot create server on region '%s' with provider configured for region '%s'", options.RegionID, vp.Region))
	}
	// Create a new machine ID
	machineID, err := util.GenUUID()
	if err != nil {
		return providers.ClusterInstance{}, maskAny(err)
	}
	log.Debugf("created machine-id: %s", machineID)

	// Create server
	instance, err := vp.createAndStartServer(options)
	if err != nil {
		return providers.ClusterInstance{}, maskAny(err)
	}

	// Update `cluster-members` file on existing instances.
	// This ensures that the firewall of the existing instances allows our new instance
	if len(existingInstances) > 0 {
		rebootAfter := false
		clusterMembers, err := existingInstances.AsClusterMemberList(log, nil)
		if err != nil {
			return providers.ClusterInstance{}, maskAny(err)
		}
		newMember := providers.ClusterMember{
			ClusterID:     options.ClusterInfo.ID,
			MachineID:     machineID,
			ClusterIP:     instance.ClusterIP,
			PrivateHostIP: instance.PrivateIP,
			EtcdProxy:     options.EtcdProxy,
		}
		clusterMembers = append(clusterMembers, newMember)
		if err := existingInstances.UpdateClusterMembers(log, clusterMembers, rebootAfter, vp); err != nil {
			log.Warningf("Failed to update cluster members: %#v", err)
		}
	}

	// Bootstrap server
	if err := vp.bootstrapServer(instance, options, machineID); err != nil {
		return providers.ClusterInstance{}, maskAny(err)
	}

	// Wait for the server to be active
	server, err := vp.waitUntilServerActive(instance.ID, false)
	if err != nil {
		return providers.ClusterInstance{}, maskAny(err)
	}

	if options.RoleLoadBalancer {
		privateIpv4 := server.PrivateIP
		publicIpv4 := server.PublicAddress.IP
		publicIpv6 := ""
		if server.IPV6 != nil {
			publicIpv6 = server.IPV6.Address
		}
		if err := providers.RegisterInstance(vp.Logger, dnsProvider, options, server.Name, options.RegisterInstance, options.RoleLoadBalancer, options.RoleLoadBalancer, publicIpv4, publicIpv6, privateIpv4); err != nil {
			return providers.ClusterInstance{}, maskAny(err)
		}
	}

	vp.Logger.Infof("Server '%s' is ready", server.Name)

	return vp.clusterInstance(server, false), nil
}
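
Note: createInstance blocks on vp.waitUntilServerActive before registering DNS; that helper is not shown in these examples. The sketch below only illustrates the general poll-with-timeout shape such a helper usually has, using assumed names (waitUntilActive, getState) and an assumed "running" state string rather than the real Scaleway API.

package main

import (
	"fmt"
	"time"
)

// waitUntilActive is an illustrative poll-with-timeout helper; it is not the
// actual scalewayProvider.waitUntilServerActive implementation.
func waitUntilActive(getState func() (string, error), timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		state, err := getState()
		if err != nil {
			return err
		}
		if state == "running" {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for server; last state %q", state)
		}
		time.Sleep(interval)
	}
}

func main() {
	// Fake state source: reports "starting" twice, then "running".
	calls := 0
	getState := func() (string, error) {
		calls++
		if calls < 3 {
			return "starting", nil
		}
		return "running", nil
	}
	if err := waitUntilActive(getState, time.Minute, 100*time.Millisecond); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("server active after", calls, "polls")
}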