Example #1
func (f *FakePodStore) ListPods(podPrefix kp.PodPrefix, hostname types.NodeName) ([]kp.ManifestResult, time.Duration, error) {
	f.podLock.Lock()
	defer f.podLock.Unlock()
	res := make([]kp.ManifestResult, 0)
	for key, manifest := range f.podResults {
		if key.podPrefix == podPrefix && key.hostname == hostname {
			// TODO(mpuncel) make ManifestResult not contain the path, it's silly to have to do things like this
			path := path.Join(string(podPrefix), hostname.String(), string(manifest.ID()))
			uniqueKey, err := kp.PodUniqueKeyFromConsulPath(path)
			if err != nil {
				return nil, 0, err
			}

			res = append(res, kp.ManifestResult{
				Manifest: manifest,
				PodLocation: types.PodLocation{
					Node:  hostname,
					PodID: manifest.ID(),
				},
				PodUniqueKey: uniqueKey,
			})
		}
	}
	return res, 0, nil
}
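
For reference, a consumer of ListPods only needs the fields populated above; the following is a minimal, hypothetical sketch (printPods is not part of p2, and the import path assumes the upstream square/p2 layout).

package main

import (
	"fmt"

	"github.com/square/p2/pkg/kp"
)

// printPods shows how a ListPods result might be consumed; the field names
// match those populated by the FakePodStore example above.
func printPods(results []kp.ManifestResult) {
	for _, r := range results {
		fmt.Printf("pod %s on node %s (unique key %v)\n",
			r.PodLocation.PodID, r.PodLocation.Node, r.PodUniqueKey)
	}
}

func main() {
	printPods(nil) // would normally receive the slice returned by ListPods
}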
Example #2
func (f *FakePodStore) AllPods(podPrefix kp.PodPrefix) ([]kp.ManifestResult, time.Duration, error) {
	f.podLock.Lock()
	defer f.podLock.Unlock()
	res := make([]kp.ManifestResult, 0)
	for key, manifest := range f.podResults {
		if key.podPrefix != podPrefix {
			continue
		}
		path := path.Join(string(podPrefix), key.hostname.String(), string(manifest.ID()))
		uniqueKey, err := kp.PodUniqueKeyFromConsulPath(path)
		if err != nil {
			return nil, 0, err
		}

		res = append(res, kp.ManifestResult{
			Manifest: manifest,
			PodLocation: types.PodLocation{
				Node:  key.hostname,
				PodID: manifest.ID(),
			},
			PodUniqueKey: uniqueKey,
		})
	}
	return res, 0, nil
}
Example #3
func TestShouldRollMidwayDesireLessThanHealthyPartial(t *testing.T) {
	// This test is like the above, but ensures that we are not too conservative.
	// If we have a minimum health of 3, desire 3 on the old side,
	// and have 1 healthy on the new side, we should have room to roll one node.
	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
		"node4": {Status: health.Passing},
		"node5": {Status: health.Passing},
	}
	upd, _, manifest := updateWithHealth(t, 3, 2, map[types.NodeName]bool{
		// This is something that may happen in a rolling update:
		// old RC only desires three nodes, but still has four of them.
		"node1": true,
		"node2": true,
		"node3": true,
		"node4": true,
	}, map[types.NodeName]bool{
		"node5": true,
	}, checks)
	upd.DesiredReplicas = 5
	upd.MinimumReplicas = 3

	roll, err := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).IsNil(err, "expected no error determining nodes to roll")
	Assert(t).AreEqual(roll, 1, "expected to roll one node")
}
Example #4
func TestShouldRollInitialUnknown(t *testing.T) {
	upd, _, manifest := updateWithHealth(t, 3, 0, nil, nil, nil)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	roll, _ := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).AreEqual(roll, 0, "expected to roll no nodes if health is unknown")
}
Example #5
func TestShouldRollInitialMigrationFromZero(t *testing.T) {
	upd, _, manifest := updateWithHealth(t, 0, 0, nil, nil, nil)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	remove, add, err := upd.shouldRollAfterDelay(manifest.ID())
	Assert(t).IsNil(err, "expected no error determining nodes to roll")
	Assert(t).AreEqual(remove, 0, "expected to remove no nodes")
	Assert(t).AreEqual(add, 1, "expected to add one node")
}
Example #6
File: main.go Project: petertseng/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	hookFactory := pods.NewHookFactory(filepath.Join(*podRoot, "hooks", *hookType), types.NodeName(*nodeName))

	// /data/pods/hooks/<event>/<id>
	// if the event is the empty string (global hook), then that path segment
	// will be cleaned out
	pod := hookFactory.NewHookPod(manifest.ID())

	// for now use noop verifier in this CLI
	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(*registryURI, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}
	// hooks write their current manifest manually since it's normally done at
	// launch time
	_, err = pod.WriteCurrentManifest(manifest)
	if err != nil {
		log.Fatalf("Could not write current manifest for %s: %s", manifest.ID(), err)
	}

	err = hooks.InstallHookScripts(*hookRoot, pod, manifest, logging.DefaultLogger)
	if err != nil {
		log.Fatalf("Could not write hook scripts: %s", err)
	}
}
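
The hook pod path layout described in the comments above (<podRoot>/hooks/<event>/<id>, with the empty event segment dropped for global hooks) can be checked with a standalone sketch; the concrete root, event, and pod ID values here are hypothetical.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Event-specific hook pod: <podRoot>/hooks/<event>/<id>
	fmt.Println(filepath.Join("/data/pods", "hooks", "before-install", "mypod"))
	// -> /data/pods/hooks/before-install/mypod

	// Global hook (empty event string): filepath.Join cleans out the empty segment
	fmt.Println(filepath.Join("/data/pods", "hooks", "", "mypod"))
	// -> /data/pods/hooks/mypod
}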
Example #7
func TestShouldRollMidwayUnknkown(t *testing.T) {
	checks := map[types.NodeName]health.Result{
		"node3": {Status: health.Passing},
	}
	upd, _, manifest := updateWithHealth(t, 2, 1, nil, map[types.NodeName]bool{
		"node3": true,
	}, checks)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	roll, _ := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).AreEqual(roll, 0, "expected to roll no nodes when old nodes all have unknown health")
}
Example #8
func TestShouldRollMidwayUnhealthyMigrationFromZero(t *testing.T) {
	checks := map[types.NodeName]health.Result{
		"node3": {Status: health.Critical},
	}
	upd, _, manifest := updateWithHealth(t, 0, 1, nil, map[types.NodeName]bool{
		"node3": true,
	}, checks)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	remove, add, _ := upd.shouldRollAfterDelay(manifest.ID())
	Assert(t).AreEqual(remove, 0, "expected to remove no nodes")
	Assert(t).AreEqual(add, 0, "expected to add no nodes")
}
Example #9
File: main.go Project: petertseng/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	err = authorize(manifest)
	if err != nil {
		log.Fatalf("%s", err)
	}

	podFactory := pods.NewFactory(*podRoot, types.NodeName(*nodeName))
	pod := podFactory.NewLegacyPod(manifest.ID())
	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example #10
func TestRollLoopTypicalCase(t *testing.T) {
	upd, _, manifest := updateWithHealth(t, 3, 0, map[types.NodeName]bool{
		"node1": true,
		"node2": true,
		"node3": true,
	}, nil, nil)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	healths := make(chan map[types.NodeName]health.Result)

	oldRC, oldRCUpdated := watchRCOrFail(t, upd.rcs, upd.OldRC, "old RC")
	newRC, newRCUpdated := watchRCOrFail(t, upd.rcs, upd.NewRC, "new RC")

	rollLoopResult := make(chan bool)

	go func() {
		rollLoopResult <- upd.rollLoop(manifest.ID(), healths, nil, nil)
		close(rollLoopResult)
	}()

	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
	}

	healths <- checks

	assertRCUpdates(t, oldRC, oldRCUpdated, 2, "old RC")
	assertRCUpdates(t, newRC, newRCUpdated, 1, "new RC")

	transferNode("node1", manifest, upd)
	healths <- checks

	assertRCUpdates(t, oldRC, oldRCUpdated, 1, "old RC")
	assertRCUpdates(t, newRC, newRCUpdated, 2, "new RC")

	transferNode("node2", manifest, upd)
	healths <- checks

	assertRCUpdates(t, oldRC, oldRCUpdated, 0, "old RC")
	assertRCUpdates(t, newRC, newRCUpdated, 3, "new RC")

	transferNode("node3", manifest, upd)
	healths <- checks

	assertRollLoopResult(t, rollLoopResult, true)
}
Example #11
func TestShouldRollMidwayHealthyMigrationFromZeroWhenNewSatisfies(t *testing.T) {
	checks := map[types.NodeName]health.Result{
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
	}
	upd, _, manifest := updateWithHealth(t, 0, 2, nil, map[types.NodeName]bool{
		"node2": true,
		"node3": true,
	}, checks)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	remove, add, err := upd.shouldRollAfterDelay(manifest.ID())
	Assert(t).IsNil(err, "expected no error determining nodes to roll")
	Assert(t).AreEqual(remove, 0, "expected to remove no nodes")
	Assert(t).AreEqual(add, 1, "expected to add one node")
}
Example #12
func TestShouldRollInitial(t *testing.T) {
	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
	}
	upd, _, manifest := updateWithHealth(t, 3, 0, map[types.NodeName]bool{
		"node1": true,
		"node2": true,
		"node3": true,
	}, nil, checks)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	roll, err := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).IsNil(err, "expected no error determining nodes to roll")
	Assert(t).AreEqual(roll, 1, "expected to only roll one node")
}
Example #13
func TestRollLoopStallsIfUnhealthy(t *testing.T) {
	upd, _, manifest := updateWithHealth(t, 3, 0, map[types.NodeName]bool{
		"node1": true,
		"node2": true,
		"node3": true,
	}, nil, nil)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	healths := make(chan map[types.NodeName]health.Result)

	oldRC, oldRCUpdated := watchRCOrFail(t, upd.rcs, upd.OldRC, "old RC")
	newRC, newRCUpdated := watchRCOrFail(t, upd.rcs, upd.NewRC, "new RC")

	rollLoopResult := make(chan bool)
	quitRoll := make(chan struct{})

	go func() {
		rollLoopResult <- upd.rollLoop(manifest.ID(), healths, nil, quitRoll)
		close(rollLoopResult)
	}()

	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
	}

	healths <- checks

	assertRCUpdates(t, oldRC, oldRCUpdated, 2, "old RC")
	assertRCUpdates(t, newRC, newRCUpdated, 1, "new RC")

	transferNode("node1", manifest, upd)
	checks["node1"] = health.Result{Status: health.Critical}
	go failIfRCDesireChanges(t, oldRC, 2, oldRCUpdated)
	go failIfRCDesireChanges(t, newRC, 1, newRCUpdated)
	for i := 0; i < 5; i++ {
		healths <- checks
	}

	quitRoll <- struct{}{}
	assertRollLoopResult(t, rollLoopResult, false)
}
Example #14
func TestRollLoopMigrateFromZero(t *testing.T) {
	upd, _, manifest := updateWithHealth(t, 0, 0, nil, nil, nil)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 2

	healths := make(chan map[types.NodeName]health.Result)

	oldRC, oldRCUpdated := watchRCOrFail(t, upd.rcs, upd.OldRC, "old RC")
	newRC, newRCUpdated := watchRCOrFail(t, upd.rcs, upd.NewRC, "new RC")
	go failIfRCDesireChanges(t, oldRC, 0, oldRCUpdated)

	rollLoopResult := make(chan bool)

	go func() {
		rollLoopResult <- upd.rollLoop(manifest.ID(), healths, nil, nil)
		close(rollLoopResult)
	}()

	checks := map[types.NodeName]health.Result{}
	healths <- checks

	assertRCUpdates(t, newRC, newRCUpdated, 1, "new RC")

	checks["node1"] = health.Result{Status: health.Passing}
	transferNode("node1", manifest, upd)
	healths <- checks

	assertRCUpdates(t, newRC, newRCUpdated, 2, "new RC")

	checks["node2"] = health.Result{Status: health.Passing}
	transferNode("node2", manifest, upd)
	healths <- checks

	assertRCUpdates(t, newRC, newRCUpdated, 3, "new RC")

	checks["node3"] = health.Result{Status: health.Passing}
	transferNode("node3", manifest, upd)
	healths <- checks

	assertRollLoopResult(t, rollLoopResult, true)
}
Example #15
func TestShouldRollWhenNewSatisfiesButNotAllDesiredHealthy(t *testing.T) {
	// newHealthy < newDesired, and newHealthy >= minHealthy.
	// In this case, we schedule the remaining nodes.
	// We want to ensure that remaining == targetDesired - newDesired
	// instead of targetDesired - newHealthy
	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Critical},
	}
	upd, _, manifest := updateWithHealth(t, 1, 2, map[types.NodeName]bool{
		"node1": true,
	}, map[types.NodeName]bool{
		"node2": true,
		"node3": true,
	}, checks)
	upd.DesiredReplicas = 3
	upd.MinimumReplicas = 1

	roll, err := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).IsNil(err, "expected no error determining nodes to roll")
	Assert(t).AreEqual(roll, 1, "expected to roll one node")
}
Example #16
func TestShouldRollMidwayDesireLessThanHealthy(t *testing.T) {
	checks := map[types.NodeName]health.Result{
		"node1": {Status: health.Passing},
		"node2": {Status: health.Passing},
		"node3": {Status: health.Passing},
		"node4": {Status: health.Passing},
		"node5": {Status: health.Passing},
	}
	upd, _, manifest := updateWithHealth(t, 3, 2, map[types.NodeName]bool{
		// This is something that may happen in a rolling update:
		// old RC only desires three nodes, but still has all five.
		"node1": true,
		"node2": true,
		"node3": true,
		"node4": true,
		"node5": true,
	}, map[types.NodeName]bool{}, checks)
	upd.DesiredReplicas = 5
	upd.MinimumReplicas = 3

	roll, _ := upd.uniformShouldRollAfterDelay(t, manifest.ID())
	Assert(t).AreEqual(roll, 0, "expected to roll no nodes")
}
Example #17
File: main.go Project: rudle/p2
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}

		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}
			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everwhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #18
File: pod_test.go Project: rudle/p2
func TestPodSetupConfigWritesFiles(t *testing.T) {
	manifestStr := `id: thepod
launchables:
  my-app:
    launchable_type: hoist
    launchable_id: web
    location: https://localhost:4444/foo/bar/baz_3c021aff048ca8117593f9c71e03b87cf72fd440.tar.gz
    cgroup:
      cpus: 4
      memory: 4G
    env:
      ENABLED_BLAMS: 5
config:
  ENVIRONMENT: staging
`
	currUser, err := user.Current()
	Assert(t).IsNil(err, "Could not get the current user")
	manifestStr += fmt.Sprintf("run_as: %s", currUser.Username)
	manifest, err := manifest.FromBytes(bytes.NewBufferString(manifestStr).Bytes())
	Assert(t).IsNil(err, "should not have erred reading the manifest")

	podTemp, _ := ioutil.TempDir("", "pod")

	podFactory := NewFactory(podTemp, "testNode")
	pod := podFactory.NewPod(manifest.ID())

	launchables := make([]launch.Launchable, 0)
	for _, stanza := range manifest.GetLaunchableStanzas() {
		launchable, err := pod.getLaunchable(stanza, manifest.RunAsUser(), manifest.GetRestartPolicy())
		Assert(t).IsNil(err, "There shouldn't have been an error getting launchable")
		launchables = append(launchables, launchable)
	}
	Assert(t).IsTrue(len(launchables) > 0, "Test setup error: no launchables from launchable stanzas")

	err = pod.setupConfig(manifest, launchables)
	Assert(t).IsNil(err, "There shouldn't have been an error setting up config")

	configFileName, err := manifest.ConfigFileName()
	Assert(t).IsNil(err, "Couldn't generate config filename")
	configPath := filepath.Join(pod.ConfigDir(), configFileName)
	config, err := ioutil.ReadFile(configPath)
	Assert(t).IsNil(err, "should not have erred reading the config")
	Assert(t).AreEqual("ENVIRONMENT: staging\n", string(config), "the config didn't match")

	env, err := ioutil.ReadFile(filepath.Join(pod.EnvDir(), "CONFIG_PATH"))
	Assert(t).IsNil(err, "should not have erred reading the env file")
	Assert(t).AreEqual(configPath, string(env), "The env path to config didn't match")

	platformConfigFileName, err := manifest.PlatformConfigFileName()
	Assert(t).IsNil(err, "Couldn't generate platform config filename")
	platformConfigPath := filepath.Join(pod.ConfigDir(), platformConfigFileName)
	platConfig, err := ioutil.ReadFile(platformConfigPath)
	Assert(t).IsNil(err, "should not have erred reading the platform config")

	expectedPlatConfig := `web:
  cgroup:
    cpus: 4
    memory: 4294967296
`
	Assert(t).AreEqual(expectedPlatConfig, string(platConfig), "the platform config didn't match")

	platEnv, err := ioutil.ReadFile(filepath.Join(pod.EnvDir(), "PLATFORM_CONFIG_PATH"))
	Assert(t).IsNil(err, "should not have erred reading the platform config env file")
	Assert(t).AreEqual(platformConfigPath, string(platEnv), "The env path to platform config didn't match")

	for _, launchable := range launchables {
		launchableIdEnv, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "LAUNCHABLE_ID"))
		Assert(t).IsNil(err, "should not have erred reading the launchable ID env file")

		if launchable.ID().String() != string(launchableIdEnv) {
			t.Errorf("Launchable Id did not have expected value: wanted '%s' was '%s'", launchable.ID().String(), launchableIdEnv)
		}

		launchableRootEnv, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "LAUNCHABLE_ROOT"))
		Assert(t).IsNil(err, "should not have erred reading the launchable root env file")
		Assert(t).AreEqual(launchable.InstallDir(), string(launchableRootEnv), "The launchable root path did not match expected")

		enableBlamSetting, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "ENABLED_BLAMS"))
		Assert(t).IsNil(err, "should not have erred reading custom env var")
		Assert(t).AreEqual("5", string(enableBlamSetting), "The user-supplied custom env var was wrong")
	}
}
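
A quick arithmetic check, separate from the test, of why the manifest's memory: 4G appears as 4294967296 in expectedPlatConfig: the platform config renders the limit in bytes, and 4 binary gigabytes is 4 * 1024^3.

package main

import "fmt"

func main() {
	const gib = 1 << 30  // 1024^3 bytes
	fmt.Println(4 * gib) // 4294967296, matching expectedPlatConfig above
}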
Example #19
File: main.go Project: petertseng/p2
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy
	nodes remain up at all times, according to p2's health checks.
`

	kingpin.Version(version.VERSION)
	_, opts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}

	nodes := make([]types.NodeName, len(*hosts))
	for i, host := range *hosts {
		nodes[i] = types.NodeName(host)
	}

	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		nodes,
		len(*hosts)-*minNodes,
		store,
		labeler,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
		replication.NoTimeout,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(
		*overrideLock,
		*ignoreControllers,
		*concurrentRealityChecks,
		0,
	)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}