Example #1
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan ManifestPair, quit <-chan struct{}) {
	// install new launchables
	var nextLaunch ManifestPair

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case nextLaunch = <-podChan:
			var sha string
			if nextLaunch.Intent != nil {
				sha, _ = nextLaunch.Intent.SHA()
			} else {
				sha, _ = nextLaunch.Reality.SHA()
			}
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod": nextLaunch.ID,
				"sha": sha,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			if nextLaunch.Intent == nil {
				// if intent=nil then reality!=nil and we need to delete the pod
				// therefore we must set working=true here
				working = true
			} else {
				// non-nil intent manifests need to be authorized first
				working = p.authorize(nextLaunch.Intent, manifestLogger)
				if !working {
					p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID)), nextLaunch.Intent, manifestLogger)
				}
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID))

				// TODO better solution: force the preparer to have a 0s default timeout, prevent KILLs
				if pod.Id == POD_ID {
					pod.DefaultTimeout = time.Duration(0)
				}

				ok := p.resolvePair(nextLaunch, pod, manifestLogger)
				if ok {
					nextLaunch = ManifestPair{}
					working = false
				}
			}
		}
	}
}
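The ManifestPair type consumed above is not defined in this snippet. Below is a minimal sketch of its shape, inferred only from the fields handlePods touches (ID, Intent, Reality); the concrete field types in the real codebase may differ.

// Sketch only, inferred from usage in handlePods above; not the actual definition.
// Intent is the manifest the cluster intends to run, Reality the manifest currently
// installed; a nil Intent with a non-nil Reality means the pod should be deleted.
type ManifestPair struct {
	ID      string // possibly a dedicated pod ID type in the real code
	Intent  *pods.Manifest
	Reality *pods.Manifest
}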
Example #2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalf("Couldn't create tempfile: %s", err)
	}
	// defer the cleanup only after the error check; localMan is nil on failure
	defer os.Remove(localMan.Name())

	err = uri.URICopy(*manifestURI, localMan.Name())
	if err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}
	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	pod := pods.NewPod(manifest.ID(), pods.PodPath(*podRoot, manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
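The package-level flag pointers (*manifestURI, *podRoot) referenced in these main functions are declared elsewhere. The following is a hypothetical sketch of how they could be wired up with kingpin; the names, help text, and the /data/pods default are assumptions, not the project's actual declarations.

// Hypothetical flag declarations backing *manifestURI and *podRoot above.
var (
	manifestURI = kingpin.Arg("manifest", "a URI or local path to a pod manifest").Required().String()
	podRoot     = kingpin.Flag("pod-root", "the root directory for installed pods").Default("/data/pods").String()
)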
Example #3
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	manifest, err := pods.ManifestFromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	// /data/pods/hooks/<event>/<id>
	// if the event is the empty string (global hook), then that path segment
	// will be cleaned out
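	// e.g. filepath.Join(*podRoot, "hooks", "") collapses to <podRoot>/hooks,
	// since filepath.Join drops empty path elements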
	pod := pods.NewPod(manifest.ID(), pods.PodPath(filepath.Join(*podRoot, "hooks", *hookType), manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}
	// hooks write their current manifest manually since it's normally done at
	// launch time
	_, err = pod.WriteCurrentManifest(manifest)
	if err != nil {
		log.Fatalf("Could not write current manifest for %s: %s", manifest.ID(), err)
	}

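	// write the hook scripts for this pod into the hook root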
	err = hooks.InstallHookScripts(*hookRoot, pod, manifest, logging.DefaultLogger)
	if err != nil {
		log.Fatalf("Could not write hook scripts: %s", err)
	}
}
Example #4
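// InstallBaseAgent installs the base agent's manifest as a pod under podRoot and
// launches it, returning any error from either step (the launch success flag is discarded).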
func InstallBaseAgent(agentManifest *pods.Manifest) error {
	agentPod := pods.NewPod(agentManifest.ID(), pods.PodPath(*podRoot, agentManifest.ID()))
	err := agentPod.Install(agentManifest)
	if err != nil {
		return err
	}
	_, err = agentPod.Launch(agentManifest)
	return err
}
Example #5
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan pods.Manifest, quit <-chan struct{}) {
	// install new launchables
	var manifestToLaunch pods.Manifest

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case manifestToLaunch = <-podChan:
			sha, err := manifestToLaunch.SHA()
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":     manifestToLaunch.ID(),
				"sha":     sha,
				"sha_err": err,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = p.authorize(manifestToLaunch, manifestLogger)
			if !working {
				p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID())), &manifestToLaunch, manifestLogger)
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID()))

				ok := p.installAndLaunchPod(&manifestToLaunch, pod, manifestLogger)
				if ok {
					manifestToLaunch = pods.Manifest{}
					working = false
				}
			}
		}
	}
}
Example #6
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	manifest, err := pods.ManifestFromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	pod := pods.NewPod(manifest.ID(), pods.PodPath(*podRoot, manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example #7
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	log.Println("Starting bootstrap")
	agentManifest, err := pods.ManifestFromPath(*agentManifestPath)
	if err != nil {
		log.Fatalln("Could not get agent manifest: %s", err)
	}
	log.Println("Installing and launching consul")

	var consulPod *pods.Pod
	var consulManifest *pods.Manifest
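	// either install a consul pod from the provided manifest, or reuse a consul
	// pod that already exists on disk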
	if *existingConsul == "" {
		consulManifest, err = pods.ManifestFromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}
		consulPod = pods.NewPod(consulManifest.ID(), pods.PodPath(*podRoot, consulManifest.ID()))
		err = InstallConsul(consulPod, consulManifest)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)

		consulPod, err = pods.ExistingPod(*existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

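	// make sure the consul agent is answering before registering anything in its stores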
	if err = VerifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)
	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = ScheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = ScheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}
	log.Println("Installing and launching base agent")
	err = InstallBaseAgent(agentManifest)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}
	if err := VerifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}