Example #1
File: orchestrate.go Project: tomzhang/p2
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan ManifestPair, quit <-chan struct{}) {
	// install new launchables
	var nextLaunch ManifestPair

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case nextLaunch = <-podChan:
			var sha string
			if nextLaunch.Intent != nil {
				sha, _ = nextLaunch.Intent.SHA()
			} else {
				sha, _ = nextLaunch.Reality.SHA()
			}
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod": nextLaunch.ID,
				"sha": sha,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			if nextLaunch.Intent == nil {
				// if intent=nil then reality!=nil and we need to delete the pod
				// therefore we must set working=true here
				working = true
			} else {
				// non-nil intent manifests need to be authorized first
				working = p.authorize(nextLaunch.Intent, manifestLogger)
				if !working {
					p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID)), nextLaunch.Intent, manifestLogger)
				}
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID))

				// TODO better solution: force the preparer to have a 0s default timeout, prevent KILLs
				if pod.Id == POD_ID {
					pod.DefaultTimeout = time.Duration(0)
				}

				ok := p.resolvePair(nextLaunch, pod, manifestLogger)
				if ok {
					nextLaunch = ManifestPair{}
					working = false
				}
			}
		}
	}
}
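The loop above implements a small buffer-and-retry pattern: the newest ManifestPair replaces any unprocessed one, and the time.After case retries it every second until resolvePair succeeds. A minimal standalone sketch of the same pattern, with a hypothetical doWork standing in for resolvePair:

package main

import "time"

// doWork is a hypothetical stand-in for resolvePair above.
func doWork(item string) bool { return true }

// worker mirrors handlePods' bookkeeping: keep only the latest item and
// retry it on a one-second tick until it succeeds.
func worker(workChan <-chan string, quit <-chan struct{}) {
	var pending string
	working := false
	for {
		select {
		case <-quit:
			return
		case pending = <-workChan:
			working = true
		case <-time.After(1 * time.Second):
			if working && doWork(pending) {
				working = false
			}
		}
	}
}

func main() {
	work := make(chan string)
	quit := make(chan struct{})
	go worker(work, quit)
	work <- "example"
	time.Sleep(1500 * time.Millisecond) // let one retry tick fire
	close(quit)
}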
Example #2
File: main.go Project: robertabbott/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	// only remove the temp file once we know it was created
	defer os.Remove(localMan.Name())

	err = uri.URICopy(*manifestURI, localMan.Name())
	if err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}
	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	pod := pods.NewPod(manifest.ID(), pods.PodPath(*podRoot, manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example #3
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	manifest, err := pods.ManifestFromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	// /data/pods/hooks/<event>/<id>
	// if the event is the empty string (global hook), then that path segment
	// will be cleaned out
	pod := pods.NewPod(manifest.ID(), pods.PodPath(filepath.Join(*podRoot, "hooks", *hookType), manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}
	// hooks write their current manifest manually since it's normally done at
	// launch time
	_, err = pod.WriteCurrentManifest(manifest)
	if err != nil {
		log.Fatalf("Could not write current manifest for %s: %s", manifest.ID(), err)
	}

	err = hooks.InstallHookScripts(*hookRoot, pod, manifest, logging.DefaultLogger)
	if err != nil {
		log.Fatalf("Could not write hook scripts: %s", err)
	}
}
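For reference, the "cleaned out" behavior mentioned in the path comment falls out of filepath.Join, which drops empty path elements before cleaning the result. A quick illustration (paths borrowed from the comments in these examples):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// with an event, the hook pod lands under <root>/hooks/<event>/<id>
	fmt.Println(filepath.Join("/data/pods", "hooks", "before_install", "usercreate"))
	// a global hook has an empty event, and Join cleans the segment out
	fmt.Println(filepath.Join("/data/pods", "hooks", "", "usercreate"))
}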
Example #4
File: main.go Project: robertabbott/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	dir := hooks.Hooks(*HookDir, &logging.DefaultLogger)

	hookType, err := hooks.AsHookType(*Lifecycle)
	if err != nil {
		log.Fatalln(err)
	}

	pod := pods.NewPod(path.Base(*PodDir), *PodDir)

	var manifest *pods.Manifest
	if *Manifest != "" {
		manifest, err = pods.ManifestFromPath(*Manifest)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		manifest, err = pod.CurrentManifest()
		if err != nil {
			log.Fatalln(err)
		}
	}

	log.Printf("About to run %s hooks for pod %s\n", hookType, pod.Path())
	err = dir.RunHookType(hookType, pod, manifest)
	if err != nil {
		log.Fatalln(err)
	}
}
Example #5
func InstallBaseAgent(agentManifest *pods.Manifest) error {
	agentPod := pods.NewPod(agentManifest.ID(), pods.PodPath(*podRoot, agentManifest.ID()))
	err := agentPod.Install(agentManifest)
	if err != nil {
		return err
	}
	_, err = agentPod.Launch(agentManifest)
	return err
}
Example #6
File: hook_env.go Project: tomzhang/p2
func (h *HookEnv) Pod() (*pods.Pod, error) {
	id := os.Getenv(HOOKED_POD_ID_ENV_VAR)
	if id == "" {
		return nil, util.Errorf("Did not provide a pod ID to use")
	}
	path := os.Getenv(HOOKED_POD_HOME_ENV_VAR)
	if path == "" {
		return nil, util.Errorf("No pod home given for pod ID %s", id)
	}

	return pods.NewPod(id, path), nil
}
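Example #6 is the Go-side counterpart of the shell hooks in Examples #9 and #10: the hook runner exports the pod's identity into the environment, and HookEnv reads it back. Below is a minimal hypothetical hook binary consuming the same variables; HOOKED_POD_ID matches the $HOOKED_POD_ID used in Example #10, while the literal HOOKED_POD_HOME is an assumption about the value behind HOOKED_POD_HOME_ENV_VAR:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// HOOKED_POD_ID is set by the hook runner (see the shell hook in Example #10);
	// HOOKED_POD_HOME is assumed to be the value of HOOKED_POD_HOME_ENV_VAR.
	id := os.Getenv("HOOKED_POD_ID")
	home := os.Getenv("HOOKED_POD_HOME")
	if id == "" || home == "" {
		log.Fatalln("pod env vars missing; not invoked by the hook runner?")
	}
	fmt.Printf("hook running for pod %s at %s\n", id, home)
}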
Example #7
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan pods.Manifest, quit <-chan struct{}) {
	// install new launchables
	var manifestToLaunch pods.Manifest

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case manifestToLaunch = <-podChan:
			sha, err := manifestToLaunch.SHA()
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":     manifestToLaunch.ID(),
				"sha":     sha,
				"sha_err": err,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = p.authorize(manifestToLaunch, manifestLogger)
			if !working {
				p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID())), &manifestToLaunch, manifestLogger)
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID()))

				ok := p.installAndLaunchPod(&manifestToLaunch, pod, manifestLogger)
				if ok {
					manifestToLaunch = pods.Manifest{}
					working = false
				}
			}
		}
	}
}
Example #8
File: hooks_test.go Project: tomzhang/p2
func TestDirectoriesDoNotBreakEverything(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "hook")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(tempDir)

	podDir, err := ioutil.TempDir("", "pod")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(podDir)

	Assert(t).IsNil(os.Mkdir(path.Join(tempDir, "mydir"), 0755), "Should not have erred")

	pod := pods.NewPod(podId, podDir)
	logger := logging.TestLogger()
	hooks := Hooks(os.TempDir(), &logger)
	err = hooks.runHooks(tempDir, AFTER_INSTALL, pod, testManifest(), logging.DefaultLogger)

	Assert(t).IsNil(err, "Got an error when running a directory inside the hooks directory")
}
Example #9
File: hooks_test.go Project: tomzhang/p2
func TestNonExecutableHooksAreNotRun(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "hook")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(tempDir)

	podDir, err := ioutil.TempDir("", "pod")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(podDir)

	err = ioutil.WriteFile(path.Join(tempDir, "test2"), []byte("#!/bin/sh\ntouch $(dirname $0)/failed"), 0644)
	Assert(t).IsNil(err, "the error should have been nil")

	hooks := Hooks(os.TempDir(), &logging.DefaultLogger)
	hooks.runHooks(tempDir, AFTER_INSTALL, pods.NewPod(podId, podDir), testManifest(), logging.DefaultLogger)

	if _, err := os.Stat(path.Join(tempDir, "failed")); err == nil {
		t.Fatal("`failed` file exists; non-executable hook ran but should not have run")
	}
}
Example #10
File: hooks_test.go Project: tomzhang/p2
func TestExecutableHooksAreRun(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "hook")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(tempDir)

	podDir, err := ioutil.TempDir("", "pod")
	Assert(t).IsNil(err, "the error should have been nil")
	defer os.RemoveAll(podDir)

	err = ioutil.WriteFile(path.Join(tempDir, "test1"), []byte("#!/bin/sh\necho $HOOKED_POD_ID > $(dirname $0)/output"), 0755)
	Assert(t).IsNil(err, "the error should have been nil")

	hooks := Hooks(os.TempDir(), &logging.DefaultLogger)
	hooks.runHooks(tempDir, AFTER_INSTALL, pods.NewPod(podId, podDir), testManifest(), logging.DefaultLogger)

	contents, err := ioutil.ReadFile(path.Join(tempDir, "output"))
	Assert(t).IsNil(err, "the error should have been nil")

	Assert(t).AreEqual(string(contents), "TestPod\n", "hook should output pod ID into output file")
}
Example #11
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	manifest, err := pods.ManifestFromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	pod := pods.NewPod(manifest.ID(), pods.PodPath(*podRoot, manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example #12
File: listener.go Project: robertabbott/p2
// Sync keeps hook pods in sync with the manifests in the intent store.
// This function will open a new Pod watch on the given prefix and install
// any pods listed there in a hook pod directory. Following that, it will
// remove old links named by the same pod in the same event directory and
// symlink in the new pod's launchables.
func (l *HookListener) Sync(quit <-chan struct{}, errCh chan<- error) {

	watchPath := l.HookPrefix

	watcherQuit := make(chan struct{})
	watcherErrCh := make(chan error)
	podChan := make(chan kp.ManifestResult)

	go l.Intent.WatchPods(watchPath, watcherQuit, watcherErrCh, podChan)

	for {
		select {
		case <-quit:
			l.Logger.NoFields().Infoln("Terminating hook listener")
			watcherQuit <- struct{}{}
			return
		case err := <-watcherErrCh:
			l.Logger.WithField("err", err).Errorln("Error while watching pods")
			errCh <- err
		case result := <-podChan:
			sub := l.Logger.SubLogger(logrus.Fields{
				"pod":  result.Manifest.ID(),
				"dest": l.DestinationDir,
			})

			err := l.authPolicy.AuthorizeHook(&result.Manifest, sub)
			if err != nil {
				if err, ok := err.(auth.Error); ok {
					sub.WithFields(err.Fields).Errorln(err)
				} else {
					sub.NoFields().Errorln(err)
				}
				break
			}

			// Figure out what event we're setting a hook pod for. For example,
			// if we find a pod at /hooks/before_install/usercreate, then the
			// event is called "before_install"
			event, err := l.determineEvent(result.Path)
			if err != nil {
				sub.WithField("err", err).Errorln("Couldn't determine hook path")
				break
			}

			hookPod := pods.NewPod(result.Manifest.ID(), path.Join(l.DestinationDir, event, result.Manifest.ID()))

			// Figure out if we even need to install anything.
			// Hooks aren't running services and so there isn't a need
			// to write the current manifest to the reality store. Instead
			// we just compare to the manifest on disk.
			current, err := hookPod.CurrentManifest()
			if err != nil && err != pods.NoCurrentManifest {
				l.Logger.WithField("err", err).Errorln("Could not check current manifest")
				errCh <- err
				break
			}

			var currentSHA string
			if current != nil {
				// current is nil when err == pods.NoCurrentManifest
				currentSHA, _ = current.SHA()
			}
			newSHA, _ := result.Manifest.SHA()

			if err != pods.NoCurrentManifest && currentSHA == newSHA {
				// we are up-to-date, continue
				break
			}

			// The manifest is new, go ahead and install
			err = hookPod.Install(&result.Manifest)
			if err != nil {
				sub.WithField("err", err).Errorln("Could not install hook")
				errCh <- err
				break
			}

			_, err = hookPod.WriteCurrentManifest(&result.Manifest)
			if err != nil {
				sub.WithField("err", err).Errorln("Could not write current manifest")
				errCh <- err
				break
			}

			// Now that the pod is installed, link it up to the exec dir.
			err = l.writeHook(event, hookPod, &result.Manifest)
			if err != nil {
				sub.WithField("err", err).Errorln("Could not write hook link")
			} else {
				sub.NoFields().Infoln("Updated hook")
			}
		}
	}
}
Example #13
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	log.Println("Starting bootstrap")
	agentManifest, err := pods.ManifestFromPath(*agentManifestPath)
	if err != nil {
		log.Fatalln("Could not get agent manifest: %s", err)
	}
	log.Println("Installing and launching consul")

	var consulPod *pods.Pod
	var consulManifest *pods.Manifest
	if *existingConsul == "" {
		consulManifest, err = pods.ManifestFromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}
		consulPod = pods.NewPod(consulManifest.ID(), pods.PodPath(*podRoot, consulManifest.ID()))
		err = InstallConsul(consulPod, consulManifest)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)

		consulPod, err = pods.ExistingPod(*existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

	if err = VerifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)
	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = ScheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = ScheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}
	log.Println("Installing and launching base agent")
	err = InstallBaseAgent(agentManifest)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}
	if err := VerifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}
Example #14
File: listener.go Project: tomzhang/p2
func (l *HookListener) installHook(result kp.ManifestResult) error {
	sub := l.Logger.SubLogger(logrus.Fields{
		"pod":  result.Manifest.ID(),
		"dest": l.DestinationDir,
	})

	err := l.authPolicy.AuthorizeHook(result.Manifest, sub)
	if err != nil {
		if err, ok := err.(auth.Error); ok {
			sub.WithFields(err.Fields).Errorln(err)
		} else {
			sub.NoFields().Errorln(err)
		}
		return err
	}

	hookPod := pods.NewPod(result.Manifest.ID(), filepath.Join(l.DestinationDir, result.Manifest.ID()))

	// Figure out if we even need to install anything.
	// Hooks aren't running services and so there isn't a need
	// to write the current manifest to the reality store. Instead
	// we just compare to the manifest on disk.
	current, err := hookPod.CurrentManifest()
	if err != nil && err != pods.NoCurrentManifest {
		l.Logger.WithError(err).Errorln("Could not check current manifest")
		return err
	}

	var currentSHA string
	if current != nil {
		currentSHA, _ = current.SHA()
	}
	newSHA, _ := result.Manifest.SHA()

	if err != pods.NoCurrentManifest && currentSHA == newSHA {
		// we are up-to-date, continue
		return nil
	}

	// The manifest is new, go ahead and install
	err = hookPod.Install(result.Manifest)
	if err != nil {
		sub.WithError(err).Errorln("Could not install hook")
		return err
	}

	_, err = hookPod.WriteCurrentManifest(result.Manifest)
	if err != nil {
		sub.WithError(err).Errorln("Could not write current manifest")
		return err
	}

	// Now that the pod is installed, link it up to the exec dir.
	err = hooks.InstallHookScripts(l.ExecDir, hookPod, result.Manifest, sub)
	if err != nil {
		sub.WithError(err).Errorln("Could not write hook link")
		return err
	}
	sub.NoFields().Infoln("Updated hook")
	return nil
}