Example #1
File: hooks.go Project: tomzhang/p2
func (h *HookDir) RunHookType(hookType HookType, pod Pod, manifest pods.Manifest) error {
	logger := h.logger.SubLogger(logrus.Fields{
		"pod":      manifest.ID(),
		"pod_path": pod.Path(),
		"event":    hookType.String(),
	})
	logger.NoFields().Infof("Running %s hooks", hookType.String())
	return h.runHooks(h.dirpath, hookType, pod, manifest, logger)
}
Example #2
func InstallBaseAgent(agentManifest *pods.Manifest) error {
	agentPod := pods.NewPod(agentManifest.ID(), pods.PodPath(*podRoot, agentManifest.ID()))
	err := agentPod.Install(agentManifest)
	if err != nil {
		return err
	}
	_, err = agentPod.Launch(agentManifest)
	return err
}
Example #3
func writeManifest(workingDir string, manifest *pods.Manifest) (string, error) {
	file, err := os.OpenFile(path.Join(workingDir, fmt.Sprintf("%s.yaml", podId())), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", err
	}
	defer file.Close()

	err = manifest.Write(file)
	if err != nil {
		return "", err
	}
	return file.Name(), nil
}
Example #4
// RegisterService creates a consul service for the given pod manifest. If the
// manifest specifies a status port, the resulting consul service will also
// include a health check for that port.
func (c consulStore) RegisterService(manifest pods.Manifest, caPath string) error {
	podService := &api.AgentServiceRegistration{
		Name: manifest.ID(),
	}

	if manifest.StatusPort != 0 {
		podService.Port = manifest.StatusPort
		podService.Check = &api.AgentServiceCheck{
			Interval: checkInterval,
		}
		if manifest.StatusHTTP {
			podService.Check.Script = fmt.Sprintf(httpStatusCheck, manifest.StatusPort)
		} else {
			podService.Check.Script = fmt.Sprintf(httpsStatusCheck, manifest.StatusPort, caPath)
		}
	}

	return c.client.Agent().ServiceRegister(podService)
}
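In Example #10 below the same RegisterService call is reached through the preparer's store. Here is a minimal usage sketch, assuming the kp constructors from Example #5 and assuming the store returned by kp.NewConsulStore exposes RegisterService; registerFromFile and its arguments are illustrative and not part of p2:
// registerFromFile is a hypothetical helper: it loads a manifest from disk
// (as in Example #12) and registers it as a consul service. If the manifest
// declares a status port, RegisterService attaches the health check shown above.
func registerFromFile(manifestPath, consulToken, caPath string) error {
	manifest, err := pods.ManifestFromPath(manifestPath)
	if err != nil {
		return err
	}
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: consulToken,
	}))
	return store.RegisterService(*manifest, caPath)
}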
Example #5
func ScheduleForThisHost(manifest pods.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.IntentPath(hostname, manifest.ID()), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.RealityPath(hostname, manifest.ID()), manifest)
		return err
	}
	return nil
}
Example #6
func addManifestConfig(manifest *pods.Manifest) error {
	manifest.Config = make(map[interface{}]interface{})
	for _, pair := range *config {
		res := strings.Split(pair, "=")
		if len(res) != 2 {
			return fmt.Errorf("%s is not a valid key=value config assignment", pair)
		}
		manifest.Config[res[0]] = res[1]
	}
	return nil
}
Example #7
// SetPod writes a pod manifest into the consul key-value store. The key should
// not have a leading or trailing slash.
func (c consulStore) SetPod(key string, manifest pods.Manifest) (time.Duration, error) {
	buf := bytes.Buffer{}
	err := manifest.Write(&buf)
	if err != nil {
		return 0, err
	}
	keyPair := &api.KVPair{
		Key:   key,
		Value: buf.Bytes(),
	}

	writeMeta, err := c.client.KV().Put(keyPair, nil)
	var retDur time.Duration
	if writeMeta != nil {
		retDur = writeMeta.RequestTime
	}
	if err != nil {
		return retDur, KVError{Op: "set", Key: key, UnsafeError: err}
	}
	return retDur, nil
}
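A minimal usage sketch of SetPod keyed by the intent tree, assuming kp.IntentPath produces a key without leading or trailing slashes as required above; pushIntent is illustrative and the consul token is a placeholder:
// pushIntent writes one pod manifest into the intent tree for a host and logs
// the consul request time returned by SetPod. Hypothetical helper; assumes the
// standard "log" package is imported alongside kp and pods.
func pushIntent(hostname string, manifest pods.Manifest, consulToken string) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: consulToken,
	}))
	duration, err := store.SetPod(kp.IntentPath(hostname, manifest.ID()), manifest)
	if err != nil {
		return err
	}
	log.Printf("wrote intent for %s in %s", manifest.ID(), duration)
	return nil
}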
Example #8
File: hooks.go Project: tomzhang/p2
func (h *HookDir) runHooks(dirpath string, hType HookType, pod Pod, podManifest pods.Manifest, logger logging.Logger) error {
	configFileName, err := podManifest.ConfigFileName()
	if err != nil {
		return err
	}

	// Write manifest to a file so hooks can read it.
	tmpManifestFile, err := ioutil.TempFile("", fmt.Sprintf("%s-manifest.yaml", podManifest.ID()))
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to open manifest file for hooks")
		return err
	}
	defer os.Remove(tmpManifestFile.Name())

	err = podManifest.Write(tmpManifestFile)
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to write manifest file for hooks")
		return err
	}

	hookEnvironment := []string{
		fmt.Sprintf("%s=%s", HOOK_ENV_VAR, path.Base(dirpath)),
		fmt.Sprintf("%s=%s", HOOK_EVENT_ENV_VAR, hType.String()),
		fmt.Sprintf("%s=%s", HOOKED_POD_ID_ENV_VAR, podManifest.ID()),
		fmt.Sprintf("%s=%s", HOOKED_POD_HOME_ENV_VAR, pod.Path()),
		fmt.Sprintf("%s=%s", HOOKED_POD_MANIFEST_ENV_VAR, tmpManifestFile.Name()),
		fmt.Sprintf("%s=%s", HOOKED_CONFIG_PATH_ENV_VAR, path.Join(pod.ConfigDir(), configFileName)),
		fmt.Sprintf("%s=%s", HOOKED_ENV_PATH_ENV_VAR, pod.EnvDir()),
	}

	return runDirectory(dirpath, hookEnvironment, logger)
}
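The environment built above is only consumed by the executables in the hook directory. The following is a standard-library-only sketch of what such a hook might do; the literal variable names are taken from Example #9, and HOOK_EVENT is an assumed name for the value exported via HOOK_EVENT_ENV_VAR:
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	podID := os.Getenv("HOOKED_POD_ID")
	event := os.Getenv("HOOK_EVENT") // assumed name; set through HOOK_EVENT_ENV_VAR above
	manifestPath := os.Getenv("HOOKED_POD_MANIFEST")

	// runHooks writes the manifest to a temp file before invoking the hook,
	// so the hook can simply read it back from disk.
	manifest, err := ioutil.ReadFile(manifestPath)
	if err != nil {
		log.Fatalf("hook could not read manifest for %s: %s", podID, err)
	}
	fmt.Printf("hook fired for pod %s on event %s (%d bytes of manifest)\n", podID, event, len(manifest))
}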
Example #9
func (h *HookDir) runHooks(dirpath string, pod Pod, podManifest *pods.Manifest) error {

	logger := h.logger.SubLogger(logrus.Fields{
		"hook":     dirpath,
		"pod":      podManifest.ID(),
		"pod_path": pod.Path(),
	})

	configFileName, err := podManifest.ConfigFileName()
	if err != nil {
		return err
	}

	// Write manifest to a file so hooks can read it.
	tmpManifestFile, err := ioutil.TempFile("", fmt.Sprintf("%s-manifest.yaml", podManifest.Id))
	if err != nil {
		logger.WithField("err", err).Warnln("Unable to open manifest file for hooks")
		return err
	}
	defer os.Remove(tmpManifestFile.Name())

	err = podManifest.Write(tmpManifestFile)
	if err != nil {
		logger.WithField("err", err).Warnln("Unable to write manifest file for hooks")
		return err
	}

	hookEnvironment := []string{
		fmt.Sprintf("HOOK=%s", path.Base(dirpath)),
		fmt.Sprintf("HOOKED_POD_ID=%s", podManifest.Id),
		fmt.Sprintf("HOOKED_POD_HOME=%s", pod.Path()),
		fmt.Sprintf("HOOKED_POD_MANIFEST=%s", tmpManifestFile.Name()),
		fmt.Sprintf("HOOKED_CONFIG_PATH=%s", path.Join(pod.ConfigDir(), configFileName)),
		fmt.Sprintf("HOOKED_ENV_PATH=%s", pod.EnvDir()),
	}

	return runDirectory(dirpath, hookEnvironment, logger)
}
Example #10
func (p *Preparer) installAndLaunchPod(newManifest *pods.Manifest, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger

	// get currently running pod to compare with the new pod
	realityPath := kp.RealityPath(p.node, newManifest.ID())
	currentManifest, _, err := p.store.Pod(realityPath)
	currentSHA := ""
	if currentManifest != nil {
		currentSHA, _ = currentManifest.SHA()
	}
	newSHA, _ := newManifest.SHA()

	// if new or the manifest is different, launch
	newOrDifferent := (err == pods.NoCurrentManifest) || (currentSHA != newSHA)
	if newOrDifferent {
		logger.WithFields(logrus.Fields{
			"old_sha": currentSHA,
			"sha":     newSHA,
			"pod":     newManifest.ID(),
		}).Infoln("SHA is new or different from old, will update")
	}

	// if the old manifest is corrupted somehow, re-launch since we don't know if this is an update.
	problemReadingCurrentManifest := (err != nil && err != pods.NoCurrentManifest)
	if problemReadingCurrentManifest {
		logger.WithFields(logrus.Fields{
			"sha":       newSHA,
			"inner_err": err,
		}).Errorln("Current manifest not readable, will relaunch")
	}

	if newOrDifferent || problemReadingCurrentManifest {
		p.tryRunHooks(hooks.BEFORE_INSTALL, pod, newManifest, logger)

		err = pod.Install(newManifest)
		if err != nil {
			// install failed, abort and retry
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Install failed")
			return false
		}

		err = pod.Verify(newManifest, p.authPolicy)
		if err != nil {
			logger.WithField("err", err).Errorln("Pod digest verification failed")
			p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pod, newManifest, logger)
			return false
		}

		p.tryRunHooks(hooks.AFTER_INSTALL, pod, newManifest, logger)

		err = p.store.RegisterService(*newManifest, p.caPath)
		if err != nil {
			logger.WithField("err", err).Errorln("Service registration failed")
			return false
		}

		if currentManifest != nil {
			success, err := pod.Halt(currentManifest)
			if err != nil {
				logger.WithField("err", err).Errorln("Pod halt failed")
			} else if !success {
				logger.NoFields().Warnln("One or more launchables did not halt successfully")
			}
		}

		ok, err := pod.Launch(newManifest)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Launch failed")
		} else {
			duration, err := p.store.SetPod(realityPath, *newManifest)
			if err != nil {
				logger.WithFields(logrus.Fields{
					"err":      err,
					"duration": duration,
				}).Errorln("Could not set pod in reality store")
			}

			p.tryRunHooks(hooks.AFTER_LAUNCH, pod, newManifest, logger)
		}
		return err == nil && ok
	}

	// TODO: shut down removed launchables between pod versions.
	return true
}
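The relaunch decision above can be distilled into a small predicate. A hypothetical helper, not part of p2, mirroring the two conditions computed in installAndLaunchPod:
// shouldRelaunch reports whether a pod must be (re)installed and launched:
// either the manifest is new or its SHA changed, or the current manifest
// could not be read at all. Illustrative only.
func shouldRelaunch(currentSHA, newSHA string, currentErr error) bool {
	newOrDifferent := currentErr == pods.NoCurrentManifest || currentSHA != newSHA
	problemReadingCurrent := currentErr != nil && currentErr != pods.NoCurrentManifest
	return newOrDifferent || problemReadingCurrent
}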
Example #11
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan pods.Manifest, quit <-chan struct{}) {
	// install new launchables
	var manifestToLaunch pods.Manifest

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case manifestToLaunch = <-podChan:
			sha, err := manifestToLaunch.SHA()
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":     manifestToLaunch.ID(),
				"sha":     sha,
				"sha_err": err,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = p.authorize(manifestToLaunch, manifestLogger)
			if !working {
				p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID())), &manifestToLaunch, manifestLogger)
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID()))

				ok := p.installAndLaunchPod(&manifestToLaunch, pod, manifestLogger)
				if ok {
					manifestToLaunch = pods.Manifest{}
					working = false
				}
			}
		}
	}
}
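A hedged wiring sketch for handlePods, assuming an already-constructed *Preparer; startHandlingPods is illustrative and the channel shapes match the signature above:
// startHandlingPods runs the preparer loop in a goroutine and returns a stop
// function that signals it to quit. Hypothetical helper, not part of p2.
func startHandlingPods(p *Preparer, podChan <-chan pods.Manifest) (stop func()) {
	quit := make(chan struct{})
	go p.handlePods(podChan, quit)
	return func() { close(quit) }
}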
Example #12
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	log.Println("Starting bootstrap")
	agentManifest, err := pods.ManifestFromPath(*agentManifestPath)
	if err != nil {
		log.Fatalln("Could not get agent manifest: %s", err)
	}
	log.Println("Installing and launching consul")

	var consulPod *pods.Pod
	var consulManifest *pods.Manifest
	if *existingConsul == "" {
		consulManifest, err = pods.ManifestFromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}
		consulPod = pods.NewPod(consulManifest.ID(), pods.PodPath(*podRoot, consulManifest.ID()))
		err = InstallConsul(consulPod, consulManifest)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)

		consulPod, err = pods.ExistingPod(*existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

	if err = VerifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)
	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = ScheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = ScheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}
	log.Println("Installing and launching base agent")
	err = InstallBaseAgent(agentManifest)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}
	if err := VerifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}