func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	// fetch the manifest (possibly a remote URI) into a temp file
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	defer os.Remove(localMan.Name())

	err = uri.URICopy(*manifestURI, localMan.Name())
	if err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}
	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	pod := pods.NewPod(manifest.ID(), pods.PodPath(*podRoot, manifest.ID()))
	err = pod.Install(manifest)
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more launchables in the manifest")
	}
}
func (h *HookEnv) Manifest() (pods.Manifest, error) {
	path := os.Getenv(HOOKED_POD_MANIFEST_ENV_VAR)
	if path == "" {
		return nil, util.Errorf("No manifest exported")
	}
	return pods.ManifestFromPath(path)
}
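// A minimal sketch of a hook binary consuming the manifest exported above.
// Constructing HookEnv as a zero value is an assumption for illustration;
// only the Manifest() accessor is shown in this excerpt.
func exampleHookMain() {
	h := &HookEnv{}
	manifest, err := h.Manifest()
	if err != nil {
		log.Fatalln(err)
	}
	// manifest.ID() is used throughout this codebase to identify the pod
	log.Printf("hook running for pod %s", manifest.ID())
}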
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	dir := hooks.Hooks(*HookDir, &logging.DefaultLogger)

	hookType, err := hooks.AsHookType(*Lifecycle)
	if err != nil {
		log.Fatalln(err)
	}

	pod := pods.NewPod(path.Base(*PodDir), *PodDir)

	// use the manifest given on the command line, falling back to the
	// pod's current manifest
	var manifest *pods.Manifest
	if *Manifest != "" {
		manifest, err = pods.ManifestFromPath(*Manifest)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		manifest, err = pod.CurrentManifest()
		if err != nil {
			log.Fatalln(err)
		}
	}

	log.Printf("About to run %s hooks for pod %s\n", hookType, pod.Path())
	err = dir.RunHookType(hookType, pod, manifest)
	if err != nil {
		log.Fatalln(err)
	}
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}

		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookGlobal {
			path = kp.HookPath(manifest.ID())
		}
		duration, err := store.SetPod(path, manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
func testManifest(t *testing.T) pods.Manifest {
	manifestPath := util.From(runtime.Caller(0)).ExpandPath("test_manifest.yaml")
	manifest, err := pods.ManifestFromPath(manifestPath)
	if err != nil {
		t.Fatalf("No test manifest found: %s", err)
	}
	return manifest
}
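// An illustrative (not actual) test showing how the testManifest helper
// might be used; the assertion on ID() is an assumption for demonstration.
func TestManifestHasID(t *testing.T) {
	manifest := testManifest(t)
	if manifest.ID() == "" {
		t.Fatal("expected test manifest to declare a pod ID")
	}
}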
func generatePreparerPod(workdir string) (string, error) {
	// build the artifact from HEAD
	err := exec.Command("go", "build", "github.com/square/p2/bin/p2-preparer").Run()
	if err != nil {
		return "", util.Errorf("Couldn't build preparer: %s", err)
	}
	wd, _ := os.Getwd()
	hostname, err := os.Hostname()
	if err != nil {
		return "", util.Errorf("Couldn't get hostname: %s", err)
	}
	// the test number forces the pod manifest to change every test run.
	testNumber := fmt.Sprintf("test=%d", rand.Intn(2000000000))
	cmd := exec.Command(
		"p2-bin2pod",
		"--work-dir", workdir,
		"--id", "p2-preparer",
		"--config", fmt.Sprintf("node_name=%s", hostname),
		"--config", testNumber,
		wd+"/p2-preparer",
	)
	manifestPath, err := executeBin2Pod(cmd)
	if err != nil {
		return "", err
	}

	// rewrite the generated manifest with auth, TLS, and status settings
	manifest, err := pods.ManifestFromPath(manifestPath)
	if err != nil {
		return "", err
	}
	builder := manifest.GetBuilder()
	builder.SetID("p2-preparer")
	builder.SetConfig(map[interface{}]interface{}{
		"preparer": map[interface{}]interface{}{
			"auth": map[string]string{
				"type":    "keyring",
				"keyring": util.From(runtime.Caller(0)).ExpandPath("pubring.gpg"),
			},
			"ca_file":     filepath.Join(certpath, "cert.pem"),
			"cert_file":   filepath.Join(certpath, "cert.pem"),
			"key_file":    filepath.Join(certpath, "key.pem"),
			"status_port": preparerStatusPort,
		},
	})
	builder.SetRunAsUser("root")
	builder.SetStatusPort(preparerStatusPort)
	builder.SetStatusHTTP(true)
	manifest = builder.GetManifest()

	manifestBytes, err := manifest.Marshal()
	if err != nil {
		return "", err
	}
	err = ioutil.WriteFile(manifestPath, manifestBytes, 0644)
	if err != nil {
		return "", err
	}
	return manifestPath, nil
}
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	store := kp.NewConsulStore(kp.Options{
		Address: *consulAddress,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	})

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}

		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookTypeName != "" {
			hookType, err := hooks.AsHookType(*hookTypeName)
			if err != nil {
				log.Fatalln(err)
			}
			path = kp.HookPath(hookType, manifest.ID())
		}
		duration, err := store.SetPod(path, *manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
func (r RCtl) Create(manifestPath, nodeSelector string, podLabels map[string]string) {
	manifest, err := pods.ManifestFromPath(manifestPath)
	if err != nil {
		r.logger.WithErrorAndFields(err, logrus.Fields{
			"manifest": manifestPath,
		}).Fatalln("Could not read pod manifest")
	}

	nodeSel, err := klabels.Parse(nodeSelector)
	if err != nil {
		r.logger.WithErrorAndFields(err, logrus.Fields{
			"selector": nodeSelector,
		}).Fatalln("Could not parse node selector")
	}

	newRC, err := r.rcs.Create(manifest, nodeSel, klabels.Set(podLabels))
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not create replication controller in Consul")
	}
	r.logger.WithField("id", newRC.ID).Infoln("Created new replication controller")
}
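// An illustrative call to Create. The manifest path, selector, and labels
// are hypothetical, and how an RCtl value is constructed is not shown in
// this excerpt, so the receiver is taken as given.
func exampleCreate(r RCtl) {
	r.Create(
		"/etc/p2/manifests/web.yaml",        // hypothetical pod manifest
		"region=us-west",                    // node selector in label syntax
		map[string]string{"owner": "infra"}, // labels applied to the new RC
	)
}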
func scheduleUserCreationHook(tmpdir string) error {
	createUserPath := path.Join(tmpdir, "create_user")
	script := `#!/usr/bin/env bash
set -e
mkdir -p $HOOKED_POD_HOME
/sbin/adduser $HOOKED_POD_ID -d $HOOKED_POD_HOME
`
	err := ioutil.WriteFile(createUserPath, []byte(script), 0744)
	if err != nil {
		return err
	}

	cmd := exec.Command("p2-bin2pod", "--work-dir", tmpdir, createUserPath)
	manifestPath, err := executeBin2Pod(cmd)
	if err != nil {
		return err
	}

	// the hook must run as root to be able to create users
	userHookManifest, err := pods.ManifestFromPath(manifestPath)
	if err != nil {
		return err
	}
	builder := userHookManifest.GetBuilder()
	builder.SetRunAsUser("root")
	userHookManifest = builder.GetManifest()

	contents, err := userHookManifest.Marshal()
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(manifestPath, contents, 0644)
	if err != nil {
		return err
	}

	manifestPath, err = signManifest(manifestPath, tmpdir)
	if err != nil {
		return err
	}
	return exec.Command("p2-schedule", "--hook", manifestPath).Run()
}
func main() {
	replicate.Version(version.VERSION)
	replicate.Parse(os.Args[1:])

	opts := kp.Options{
		Address: *consulUrl,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	}
	store := kp.NewConsulStore(opts)
	healthChecker := health.NewConsulHealthChecker(opts)

	// Fetch manifest (could be a URI) into a temp file
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	defer os.Remove(localMan.Name())

	if err := uri.URICopy(*manifestUri, localMan.Name()); err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}
	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	healthResults, err := healthChecker.Service(manifest.ID())
	if err != nil {
		log.Fatalf("Could not get initial health results: %s", err)
	}
	order := health.SortOrder{
		Nodes:  *hosts,
		Health: healthResults,
	}
	sort.Sort(order)

	repl := replication.Replicator{
		Manifest: *manifest,
		Store:    store,
		Health:   healthChecker,
		Nodes:    *hosts, // sorted by the health.SortOrder
		Active:   len(*hosts) - *minNodes,
		Logger: logging.NewLogger(logrus.Fields{
			"pod": manifest.ID(),
		}),
		Threshold: health.HealthState(*threshold),
	}
	repl.Logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	if err := repl.CheckPreparers(); err != nil {
		log.Fatalf("Preparer check failed: %s", err)
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lock, err := store.NewLock(fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now()))
	if err != nil {
		log.Fatalf("Could not generate lock: %s", err)
	}
	// deferring on main is not particularly useful, since os.Exit will skip
	// the defer, so we have to manually destroy the lock at the right exit
	// paths
	go func() {
		for range time.Tick(10 * time.Second) {
			if err := lock.Renew(); err != nil {
				// if the renewal failed, then either the lock is already dead
				// or the consul agent cannot be reached
				log.Fatalf("Lock could not be renewed: %s", err)
			}
		}
	}()
	if err := repl.LockHosts(lock, *overrideLock); err != nil {
		lock.Destroy()
		log.Fatalf("Could not lock all hosts: %s", err)
	}

	// auto-drain this channel
	errs := make(chan error)
	go func() {
		for range errs {
		}
	}()

	quitch := make(chan struct{})
	go func() {
		// clear the lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitch)
		lock.Destroy()
		os.Exit(1)
	}()

	repl.Enact(errs, quitch)
	lock.Destroy()
}
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	log.Println("Starting bootstrap")
	agentManifest, err := pods.ManifestFromPath(*agentManifestPath)
	if err != nil {
		log.Fatalf("Could not get agent manifest: %s", err)
	}

	log.Println("Installing and launching consul")
	var consulPod *pods.Pod
	var consulManifest *pods.Manifest
	if *existingConsul == "" {
		consulManifest, err = pods.ManifestFromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}
		consulPod = pods.NewPod(consulManifest.ID(), pods.PodPath(*podRoot, consulManifest.ID()))
		err = InstallConsul(consulPod, consulManifest)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)
		consulPod, err = pods.ExistingPod(*existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

	if err = VerifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)

	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = ScheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = ScheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}

	log.Println("Installing and launching base agent")
	err = InstallBaseAgent(agentManifest)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}

	if err := VerifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}