Example #1
File: hook_env.go Project: rudle/p2
// Initializes a pod based on the hooked pod manifest and the system pod root
func (h *HookEnv) Pod() (*pods.Pod, error) {
	factory := pods.NewFactory(os.Getenv(HOOKED_SYSTEM_POD_ROOT_ENV_VAR), HOOKED_NODE_ENV_VAR)

	podID, err := h.PodID()
	if err != nil {
		return nil, err
	}

	return factory.NewPod(podID), nil
}
Example #2
File: hook_env.go Project: petertseng/p2
// Initializes a pod based on the hooked pod manifest and the system pod root
func (h *HookEnv) Pod() (*pods.Pod, error) {
	factory := pods.NewFactory(os.Getenv(HOOKED_SYSTEM_POD_ROOT_ENV_VAR), HOOKED_NODE_ENV_VAR)

	podID, err := h.PodID()
	if err != nil {
		return nil, err
	}

	uuid := types.PodUniqueKey(os.Getenv(HOOKED_POD_UNIQUE_KEY_ENV_VAR))
	if uuid == "" {
		return factory.NewLegacyPod(podID), nil
	}
	return factory.NewUUIDPod(podID, uuid)
}
Example #3
File: main.go Project: petertseng/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	err = authorize(manifest)
	if err != nil {
		log.Fatalf("%s", err)
	}

	podFactory := pods.NewFactory(*podRoot, types.NodeName(*nodeName))
	pod := podFactory.NewLegacyPod(manifest.ID())
	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example #4
File: setup.go Project: drcapulet/p2
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) {
	addHooks(preparerConfig, logger)

	if preparerConfig.ConsulAddress == "" {
		return nil, util.Errorf("No Consul address given to the preparer")
	}
	if preparerConfig.PodRoot == "" {
		return nil, util.Errorf("No pod root given to the preparer")
	}

	if preparerConfig.LogLevel != "" {
		lv, err := logrus.ParseLevel(preparerConfig.LogLevel)
		if err != nil {
			return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel)
		}
		logger.Logger.Level = lv
	}

	authPolicy, err := getDeployerAuth(preparerConfig)
	if err != nil {
		return nil, err
	}

	artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger)
	if err != nil {
		return nil, err
	}

	artifactRegistry, err := getArtifactRegistry(preparerConfig)
	if err != nil {
		return nil, err
	}

	store, err := preparerConfig.GetStore()
	if err != nil {
		return nil, err
	}

	maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage
	if preparerConfig.MaxLaunchableDiskUsage != "" {
		maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage)
		if err != nil {
			return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err)
		}
	}

	listener := HookListener{
		Intent:           store,
		HookPrefix:       kp.HOOK_TREE,
		Node:             preparerConfig.NodeName,
		HookFactory:      pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName),
		ExecDir:          preparerConfig.HooksDirectory,
		Logger:           logger,
		authPolicy:       authPolicy,
		artifactVerifier: artifactVerifier,
		artifactRegistry: artifactRegistry,
	}

	err = os.MkdirAll(preparerConfig.PodRoot, 0755)
	if err != nil {
		return nil, util.Errorf("Could not create preparer pod directory: %s", err)
	}

	var logExec []string
	if len(preparerConfig.LogExec) > 0 {
		logExec = preparerConfig.LogExec
	} else {
		logExec = runit.DefaultLogExec()
	}

	var finishExec []string
	if len(preparerConfig.FinishExec) > 0 {
		finishExec = preparerConfig.FinishExec
	} else {
		finishExec = pods.DefaultFinishExec
	}

	return &Preparer{
		node:                   preparerConfig.NodeName,
		store:                  store,
		hooks:                  hooks.Hooks(preparerConfig.HooksDirectory, &logger),
		hookListener:           listener,
		Logger:                 logger,
		podFactory:             pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName),
		authPolicy:             authPolicy,
		maxLaunchableDiskUsage: maxLaunchableDiskUsage,
		finishExec:             finishExec,
		logExec:                logExec,
		logBridgeBlacklist:     preparerConfig.LogBridgeBlacklist,
		artifactVerifier:       artifactVerifier,
		artifactRegistry:       artifactRegistry,
	}, nil
}
Example #5
File: bootstrap.go Project: petertseng/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	log.Println("Starting bootstrap")
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatalf("error getting node name: %v", err)
	}
	nodeName := types.NodeName(hostname)
	agentManifest, err := manifest.FromPath(*agentManifestPath)
	if err != nil {
		log.Fatalf("Could not get agent manifest: %s", err)
	}
	log.Println("Installing and launching consul")

	podFactory := pods.NewFactory(*podRoot, nodeName)

	var consulPod *pods.Pod
	var consulManifest manifest.Manifest
	if *existingConsul == "" {
		consulManifest, err = manifest.FromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}

		// Consul will never have a uuid (for now)
		consulPod = podFactory.NewLegacyPod(consulManifest.ID())
		err = installConsul(consulPod, consulManifest, *registryURL)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)

		consulPod, err = pods.PodFromPodHome(nodeName, *existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

	if err = verifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)
	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = scheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = scheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}
	log.Println("Installing and launching base agent")
	err = installBaseAgent(podFactory, agentManifest, *registryURL)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}
	if err := verifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}
Example #6
File: setup.go Project: petertseng/p2
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) {
	addHooks(preparerConfig, logger)

	if preparerConfig.ConsulAddress == "" {
		return nil, util.Errorf("No Consul address given to the preparer")
	}
	if preparerConfig.PodRoot == "" {
		return nil, util.Errorf("No pod root given to the preparer")
	}

	if preparerConfig.LogLevel != "" {
		lv, err := logrus.ParseLevel(preparerConfig.LogLevel)
		if err != nil {
			return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel)
		}
		logger.Logger.Level = lv
	}

	authPolicy, err := getDeployerAuth(preparerConfig)
	if err != nil {
		return nil, err
	}

	artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger)
	if err != nil {
		return nil, err
	}

	artifactRegistry, err := getArtifactRegistry(preparerConfig)
	if err != nil {
		return nil, err
	}

	client, err := preparerConfig.GetConsulClient()
	if err != nil {
		return nil, err
	}

	statusStore := statusstore.NewConsul(client)
	podStatusStore := podstatus.NewConsul(statusStore, kp.PreparerPodStatusNamespace)
	podStore := podstore.NewConsul(client.KV())

	store := kp.NewConsulStore(client)

	maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage
	if preparerConfig.MaxLaunchableDiskUsage != "" {
		maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage)
		if err != nil {
			return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err)
		}
	}

	err = os.MkdirAll(preparerConfig.PodRoot, 0755)
	if err != nil {
		return nil, util.Errorf("Could not create preparer pod directory: %s", err)
	}

	var logExec []string
	if len(preparerConfig.LogExec) > 0 {
		logExec = preparerConfig.LogExec
	} else {
		logExec = runit.DefaultLogExec()
	}

	finishExec := pods.NopFinishExec
	var podProcessReporter *podprocess.Reporter
	if preparerConfig.PodProcessReporterConfig.FullyConfigured() {
		podProcessReporterLogger := logger.SubLogger(logrus.Fields{
			"component": "PodProcessReporter",
		})

		podProcessReporter, err = podprocess.New(preparerConfig.PodProcessReporterConfig, podProcessReporterLogger, podStatusStore)
		if err != nil {
			return nil, err
		}

		finishExec = preparerConfig.PodProcessReporterConfig.FinishExec()
	}

	var hooksManifest manifest.Manifest
	var hooksPod *pods.Pod
	if preparerConfig.HooksManifest != NoHooksSentinelValue {
		if preparerConfig.HooksManifest == "" {
			return nil, util.Errorf("Must provide a hooks_manifest or sentinel value %q to indicate that there are no hooks", NoHooksSentinelValue)
		}

		hooksManifest, err = manifest.FromBytes([]byte(preparerConfig.HooksManifest))
		if err != nil {
			return nil, util.Errorf("Could not parse configured hooks manifest: %s", err)
		}
		hooksPodFactory := pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName)
		hooksPod = hooksPodFactory.NewHookPod(hooksManifest.ID())
	}
	return &Preparer{
		node:                   preparerConfig.NodeName,
		store:                  store,
		hooks:                  hooks.Hooks(preparerConfig.HooksDirectory, preparerConfig.PodRoot, &logger),
		podStatusStore:         podStatusStore,
		podStore:               podStore,
		Logger:                 logger,
		podFactory:             pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName),
		authPolicy:             authPolicy,
		maxLaunchableDiskUsage: maxLaunchableDiskUsage,
		finishExec:             finishExec,
		logExec:                logExec,
		logBridgeBlacklist:     preparerConfig.LogBridgeBlacklist,
		artifactVerifier:       artifactVerifier,
		artifactRegistry:       artifactRegistry,
		PodProcessReporter:     podProcessReporter,
		hooksManifest:          hooksManifest,
		hooksPod:               hooksPod,
		hooksExecDir:           preparerConfig.HooksDirectory,
	}, nil
}
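
All six examples share the same basic pattern: build a pod factory with pods.NewFactory(podRoot, nodeName), then create a pod per manifest ID (NewLegacyPod for ordinary pods, NewUUIDPod when a pod unique key is present, and a hook factory's NewHookPod for hook pods). The condensed sketch below is not taken from any of the projects above; it is a minimal restatement of that shared pattern, assuming import paths that follow the upstream square/p2 layout, with buildLegacyPod and both file paths being purely illustrative names.

package main

import (
	"log"
	"os"

	// Import paths are assumed to follow the upstream square/p2 layout.
	"github.com/square/p2/pkg/manifest"
	"github.com/square/p2/pkg/pods"
	"github.com/square/p2/pkg/types"
)

// buildLegacyPod is a hypothetical helper that condenses the factory usage
// shown in the examples above: resolve a node name, read a manifest, and
// create a legacy pod from its ID.
func buildLegacyPod(podRoot, manifestPath string) (*pods.Pod, error) {
	// Fall back to the hostname for the node name, as main.go and
	// bootstrap.go do above.
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	m, err := manifest.FromPath(manifestPath)
	if err != nil {
		return nil, err
	}

	// The factory binds the pod root and node name once; individual pods
	// are then created per manifest ID.
	factory := pods.NewFactory(podRoot, types.NodeName(hostname))

	// Pods without a unique key use the legacy constructor; a UUID pod
	// would instead come from factory.NewUUIDPod(m.ID(), uniqueKey).
	return factory.NewLegacyPod(m.ID()), nil
}

func main() {
	// Both paths are purely illustrative.
	if _, err := buildLegacyPod("/data/pods", "/etc/p2/agent.yaml"); err != nil {
		log.Fatalf("could not build pod: %v", err)
	}
}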