Example #1
func (e EnvironmentExtractor) constructFinishFromEnvironment(exitCode int, exitStatus int) (FinishOutput, error) {
	podID := os.Getenv(pods.PodIDEnvVar)
	if podID == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", pods.PodIDEnvVar)
	}

	launchableID := os.Getenv(pods.LaunchableIDEnvVar)
	if launchableID == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", pods.LaunchableIDEnvVar)
	}

	entryPoint := os.Getenv(launch.EntryPointEnvVar)
	if entryPoint == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", launch.EntryPointEnvVar)
	}

	// It's okay if this one is missing; most pods are "legacy" pods that have a blank unique key
	podUniqueKey := os.Getenv(pods.PodUniqueKeyEnvVar)

	return FinishOutput{
		PodID:        types.PodID(podID),
		LaunchableID: launch.LaunchableID(launchableID),
		EntryPoint:   entryPoint,
		PodUniqueKey: types.PodUniqueKey(podUniqueKey),
		ExitCode:     exitCode,
		ExitStatus:   exitStatus,
	}, nil
}
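
The extractor above insists on each required environment variable being set before building the FinishOutput. For reference, here is a minimal stdlib-only sketch of the same required-env-var pattern; the helper name requireEnv and the POD_ID variable are illustrative and not part of the p2 codebase.

package main

import (
	"fmt"
	"os"
)

// requireEnv returns the value of the named environment variable, or an error if it is unset or empty.
func requireEnv(name string) (string, error) {
	value := os.Getenv(name)
	if value == "" {
		return "", fmt.Errorf("no %s env var set", name)
	}
	return value, nil
}

func main() {
	podID, err := requireEnv("POD_ID")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("pod id:", podID)
}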
Example #2
func TestSchedulePod(t *testing.T) {
	fakePodStore, server := setupServerWithFakePodStore()

	req := &podstore_protos.SchedulePodRequest{
		Manifest: validManifestString(),
		NodeName: "test_node",
	}

	resp, err := server.SchedulePod(context.Background(), req)
	if err != nil {
		t.Fatalf("Unexpected error from SchedulePod: %s", err)
	}

	// check that the key we got back actually returns an entry from the store
	pod, err := fakePodStore.ReadPod(types.PodUniqueKey(resp.PodUniqueKey))
	if err != nil {
		t.Fatalf("Unexpected error reading pod out of store after scheduling: %s", err)
	}

	if pod.Manifest.ID() != "test_app" {
		t.Errorf("Scheduled pod manifest had wrong ID, expected %q but got %q", "test_app", pod.Manifest.ID())
	}

	if pod.Node != "test_node" {
		t.Errorf("Scheduled node didn't match expectation, expected %q but got %q", "test_node", pod.Node)
	}
}
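
The test depends on a fake pod store that supports the schedule-then-read-back round trip it asserts. A self-contained sketch of such a fake (not p2's actual podstore implementation; all names here are illustrative) could look like this:

package store

import "fmt"

// Pod pairs a manifest with the node it was scheduled on.
type Pod struct {
	Manifest string
	Node     string
}

// FakeStore is an in-memory stand-in for a pod store.
type FakeStore struct {
	pods    map[string]Pod
	counter int
}

func NewFakeStore() *FakeStore {
	return &FakeStore{pods: make(map[string]Pod)}
}

// Schedule records the pod and returns the unique key it was stored under.
func (s *FakeStore) Schedule(manifest, node string) string {
	s.counter++
	key := fmt.Sprintf("pod-%d", s.counter)
	s.pods[key] = Pod{Manifest: manifest, Node: node}
	return key
}

// ReadPod looks the pod up by the key returned from Schedule.
func (s *FakeStore) ReadPod(key string) (Pod, error) {
	pod, ok := s.pods[key]
	if !ok {
		return Pod{}, fmt.Errorf("no pod scheduled under key %q", key)
	}
	return pod, nil
}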
Example #3
func PodFromPodHome(node types.NodeName, home string) (*Pod, error) {
	// Check if the pod home is namespaced by a UUID by splitting on a hyphen and
	// checking the last part. If it parses as a UUID, pass it to newPodWithHome.
	// Otherwise, pass an empty uniqueKey.
	homeParts := strings.Split(filepath.Base(home), "-")

	var uniqueKey types.PodUniqueKey
	podUUID := uuid.Parse(homeParts[len(homeParts)-1])
	if podUUID != nil {
		uniqueKey = types.PodUniqueKey(podUUID.String())
	}

	temp := Pod{
		home: home,
		node: node,
	}
	manifest, err := temp.CurrentManifest()
	if err == NoCurrentManifest {
		return nil, util.Errorf("No current manifest set, this is not an extant pod directory")
	} else if err != nil {
		return nil, err
	}

	return newPodWithHome(manifest.ID(), uniqueKey, home, node), nil
}
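
CurrentManifest signals the missing-manifest case with a sentinel error value (NoCurrentManifest) that callers compare against directly. A stdlib-only sketch of that pattern, with illustrative names rather than p2's, follows:

package manifeststore

import (
	"errors"
	"fmt"
)

// ErrNoCurrentManifest is returned when a pod directory has no current manifest.
var ErrNoCurrentManifest = errors.New("no current manifest set")

// Store maps pod home directories to their current manifests.
type Store struct {
	manifests map[string]string
}

// CurrentManifest returns the manifest for home, or ErrNoCurrentManifest if none is set.
func (s *Store) CurrentManifest(home string) (string, error) {
	m, ok := s.manifests[home]
	if !ok {
		return "", ErrNoCurrentManifest
	}
	return m, nil
}

// Describe shows how a caller distinguishes the sentinel from other errors,
// mirroring the err == NoCurrentManifest check above.
func Describe(s *Store, home string) string {
	m, err := s.CurrentManifest(home)
	if errors.Is(err, ErrNoCurrentManifest) {
		return fmt.Sprintf("%s is not an extant pod directory", home)
	} else if err != nil {
		return err.Error()
	}
	return m
}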
Example #4
// Pod initializes a pod based on the hooked pod manifest and the system pod root.
func (h *HookEnv) Pod() (*pods.Pod, error) {
	factory := pods.NewFactory(os.Getenv(HOOKED_SYSTEM_POD_ROOT_ENV_VAR), types.NodeName(os.Getenv(HOOKED_NODE_ENV_VAR)))

	podID, err := h.PodID()
	if err != nil {
		return nil, err
	}

	uuid := types.PodUniqueKey(os.Getenv(HOOKED_POD_UNIQUE_KEY_ENV_VAR))
	if uuid == "" {
		return factory.NewLegacyPod(podID), nil
	}
	return factory.NewUUIDPod(podID, uuid)
}
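
HookEnv.Pod treats the pod ID as required and the unique key as optional, falling back to a legacy pod when the key is empty. A stdlib-only sketch of that branch, with assumed env var names and a stand-in Pod type (none of these names come from p2):

package main

import (
	"fmt"
	"os"
)

// Pod is an illustrative stand-in for pods.Pod.
type Pod struct {
	ID        string
	UniqueKey string // empty for legacy pods
}

func newLegacyPod(id string) *Pod { return &Pod{ID: id} }

func newUUIDPod(id, key string) *Pod { return &Pod{ID: id, UniqueKey: key} }

// podFromEnv mirrors the branch above: the pod ID is required, the unique key is optional.
// The env var names below are assumptions, not the real HOOKED_* constants.
func podFromEnv() (*Pod, error) {
	id := os.Getenv("HOOKED_POD_ID")
	if id == "" {
		return nil, fmt.Errorf("no HOOKED_POD_ID env var set")
	}

	key := os.Getenv("HOOKED_POD_UNIQUE_KEY")
	if key == "" {
		return newLegacyPod(id), nil
	}
	return newUUIDPod(id, key), nil
}

func main() {
	pod, err := podFromEnv()
	fmt.Println(pod, err)
}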
Example #5
func handlePodRemoval(consulClient consulutil.ConsulClient, labeler labels.ApplicatorWithoutWatches) error {
	var rm *P2RM
	if *podUniqueKey != "" {
		rm = NewUUIDP2RM(consulClient, types.PodUniqueKey(*podUniqueKey), types.PodID(*podName), labeler)
	} else {
		if *nodeName == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return fmt.Errorf("error getting hostname. use --node to specify a node: %v\n", err)
			}
			*nodeName = hostname
		}

		rm = NewLegacyP2RM(consulClient, types.PodID(*podName), types.NodeName(*nodeName), labeler)
	}

	podIsManagedByRC, rcID, err := rm.checkForManagingReplicationController()
	if err != nil {
		return err
	}

	if !podIsManagedByRC {
		err = rm.deletePod()
		if err != nil {
			return err
		}
	}

	if podIsManagedByRC && !*deallocation {
		return fmt.Errorf("error: %s is managed by replication controller: %s\n"+
			"It's possible you meant to deallocate this pod on this node. If so, please confirm your intention with --deallocate\n", *podName, rcID)
	}

	if podIsManagedByRC && *deallocation {
		err = rm.decrementDesiredCount(rcID)
		if err != nil {
			return fmt.Errorf("Encountered error deallocating from the RC %s. You may attempt this command again or use `p2-rctl` to clean up manually.\n%v",
				rcID,
				err)
		}
	}

	fmt.Printf("%s: successfully removed %s\n", rm.NodeName, rm.PodID)
	return nil
}
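
Stripped of the store and labeler plumbing, handlePodRemoval reduces to a small decision table over (managed by RC, --deallocate): unmanaged pods are deleted directly, RC-managed pods require explicit confirmation before the desired count is decremented. A self-contained sketch of just that decision, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

type action int

const (
	deletePod action = iota
	decrementRC
)

// decideRemoval mirrors the branches above; rcID is only meaningful when managedByRC is true.
func decideRemoval(managedByRC, deallocate bool, rcID string) (action, error) {
	switch {
	case !managedByRC:
		return deletePod, nil
	case deallocate:
		return decrementRC, nil
	default:
		return 0, errors.New("pod is managed by replication controller " + rcID +
			"; confirm your intention with --deallocate")
	}
}

func main() {
	fmt.Println(decideRemoval(true, false, "rc-1234"))
	fmt.Println(decideRemoval(false, false, ""))
}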
Example #6
// Schedule matches the podstore.consulStore signature.
func (c Client) Schedule(manifest manifest.Manifest, node types.NodeName) (types.PodUniqueKey, error) {
	manifestBytes, err := manifest.Marshal()
	if err != nil {
		return "", util.Errorf("Could not marshal manifest: %s", err)
	}

	req := &podstore_protos.SchedulePodRequest{
		NodeName: node.String(),
		Manifest: string(manifestBytes),
	}

	resp, err := c.client.SchedulePod(context.Background(), req)
	if err != nil {
		return "", util.Errorf("Could not schedule pod: %s", err)
	}

	return types.PodUniqueKey(resp.PodUniqueKey), nil
}
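
Client.Schedule is a thin wrapper: marshal the domain object into a protobuf request, call the generated client, and convert the response back into a domain type. A self-contained sketch of that wrapper shape, using a hand-written interface in place of the generated gRPC client (all names and types here are assumptions, and the context is passed in rather than created internally):

package schedclient

import (
	"context"
	"encoding/json"
	"fmt"
)

type PodUniqueKey string

type scheduleRequest struct {
	NodeName string `json:"node_name"`
	Manifest string `json:"manifest"`
}

type scheduleResponse struct {
	PodUniqueKey string `json:"pod_unique_key"`
}

// scheduler stands in for the generated RPC client interface.
type scheduler interface {
	SchedulePod(ctx context.Context, req *scheduleRequest) (*scheduleResponse, error)
}

// Client wraps the RPC client behind a domain-level method.
type Client struct {
	client scheduler
}

// Schedule marshals the manifest, issues the RPC, and converts the response into a domain type.
func (c Client) Schedule(ctx context.Context, manifest interface{}, node string) (PodUniqueKey, error) {
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
		return "", fmt.Errorf("could not marshal manifest: %s", err)
	}

	resp, err := c.client.SchedulePod(ctx, &scheduleRequest{
		NodeName: node,
		Manifest: string(manifestBytes),
	})
	if err != nil {
		return "", fmt.Errorf("could not schedule pod: %s", err)
	}

	return PodUniqueKey(resp.PodUniqueKey), nil
}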
Example #7
// scanRow runs Scan() once on the passed Scanner and converts the result to a FinishOutput.
func scanRow(scanner Scanner) (FinishOutput, error) {
	var id int64
	var date time.Time
	var podID, podUniqueKey, launchableID, entryPoint string
	var exitCode, exitStatus int

	err := scanner.Scan(&id, &date, &podID, &podUniqueKey, &launchableID, &entryPoint, &exitCode, &exitStatus)
	if err != nil {
		return FinishOutput{}, err
	}

	return FinishOutput{
		ID:           id,
		PodID:        types.PodID(podID),
		LaunchableID: launch.LaunchableID(launchableID),
		EntryPoint:   entryPoint,
		PodUniqueKey: types.PodUniqueKey(podUniqueKey),
		ExitCode:     exitCode,
		ExitStatus:   exitStatus,
		ExitTime:     date,
	}, nil
}
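
scanRow takes a Scanner so it can be reused for anything with a compatible Scan method. A minimal database/sql sketch of the same scan-then-build pattern, scanning directly into struct fields; the struct, column order, and column names here are hypothetical:

package finishes

import (
	"database/sql"
	"time"
)

// FinishOutput mirrors the struct built above, with plain strings in place of the p2 types.
type FinishOutput struct {
	ID           int64
	PodID        string
	PodUniqueKey string
	LaunchableID string
	EntryPoint   string
	ExitCode     int
	ExitStatus   int
	ExitTime     time.Time
}

// scanFinish reads one row whose columns were selected in the order
// (id, date, pod_id, pod_unique_key, launchable_id, entry_point, exit_code, exit_status).
func scanFinish(rows *sql.Rows) (FinishOutput, error) {
	var out FinishOutput
	err := rows.Scan(
		&out.ID,
		&out.ExitTime,
		&out.PodID,
		&out.PodUniqueKey,
		&out.LaunchableID,
		&out.EntryPoint,
		&out.ExitCode,
		&out.ExitStatus,
	)
	return out, err
}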
Example #8
func main() {
	// 1. Generate pod for preparer in this code version (`rake artifact:prepare`)
	// 2. Locate manifests for preparer pod, premade consul pod
	// 3. Execute bootstrap with premade consul pod and preparer pod
	// 4. Delete all pods from the pod store (uuid pods). This allows the same vagrant VM
	//    to be used between tests.
	// 5. Deploy p2-rctl-server pod with p2-schedule
	// 6. Schedule a hello pod manifest with a replication controller
	// 7. Schedule a hello pod as a "uuid pod"
	// 8. Verify that p2-rctl-server is running by checking health.
	// 9. Verify that the RC-deployed hello is running by checking health. Monitoring uses
	//    the pod label queries that were written.
	// 10. Verify that the uuid hello pod is running by curling its HTTP port. Health is not
	//     checked for uuid pods, so a health check cannot be used here.

	// list of services running on integration test host
	services := []string{"p2-preparer", "hello"}
	tempdir, err := ioutil.TempDir("", "single-node-check")
	if err != nil {
		log.Fatalf("Could not create temp directory, bailing: %s", err)
	}
	log.Printf("Putting test manifests in %s\n", tempdir)

	userHookManifest, err := userCreationHookManifest(tempdir)
	if err != nil {
		log.Fatalf("Couldn't schedule the user creation hook: %s", err)
	}

	preparerManifest, err := generatePreparerPod(tempdir, userHookManifest)
	if err != nil {
		log.Fatalf("Could not generate preparer pod: %s\n", err)
	}
	config, err := preparer.LoadConfig(preparerManifest)
	if err != nil {
		log.Fatalf("could not unmarshal config: %s\n", err)
	}

	consulManifest, err := getConsulManifest(tempdir)
	if err != nil {
		log.Fatalf("Could not generate consul pod: %s\n", err)
	}
	signedPreparerManifest, err := signManifest(preparerManifest, tempdir)
	if err != nil {
		log.Fatalf("Could not sign preparer manifest: %s\n", err)
	}
	signedConsulManifest, err := signManifest(consulManifest, tempdir)
	if err != nil {
		log.Fatalf("Could not sign consul manifest: %s\n", err)
	}

	fmt.Println("Executing bootstrap")
	err = executeBootstrap(signedPreparerManifest, signedConsulManifest)
	if err != nil {
		log.Fatalf("Could not execute bootstrap: %s\n%s", err, targetLogs())
	}

	// Wait a bit for preparer's http server to be ready
	err = waitForStatus(preparerStatusPort, "preparer", 10*time.Second)
	if err != nil {
		log.Fatalf("Couldn't check preparer status: %s", err)
	}

	consulClient := kp.NewConsulClient(kp.Options{})
	// Get all the pod unique keys so we can unschedule them all
	keys, _, err := consulClient.KV().Keys(podstore.PodTree+"/", "", nil)
	if err != nil {
		log.Fatalf("Could not fetch pod keys to remove from store at beginning of test: %s", err)
	}

	podStore := podstore.NewConsul(consulClient.KV())
	for _, key := range keys {
		keyParts := strings.Split(key, "/")
		err = podStore.Unschedule(types.PodUniqueKey(keyParts[len(keyParts)-1]))
		if err != nil {
			log.Fatalf("Could not unschedule pod %s from consul: %s", keyParts[len(keyParts)-1], err)
		}
	}

	err = scheduleRCTLServer(tempdir)
	if err != nil {
		log.Fatalf("Could not schedule RCTL server: %s", err)
	}

	// Now we're going to test some conditions that each take a non-negligible amount of time to verify.
	// We'll spin up a goroutine for each "test", which either closes its error channel on success or sends an error.
	type testCase struct {
		testName string
		errCh    chan error
		logger   logging.Logger
	}

	var testCases []testCase

	// Test that a "legacy" pod installed by an RC comes up correctly and has health reported
	legacyTest := make(chan error)
	verifyLegacyPodLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyLegacyPod",
	})
	go verifyLegacyPod(legacyTest, tempdir, config, services, verifyLegacyPodLogger)
	testCases = append(testCases, testCase{
		testName: "verifyLegacyPod",
		errCh:    legacyTest,
		logger:   verifyLegacyPodLogger,
	})

	// Test that a "uuid" pod installed by p2-schedule comes up correctly
	uuidTest := make(chan error)
	verifyUUIDPodLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyUUIDPod",
	})
	testCases = append(testCases, testCase{
		testName: "verifyUUIDPod",
		errCh:    uuidTest,
		logger:   verifyUUIDPodLogger,
	})
	go verifyUUIDPod(uuidTest, tempdir, verifyUUIDPodLogger)

	// Test that exit information for a process started by a pod is properly recorded in consul.
	processExitTest := make(chan error)
	verifyProcessExitLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyProcessExit",
	})
	go verifyProcessExit(processExitTest, tempdir, verifyProcessExitLogger)
	testCases = append(testCases, testCase{
		testName: "verifyProcessExit",
		errCh:    processExitTest,
		logger:   verifyProcessExitLogger,
	})

	for _, t := range testCases {
		select {
		case err, ok := <-t.errCh:
			if err != nil {
				t.logger.Fatal(err)
			}
			if ok {
				t.logger.Fatal("Error channel not closed")
			}
		case <-time.After(1 * time.Minute):
			t.logger.Fatal("Timed out waiting for a result")
		}

		t.logger.Infoln("Success!")
	}
}
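
The final loop fans in per-test error channels: each verification goroutine closes its channel on success or sends an error on failure, and the main loop guards each wait with a timeout. A self-contained sketch of that pattern, with fake checks standing in for the real verification work:

package main

import (
	"errors"
	"fmt"
	"time"
)

// check pairs a test name with its error channel, like the testCase struct above.
type check struct {
	name  string
	errCh chan error
}

// runCheck starts a fake verification goroutine that either sends an error or
// closes the channel to signal success.
func runCheck(name string, fail bool) check {
	c := check{name: name, errCh: make(chan error)}
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for real verification work
		if fail {
			c.errCh <- errors.New("verification failed")
			return
		}
		close(c.errCh)
	}()
	return c
}

func main() {
	checks := []check{
		runCheck("verifyLegacyPod", false),
		runCheck("verifyUUIDPod", true),
	}

	for _, c := range checks {
		select {
		case err, ok := <-c.errCh:
			if err != nil {
				fmt.Printf("%s: %s\n", c.name, err)
				continue
			}
			if ok {
				fmt.Printf("%s: error channel not closed\n", c.name)
				continue
			}
			fmt.Printf("%s: success\n", c.name)
		case <-time.After(5 * time.Second):
			fmt.Printf("%s: timed out waiting for a result\n", c.name)
		}
	}
}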