Example 1
// watchStatus streams status updates for the pod identified by
// *podUniqueKey and prints each update as JSON, stopping after
// *numIterations updates.
func watchStatus(client client.Client, logger logging.Logger) {
	key, err := types.ToPodUniqueKey(*podUniqueKey)
	if err != nil {
		logger.Fatalf("Could not parse passed pod unique key %q as uuid: %s", *podUniqueKey, err)
	}

	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	outCh, err := client.WatchStatus(ctx, key, 1) // 1 so we wait for the key to exist
	if err != nil {
		logger.Fatal(err)
	}

	for i := 0; i < *numIterations; i++ {
		val, ok := <-outCh
		if !ok {
			logger.Fatal("Channel closed unexpectedly")
		}

		if val.Error != nil {
			logger.Fatal(val.Error)
		}

		bytes, err := json.Marshal(val)
		if err != nil {
			logger.Fatal(err)
		}

		fmt.Println(string(bytes))
	}
}
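
Both examples dereference package-level flag pointers defined elsewhere in the command. For Example 1 those are podUniqueKey and numIterations. Below is a minimal sketch of how they might be declared, assuming the standard library flag package; the flag names and defaults are assumptions, and the real command may use a different CLI library.

import "flag"

var (
	// Hypothetical flag definitions; names and defaults are assumptions.
	podUniqueKey  = flag.String("pod-unique-key", "", "UUID of the pod whose status should be watched")
	numIterations = flag.Int("num-iterations", 1, "number of status updates to print before exiting")
)

flag.Parse() must run in main before watchStatus is called so the pointers carry the values passed on the command line.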
Example 2
// schedule reads a pod manifest from *manifestFile, schedules it on the
// node named by *node, and prints the resulting pod ID and unique key as JSON.
func schedule(client client.Client, logger logging.Logger) {
	m, err := manifest.FromPath(*manifestFile)
	if err != nil {
		logger.Fatalf("Could not read manifest: %s", err)
	}

	podUniqueKey, err := client.Schedule(m, types.NodeName(*node))
	if err != nil {
		logger.Fatalf("Could not schedule: %s", err)
	}

	output := struct {
		PodID        types.PodID        `json:"pod_id"`
		PodUniqueKey types.PodUniqueKey `json:"pod_unique_key"`
	}{
		PodID:        m.ID(),
		PodUniqueKey: podUniqueKey,
	}

	outBytes, err := json.Marshal(output)
	if err != nil {
		// The pod was already scheduled successfully; fall back to a plain
		// log message if the JSON summary cannot be marshaled.
		logger.Infof("Scheduled pod with key: %s", podUniqueKey)
		return
	}

	fmt.Println(string(outBytes))
}
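
Example 2 relies on manifestFile and node in the same way. A sketch under the same assumption (standard flag package, hypothetical flag names):

import "flag"

var (
	// Hypothetical flag definitions; names and defaults are assumptions.
	manifestFile = flag.String("manifest", "", "path to the pod manifest to schedule")
	node         = flag.String("node", "", "name of the node to schedule the pod on")
)

As with Example 1, flag.Parse() has to be called before schedule runs.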