Code example #1
File: listener.go Project: romana/core
// SetConfig implements SetConfig function of the Service interface.
func (l *KubeListener) SetConfig(config common.ServiceConfig) error {
	confString := "/etc/romana/romana.conf.yml:kubernetesListener:config:"
	log.Trace(trace.Inside, confString, config)

	m := config.ServiceSpecific
	if kl, ok := m["kubernetes_url"]; !ok || kl == "" {
		return fmt.Errorf("%s%s", confString, "kubernetes_url required in config.")
	}
	l.kubeURL = m["kubernetes_url"].(string)

	if nnp, ok := m["namespace_notification_path"]; !ok || nnp == "" {
		return fmt.Errorf("%s%s", confString, "namespace_notification_path required in config.")
	}
	l.namespaceNotificationPath = m["namespace_notification_path"].(string)

	if pnppre, ok := m["policy_notification_path_prefix"]; !ok || pnppre == "" {
		return fmt.Errorf("%s%s", confString, "policy_notification_path_prefix required in config.")
	}
	l.policyNotificationPathPrefix = m["policy_notification_path_prefix"].(string)

	if pnppost, ok := m["policy_notification_path_prefix"]; !ok || pnppost == "" {
		return fmt.Errorf("%s%s", confString, "policy_notification_path_postfix required in config.")
	}
	l.policyNotificationPathPostfix = m["policy_notification_path_postfix"].(string)

	if sln, ok := m["segment_label_name"]; !ok || sln == "" {
		return fmt.Errorf("%s%s", confString, "segment_label_name required in config.")
	}
	l.segmentLabelName = m["segment_label_name"].(string)

	if tln, ok := m["tenant_label_name"]; !ok || tln == "" {
		return fmt.Errorf("%s%s", confString, "tenant_label_name required in config.")
	}
	l.tenantLabelName = m["tenant_label_name"].(string)

	l.namespaceBufferSize = 1000

	if kc, ok := m["kubernetes_config"]; !ok || kc == "" {
		// Default kubernetes config location on ubuntu
		// TODO: this should not be hard coded, other
		//       distributions may have other user names.
		m["kubernetes_config"] = "/home/ubuntu/.kube/config"
	}

	// TODO: this loads the kubernetes config from flags provided in main;
	//       it should be loading from the path provided by romana-root. Stas.
	kubeClientConfig, err := clientcmd.BuildConfigFromFlags("", m["kubernetes_config"].(string))
	if err != nil {
		return fmt.Errorf("Failed to load kubernetes kubeClientConfig %s", err)
	}
	clientset, err := kubernetes.NewForConfig(kubeClientConfig)
	if err != nil {
		return fmt.Errorf("Failed to make kubernetes client %s", err)
	}
	l.kubeClient = clientset

	return nil
}
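For reference, a minimal sketch of the ServiceSpecific map that SetConfig above validates. The keys mirror the ones the function checks; the values are illustrative placeholders rather than romana defaults (only the kubernetes_config fallback path comes from the code itself). Each value must be present, non-empty, and a string, since SetConfig type-asserts with .(string).

package main

import "fmt"

func main() {
	// Illustrative values only; the keys are the ones SetConfig requires.
	serviceSpecific := map[string]interface{}{
		"kubernetes_url":                   "http://127.0.0.1:8080",
		"namespace_notification_path":      "/api/v1/namespaces/?watch=true",
		"policy_notification_path_prefix":  "/apis/extensions/v1beta1/namespaces/",
		"policy_notification_path_postfix": "/networkpolicies/?watch=true",
		"segment_label_name":               "romanaSegment",
		"tenant_label_name":                "romanaTenant",
		// Optional; this is the default SetConfig falls back to.
		"kubernetes_config": "/home/ubuntu/.kube/config",
	}
	fmt.Println(serviceSpecific)
}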
Code example #2
File: client.go Project: vdemeester/traefik
// NewInClusterClient returns a new Kubernetes client that expects to run inside the cluster
func NewInClusterClient() (Client, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return &clientImpl{
		clientset: clientset,
	}, nil
}
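As a side note, rest.InClusterConfig only works when the process runs in a pod: it reads the API server address from the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables and the service-account token mounted into the pod, which is exactly the error NewInClusterClient surfaces otherwise. A minimal, self-contained sketch (not traefik code) of that precondition:

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/rest"
)

func main() {
	// The kubelet injects these into every pod; outside a pod they are unset
	// and rest.InClusterConfig returns an error.
	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
	if host == "" || port == "" {
		fmt.Println("not running inside a cluster; InClusterConfig would fail")
		return
	}
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println("in-cluster API server:", config.Host)
}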
Code example #3
File: kubernetes.go Project: swsnider/prometheus
// New creates a new Kubernetes discovery for the given role.
func New(l log.Logger, conf *config.KubernetesSDConfig) (*Kubernetes, error) {
	var (
		kcfg *rest.Config
		err  error
	)
	if conf.APIServer.URL == nil {
		kcfg, err = rest.InClusterConfig()
		if err != nil {
			return nil, err
		}
	} else {
		token := conf.BearerToken
		if conf.BearerTokenFile != "" {
			bf, err := ioutil.ReadFile(conf.BearerTokenFile)
			if err != nil {
				return nil, err
			}
			token = string(bf)
		}

		kcfg = &rest.Config{
			Host:        conf.APIServer.String(),
			BearerToken: token,
			TLSClientConfig: rest.TLSClientConfig{
				CAFile: conf.TLSConfig.CAFile,
			},
		}
	}
	kcfg.UserAgent = "prometheus/discovery"

	if conf.BasicAuth != nil {
		kcfg.Username = conf.BasicAuth.Username
		kcfg.Password = conf.BasicAuth.Password
	}
	kcfg.TLSClientConfig.CertFile = conf.TLSConfig.CertFile
	kcfg.TLSClientConfig.KeyFile = conf.TLSConfig.KeyFile
	kcfg.Insecure = conf.TLSConfig.InsecureSkipVerify

	c, err := kubernetes.NewForConfig(kcfg)
	if err != nil {
		return nil, err
	}
	return &Kubernetes{
		client: c,
		logger: l,
		role:   conf.Role,
	}, nil
}
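The else branch above is the part specific to kubernetes.NewForConfig: the rest.Config is assembled by hand rather than loaded from a kubeconfig, and NewForConfig accepts it as-is. A self-contained sketch (not Prometheus code) of that pattern follows; the host and token are illustrative assumptions, and Insecure is set only so the sketch needs no CA file on disk, whereas a real setup would set CAFile as the code above does.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Hand-built client config, analogous to the else branch in New above.
	cfg := &rest.Config{
		Host:        "https://kube-apiserver.example.com:6443", // hypothetical endpoint
		BearerToken: "REPLACE_WITH_TOKEN",
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true, // sketch only; prefer CAFile in practice
		},
		UserAgent: "prometheus/discovery",
	}

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("clientset for %s created: %T\n", cfg.Host, clientset)
}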
Code example #4
File: daemon.go Project: cilium-team/cilium
func createK8sClient(endpoint, kubeCfgPath string) (*k8s.Clientset, error) {
	var (
		config *k8sRest.Config
		err    error
	)
	if kubeCfgPath != "" {
		config, err = k8sClientCmd.BuildConfigFromFlags("", kubeCfgPath)
	} else {
		config = &k8sRest.Config{Host: endpoint}
		err = k8sRest.SetKubernetesDefaults(config)
	}
	if err != nil {
		return nil, err
	}
	return k8s.NewForConfig(config)
}
Code example #5
File: client.go Project: vdemeester/traefik
// NewInClusterClientWithEndpoint is the same as NewInClusterClient but uses the provided endpoint URL
func NewInClusterClientWithEndpoint(endpoint string) (Client, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}

	config.Host = endpoint

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return &clientImpl{
		clientset: clientset,
	}, nil
}
Code example #6
File: main.go Project: ncdc/kubernetes
func main() {
	// creates the in-cluster config
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	for {
		pods, err := clientset.Core().Pods("").List(api.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
		time.Sleep(10 * time.Second)
	}
}
Code example #7
File: main.go Project: ncdc/kubernetes
func main() {
	flag.Parse()
	// uses the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	for {
		pods, err := clientset.Core().Pods("").List(api.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
		time.Sleep(10 * time.Second)
	}
}
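Examples #6 and #7 are the canonical in-cluster and out-of-cluster client-go snippets; they differ only in how the rest.Config is obtained before kubernetes.NewForConfig. A self-contained sketch (not from either project) that combines them on a more recent client-go surface, where pod listing goes through CoreV1 with a context and metav1.ListOptions instead of the older Core()/api.ListOptions shown above; the flag name and default kubeconfig path are assumptions.

package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	defaultPath := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	kubeconfig := flag.String("kubeconfig", defaultPath, "Path to a kubeconfig file.")
	flag.Parse()

	// Prefer the in-cluster service-account config; fall back to kubeconfig.
	config, err := rest.InClusterConfig()
	if err != nil {
		config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
	}
	if err != nil {
		panic(err.Error())
	}

	// Creates the clientset from whichever config succeeded.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}

	for {
		pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
		time.Sleep(10 * time.Second)
	}
}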
Code example #8
File: main.go Project: romana/core
func main() {
	// Accept a kubernetes config file or try the default location.
	var kubeConfig = flag.String("kubeconfig", os.Getenv("HOME")+"/.kube/config",
		"Kubernetes config file.")
	var romanaConfig = flag.String("romanaconfig", os.Getenv("HOME")+"/.romana.yaml",
		"Romana config file.")
	version := flag.Bool("version", false, "Build Information.")
	flag.Parse()

	if *version {
		fmt.Println(common.BuildInfo())
		return
	}

	if *kubeConfig == "" {
		log.Println("Error: must have kubernetes config files specified.")
		os.Exit(1)
	}

	if err := initConfig(*romanaConfig); err != nil {
		log.Println("Error reading romana config file: ", err)
		os.Exit(1)
	}

	// Since the romana config was loaded successfully above, set rootURL from it.
	setRomanaRootURL()

	// Try generating config for kubernetes client-go from flags passed,
	// so that we can connect to kubernetes using them.
	kConfig, err := clientcmd.BuildConfigFromFlags("", *kubeConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Get a set of REST clients which connect to kubernetes services
	// from the config generated above.
	restClientSet, err := kubernetes.NewForConfig(kConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Channel for stopping watching node events.
	stop := make(chan struct{}, 1)

	// nodeWatcher is a new ListWatch object created from the specified
	// restClientSet above for watching node events.
	nodeWatcher := cache.NewListWatchFromClient(
		restClientSet.CoreClient,
		"nodes",
		api.NamespaceAll,
		fields.Everything())

	// Set up notifications for specific events using NewInformer.
	_, nodeInformer := cache.NewInformer(
		nodeWatcher,
		&v1.Node{},
		time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    kubernetesAddNodeEventHandler,
			UpdateFunc: kubernetesUpdateNodeEventHandler,
			DeleteFunc: kubernetesDeleteNodeEventHandler,
		},
	)

	log.Println("Starting receving node events.")
	go nodeInformer.Run(stop)

	// Set up channel on which to send signal notifications.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)

	// Block until a signal is received.
	<-c

	// Stop watching node events.
	close(stop)
	log.Println("Stopped watching node events and quitting watchnodes.")
}
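The three handlers wired into cache.ResourceEventHandlerFuncs are defined elsewhere in romana/core and are not shown in this excerpt. The hypothetical sketch below (same file as the main above, reusing its v1 and log imports) only illustrates the callback signatures client-go expects: AddFunc and DeleteFunc take a single object, UpdateFunc takes the old and new objects. The bodies merely log the node name.

// Hypothetical stand-ins for the real handlers referenced above.
func kubernetesAddNodeEventHandlerSketch(obj interface{}) {
	if node, ok := obj.(*v1.Node); ok {
		log.Println("node added:", node.Name)
	}
}

func kubernetesUpdateNodeEventHandlerSketch(oldObj, newObj interface{}) {
	if node, ok := newObj.(*v1.Node); ok {
		log.Println("node updated:", node.Name)
	}
}

func kubernetesDeleteNodeEventHandlerSketch(obj interface{}) {
	if node, ok := obj.(*v1.Node); ok {
		log.Println("node deleted:", node.Name)
	}
}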
Code example #9
File: framework.go Project: olegshaldybin/kubernetes
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
	// The fact that we need this feels like a bug in ginkgo.
	// https://github.com/onsi/ginkgo/issues/222
	f.cleanupHandle = AddCleanupAction(f.AfterEach)
	if f.Client == nil {
		By("Creating a kubernetes client")
		config, err := LoadConfig()
		Expect(err).NotTo(HaveOccurred())
		config.QPS = f.options.ClientQPS
		config.Burst = f.options.ClientBurst
		if f.options.GroupVersion != nil {
			config.GroupVersion = f.options.GroupVersion
		}
		if TestContext.KubeAPIContentType != "" {
			config.ContentType = TestContext.KubeAPIContentType
		}
		c, err := loadClientFromConfig(config)
		Expect(err).NotTo(HaveOccurred())
		f.Client = c
		f.ClientSet, err = internalclientset.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		clientRepoConfig := getClientRepoConfig(config)
		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
		Expect(err).NotTo(HaveOccurred())
		f.ClientPool = dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	}

	if f.federated {
		if f.FederationClientset_1_5 == nil {
			By("Creating a release 1.4 federation Clientset")
			var err error
			f.FederationClientset_1_5, err = LoadFederationClientset_1_5()
			Expect(err).NotTo(HaveOccurred())
		}
		By("Waiting for federation-apiserver to be ready")
		err := WaitForFederationApiserverReady(f.FederationClientset_1_5)
		Expect(err).NotTo(HaveOccurred())
		By("federation-apiserver is ready")

		By("Creating a federation namespace")
		ns, err := f.createFederationNamespace(f.BaseName)
		Expect(err).NotTo(HaveOccurred())
		f.FederationNamespace = ns
		By(fmt.Sprintf("Created federation namespace %s", ns.Name))
	}

	By("Building a namespace api object")
	namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
		"e2e-framework": f.BaseName,
	})
	Expect(err).NotTo(HaveOccurred())

	f.Namespace = namespace

	if TestContext.VerifyServiceAccount {
		By("Waiting for a default service account to be provisioned in namespace")
		err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	} else {
		Logf("Skipping waiting for service account")
	}

	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
		f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{
			inKubemark: ProviderIs("kubemark"),
			masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
		})
		if err != nil {
			Logf("Error while creating NewResourceUsageGatherer: %v", err)
		} else {
			go f.gatherer.startGatheringData()
		}
	}

	if TestContext.GatherLogsSizes {
		f.logsSizeWaitGroup = sync.WaitGroup{}
		f.logsSizeWaitGroup.Add(1)
		f.logsSizeCloseChannel = make(chan bool)
		f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
		go func() {
			f.logsSizeVerifier.Run()
			f.logsSizeWaitGroup.Done()
		}()
	}
}