Example #1
func doGenCommitters(client *github_api.Client) {
	c, err := github.UsersWithCommit(client, org, project)
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}
	if err = writeWhitelist(*committers, "# auto-generated by "+os.Args[0]+" -gen-committers; manual additions should go in the whitelist", c); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")

	users, err := loadWhitelist(*userWhitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}
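	// De-dupe the manual whitelist against the freshly generated committers list.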
	existing := util.NewStringSet(c...)
	newUsers := []string{}
	for _, u := range users {
		if existing.Has(u) {
			glog.Infof("%v is a dup, or already a committer. Will remove from whitelist.", u)
			continue
		}
		existing.Insert(u)
		newUsers = append(newUsers, u)
	}
	if err = writeWhitelist(*userWhitelist, "# remove dups with "+os.Args[0]+" -gen-committers", newUsers); err != nil {
		glog.Fatalf("Unable to write de-duped whitelist: %v", err)
	}
	glog.Info("Successfully de-duped whitelist.")
	os.Exit(0)
}
Example #2
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}

	go registerHandlers(defErrorPage)

	var tcpSvcs map[string]int
	if *tcpServices != "" {
		tcpSvcs = parseTCPServices(*tcpServices)
	} else {
		glog.Infof("No tcp/https services specified")
	}

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

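	// Build the Kubernetes client from the in-cluster service account when running inside the cluster, otherwise from the supplied kubeconfig.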
	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = api.NamespaceAll
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace, tcpSvcs)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example #3
func (t *CloudInitTarget) fetch(p *fi.Source, destPath string) {
	// We could probably move this to fi.Source - it is likely to be the same for every provider
	if p.URL != "" {
		if p.Parent != nil {
			glog.Fatalf("unexpected parent with SourceURL in FetchInstructions: %v", p)
		}
		t.AddDownloadCommand(Once, p.URL, destPath)
	} else if p.ExtractFromArchive != "" {
		if p.Parent == nil {
			glog.Fatalf("unexpected ExtractFromArchive without parent in FetchInstructions: %v", p)
		}

		// TODO: Remove duplicate commands?
		archivePath := "/tmp/" + utils.SanitizeString(p.Parent.Key())
		t.fetch(p.Parent, archivePath)

		extractDir := "/tmp/extracted_" + utils.SanitizeString(p.Parent.Key())
		t.AddMkdirpCommand(extractDir, 0755)
		t.AddCommand(Once, "tar", "zxf", archivePath, "-C", extractDir)

		// Always because this shouldn't happen and we want an indication that it happened
		t.AddCommand(Always, "cp", path.Join(extractDir, p.ExtractFromArchive), destPath)
	} else {
		glog.Fatalf("unknown FetchInstructions: %v", p)
	}
}
Example #4
// run is responsible for preparing environment for actual build.
// It accepts factoryFunc and an ordered array of SCMAuths.
func run(builderFactory factoryFunc, scmAuths []scmauth.SCMAuth) {
	client, endpoint, err := dockerutil.NewHelper().GetClient()
	if err != nil {
		glog.Fatalf("Error obtaining docker client: %v", err)
	}
	buildStr := os.Getenv("BUILD")
	build := api.Build{}
	if err := latest.Codec.DecodeInto([]byte(buildStr), &build); err != nil {
		glog.Fatalf("Unable to parse build: %v", err)
	}
	var (
		authcfg     docker.AuthConfiguration
		authPresent bool
	)
	output := build.Spec.Output.To != nil && len(build.Spec.Output.To.Name) != 0
	if output {
		authcfg, authPresent = dockercfg.NewHelper().GetDockerAuth(
			build.Spec.Output.To.Name,
			dockercfg.PullAuthType,
		)
	}
	if build.Spec.Source.SourceSecret != nil {
		if err := setupSourceSecret(build.Spec.Source.SourceSecret.Name, scmAuths); err != nil {
			glog.Fatalf("Cannot setup secret file for accessing private repository: %v", err)
		}
	}
	b := builderFactory(client, endpoint, authcfg, authPresent, &build)
	if err = b.Build(); err != nil {
		glog.Fatalf("Build error: %v", err)
	}
	if !output {
		glog.Warning("Build does not have an Output defined, no output image was pushed to a registry.")
	}

}
Example #5
// RunServiceAccountTokensController starts the service account token controller
func (c *MasterConfig) RunServiceAccountTokensController() {
	if len(c.Options.ServiceAccountConfig.PrivateKeyFile) == 0 {
		glog.Infof("Skipped starting Service Account Token Manager, no private key specified")
		return
	}

	privateKey, err := serviceaccount.ReadPrivateKey(c.Options.ServiceAccountConfig.PrivateKeyFile)
	if err != nil {
		glog.Fatalf("Error reading signing key for Service Account Token Manager: %v", err)
	}
	rootCA := []byte{}
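	// When a master CA is configured, read and sanity-check the PEM bundle; otherwise the controller runs with an empty root CA.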
	if len(c.Options.ServiceAccountConfig.MasterCA) > 0 {
		rootCA, err = ioutil.ReadFile(c.Options.ServiceAccountConfig.MasterCA)
		if err != nil {
			glog.Fatalf("Error reading master ca file for Service Account Token Manager: %s: %v", c.Options.ServiceAccountConfig.MasterCA, err)
		}
		if _, err := kcrypto.CertsFromPEM(rootCA); err != nil {
			glog.Fatalf("Error parsing master ca file for Service Account Token Manager: %s: %v", c.Options.ServiceAccountConfig.MasterCA, err)
		}
	}

	options := sacontroller.TokensControllerOptions{
		TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
		RootCA:         rootCA,
	}

	sacontroller.NewTokensController(clientadapter.FromUnversionedClient(c.KubeClient()), options).Run()
}
Example #6
// RunDNSServer starts the DNS server
func (c *MasterConfig) RunDNSServer() {
	config, err := dns.NewServerDefaults()
	if err != nil {
		glog.Fatalf("Could not start DNS: %v", err)
	}
	config.DnsAddr = c.Options.DNSConfig.BindAddress
	config.NoRec = true // do not want to deploy an open resolver

	_, port, err := net.SplitHostPort(c.Options.DNSConfig.BindAddress)
	if err != nil {
		glog.Fatalf("Could not start DNS: %v", err)
	}
	if port != "53" {
		glog.Warningf("Binding DNS on port %v instead of 53 (you may need to run as root and update your config), using %s which will not resolve from all locations", port, c.Options.DNSConfig.BindAddress)
	}

	if ok, err := cmdutil.TryListen(c.Options.DNSConfig.BindAddress); !ok {
		glog.Warningf("Could not start DNS: %v", err)
		return
	}

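	// dns.ListenAndServe blocks; if it ever returns, the server has failed, so the error is fatal.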
	go func() {
		err := dns.ListenAndServe(config, c.DNSServerClient(), c.EtcdHelper.Client.(*etcdclient.Client))
		glog.Fatalf("Could not start DNS: %v", err)
	}()

	cmdutil.WaitForSuccessfulDial(false, "tcp", c.Options.DNSConfig.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)

	glog.Infof("DNS listening at %s", c.Options.DNSConfig.BindAddress)
}
Example #7
func (r *RuntimeSort) Less(i, j int) bool {
	iObj := r.objs[i]
	jObj := r.objs[j]

	parser := jsonpath.New("sorting")
	if err := parser.Parse(r.field); err != nil {
		glog.Fatalf("Failed to parse jsonpath expression %s: %v", r.field, err)
	}

	iValues, err := parser.FindResults(reflect.ValueOf(iObj).Elem().Interface())
	if err != nil {
		glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err)
	}
	jValues, err := parser.FindResults(reflect.ValueOf(jObj).Elem().Interface())
	if err != nil {
		glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err)
	}

	iField := iValues[0][0]
	jField := jValues[0][0]

	less, err := isLess(iField, jField)
	if err != nil {
		glog.Fatalf("Field %s in %v is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err)
	}
	return less
}
Example #8
func Fatalf(ctx context.Context, format string, args ...interface{}) {
	if ctx == nil || !hasTraceKey(ctx) {
		glog.Fatalf(format, args...)
		return
	}
	glog.Fatalf(prependFormat(format), prependParam(args, ctx)...)
}
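A minimal caller sketch for the wrapper above, relying only on the trace plumbing already shown (hasTraceKey, prependFormat, prependParam); process and doWork are hypothetical names:

// doWork is a hypothetical stand-in for any operation that can fail.
func doWork(ctx context.Context) error { return nil }

func process(ctx context.Context) {
	if err := doWork(ctx); err != nil {
		// With a trace-carrying ctx the message gains a trace prefix;
		// with a nil or plain ctx this behaves exactly like glog.Fatalf.
		Fatalf(ctx, "processing failed: %v", err)
	}
}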
Example #9
// run is responsible for preparing environment for actual build.
// It accepts factoryFunc and an ordered array of SCMAuths.
func run(builderFactory factoryFunc, scmAuths []scmauth.SCMAuth) {
	client, endpoint, err := dockerutil.NewHelper().GetClient()
	if err != nil {
		glog.Fatalf("Error obtaining docker client: %v", err)
	}
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	build := api.Build{}
	if err := latest.Codec.DecodeInto([]byte(buildStr), &build); err != nil {
		glog.Fatalf("Unable to parse build: %v", err)
	}
	if build.Spec.Source.SourceSecret != nil {
		if err := setupSourceSecret(build.Spec.Source.SourceSecret.Name, scmAuths); err != nil {
			glog.Fatalf("Cannot setup secret file for accessing private repository: %v", err)
		}
	}
	b := builderFactory(client, endpoint, &build)
	if err = b.Build(); err != nil {
		glog.Fatalf("Build error: %v", err)
	}

	if build.Spec.Output.To == nil || len(build.Spec.Output.To.Name) == 0 {
		glog.Warning("Build does not have an Output defined, no output image was pushed to a registry.")
	}

}
Example #10
// InstallAPIs will install the APIs for the restStorageProviders if they are enabled.
func (m *Master) InstallAPIs(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter, restStorageProviders ...RESTStorageProvider) {
	apiGroupsInfo := []genericapiserver.APIGroupInfo{}

	for _, restStorageBuilder := range restStorageProviders {
		groupName := restStorageBuilder.GroupName()
		if !apiResourceConfigSource.AnyResourcesForGroupEnabled(groupName) {
			glog.V(1).Infof("Skipping disabled API group %q.", groupName)
			continue
		}
		apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(apiResourceConfigSource, restOptionsGetter)
		if !enabled {
			glog.Warningf("Problem initializing API group %q, skipping.", groupName)
			continue
		}
		glog.V(1).Infof("Enabling API group %q.", groupName)

		if postHookProvider, ok := restStorageBuilder.(genericapiserver.PostStartHookProvider); ok {
			name, hook, err := postHookProvider.PostStartHook()
			if err != nil {
				glog.Fatalf("Error building PostStartHook: %v", err)
			}
			if err := m.GenericAPIServer.AddPostStartHook(name, hook); err != nil {
				glog.Fatalf("Error registering PostStartHook %q: %v", name, err)
			}
		}

		apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo)
	}

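	// Register the collected group versions only after every provider has been processed.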
	for i := range apiGroupsInfo {
		if err := m.GenericAPIServer.InstallAPIGroup(&apiGroupsInfo[i]); err != nil {
			glog.Fatalf("Error in registering group versions: %v", err)
		}
	}
}
Example #11
func (r *RuntimeSort) Less(i, j int) bool {
	iObj := r.objs[i]
	jObj := r.objs[j]

	parser := jsonpath.New("sorting")
	if err := parser.Parse(r.field); err != nil {
		glog.Fatalf("Failed to parse jsonpath expression %s: %v", r.field, err)
	}

	iValues, err := parser.FindResults(reflect.ValueOf(iObj).Elem().Interface())
	if err != nil {
		glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err)
	}
	jValues, err := parser.FindResults(reflect.ValueOf(jObj).Elem().Interface())
	if err != nil {
		glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err)
	}

	iField := iValues[0][0]
	jField := jValues[0][0]

	switch iField.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return iField.Int() < jField.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return iField.Uint() < jField.Uint()
	case reflect.Float32, reflect.Float64:
		return iField.Float() < jField.Float()
	case reflect.String:
		return iField.String() < jField.String()
	default:
		glog.Fatalf("Field %s in %v is an unsortable type: %s", r.field, iObj, iField.Kind().String())
	}
	// default to preserving order
	return i < j
}
Example #12
func multisnapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 8, "how many compression jobs to run simultaneously")
	spec := subFlags.String("spec", "-", "shard specification")
	tablesString := subFlags.String("tables", "", "dump only this comma separated list of tables")
	skipSlaveRestart := subFlags.Bool("skip-slave-restart", false, "after the snapshot is done, do not restart slave replication")
	maximumFilesize := subFlags.Uint64("maximum-file-size", 128*1024*1024, "the maximum size for an uncompressed data file")
	subFlags.Parse(args)
	if subFlags.NArg() != 2 {
		log.Fatalf("action multisnapshot requires <db name> <key name>")
	}

	shards, err := key.ParseShardingSpec(*spec)
	if err != nil {
		log.Fatalf("multisnapshot failed: %v", err)
	}
	var tables []string
	if *tablesString != "" {
		tables = strings.Split(*tablesString, ",")
	}
	filenames, err := mysqld.CreateMultiSnapshot(shards, subFlags.Arg(0), subFlags.Arg(1), tabletAddr, false, *concurrency, tables, *skipSlaveRestart, *maximumFilesize, nil)
	if err != nil {
		log.Fatalf("multisnapshot failed: %v", err)
	} else {
		log.Infof("manifest locations: %v", filenames)
	}
}
Example #13
// InitPlugin creates an instance of the named interface.
func InitPlugin(name string, client client.Interface, configFilePath string) Interface {
	var (
		config *os.File
		err    error
	)

	if name == "" {
		glog.Info("No admission plugin specified.")
		return nil
	}

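	// A plugin config file is optional; when given, it is opened here and handed to the plugin's initializer.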
	if configFilePath != "" {
		config, err = os.Open(configFilePath)
		if err != nil {
			glog.Fatalf("Couldn't open admission plugin configuration %s: %#v",
				configFilePath, err)
		}

		defer config.Close()
	}

	plugin, err := GetPlugin(name, client, config)
	if err != nil {
		glog.Fatalf("Couldn't init admission plugin %q: %v", name, err)
	}
	if plugin == nil {
		glog.Fatalf("Unknown admission plugin: %s", name)
	}

	return plugin
}
Example #14
func main() {
	flag.Parse()

	// create the client
	client, err := vtctlclient.New(*server, *dialTimeout)
	if err != nil {
		log.Fatalf("Cannot dial to server %v: %v", *server, err)
	}
	defer client.Close()

	// run the command
	c, errFunc := client.ExecuteVtctlCommand(flag.Args(), *actionTimeout, *lockWaitTimeout)
	if err = errFunc(); err != nil {
		log.Fatalf("Cannot execute remote command: %v", err)
	}

	// stream the result
	for e := range c {
		switch e.Level {
		case logutil.LOGGER_INFO:
			log.Info(e.String())
		case logutil.LOGGER_WARNING:
			log.Warning(e.String())
		case logutil.LOGGER_ERROR:
			log.Error(e.String())
		case logutil.LOGGER_CONSOLE:
			fmt.Print(e.Value)
		}
	}

	// then display the overall error
	if err = errFunc(); err != nil {
		log.Fatalf("Remote error: %v", err)
	}
}
Example #15
// CopyTablets will create the tablets in the destination topo
func CopyTablets(fromTS, toTS topo.Server) {
	cells, err := fromTS.GetKnownCells()
	if err != nil {
		log.Fatalf("fromTS.GetKnownCells failed: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
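	// Fan out one goroutine per cell and, within each, one per tablet; the shared WaitGroup and error recorder span both levels.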
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			tabletAliases, err := fromTS.GetTabletsByCell(cell)
			if err != nil {
				rec.RecordError(err)
			} else {
				for _, tabletAlias := range tabletAliases {
					wg.Add(1)
					go func(tabletAlias topo.TabletAlias) {
						defer wg.Done()

						// read the source tablet
						ti, err := fromTS.GetTablet(tabletAlias)
						if err != nil {
							rec.RecordError(err)
							return
						}

						// try to create the destination
						err = toTS.CreateTablet(ti.Tablet)
						if err == topo.ErrNodeExists {
							// update the destination tablet
							log.Warningf("tablet %v already exists, updating it", tabletAlias)
							err = toTS.UpdateTabletFields(ti.Alias(), func(t *topo.Tablet) error {
								*t = *ti.Tablet
								return nil
							})
						}
						if err != nil {
							rec.RecordError(err)
							return
						}

						// create the replication paths
						// for masters only here
						if ti.Type == topo.TYPE_MASTER {
							if err = toTS.CreateReplicationPath(ti.Keyspace, ti.Shard, ti.Alias().String()); err != nil && err != topo.ErrNodeExists {
								rec.RecordError(err)
							}
						}
					}(tabletAlias)
				}
			}
		}(cell)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyTablets failed: %v", rec.Error())
	}
}
Example #16
// NewModel creates a new HMM.
func NewModel(options ...Option) *Model {

	m := &Model{
		ModelName: "HMM",
		updateTP:  true,
		updateOP:  true,
		maxGenLen: 100,
	}
	m.Type = reflect.TypeOf(*m).String()

	// Set options.
	for _, option := range options {
		option(m)
	}

	//	glog.Infof("New HMM. Num states = %d.", r)

	if m.Set == nil {
		glog.Fatalf("need model set to create HMM model - use the OSet option to specify a model set")
	}
	if m.Set.size() > 1 && m.assigner == nil {
		glog.Fatalf("need assigner to create an HMM model with more than one network - use OAssign option to specify assigner")
	}
	if m.useAlignments {
		m.updateTP = false
	}
	if glog.V(5) {
		glog.Info("created new hmm model")
		glog.Infof("model set: %s", m.Set)
	}
	return m
}
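NewModel uses the functional-options pattern; the option(m) loop implies Option is a func(*Model). A hedged sketch of a custom option and its use (MaxGenLen is a hypothetical name; OSet is taken from the error message above):

// MaxGenLen caps the generated sequence length (hypothetical option).
func MaxGenLen(n int) Option {
	return func(m *Model) { m.maxGenLen = n }
}

// Usage: m := NewModel(OSet(set), MaxGenLen(50))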
Example #17
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		log.Fatalf("GetKeyspaces: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()

			k, err := fromTS.GetKeyspace(keyspace)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
				return
			}

			if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
				if err == topo.ErrNodeExists {
					log.Warningf("keyspace %v already exists", keyspace)
				} else {
					rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
				}
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyKeyspaces failed: %v", rec.Error())
	}
}
Example #18
func validateLeadershipTransition(desired, current string) {
	log.Infof("validating leadership transition")
	// desired, current are of the format <executor-id>:<scheduler-uuid> (see Run()).
	// parse them and ensure that executor ID's match, otherwise the cluster can get into
	// a bad state after scheduler failover: executor ID is a config hash that must remain
	// consistent across failover events.
	var (
		i = strings.LastIndex(desired, ":")
		j = strings.LastIndex(current, ":")
	)

	if i > -1 {
		desired = desired[0:i]
	} else {
		log.Fatalf("desired id %q is invalid", desired)
	}
	if j > -1 {
		current = current[0:j]
	} else if current != "" {
		log.Fatalf("current id %q is invalid", current)
	}

	if desired != current && current != "" {
		log.Fatalf("desired executor id %q != current executor id %q", desired, current)
	}
}
Example #19
func main() {
	flag.Parse()
	var err error
	// TODO: Validate input flags.
	domain := *argDomain
	if !strings.HasSuffix(domain, ".") {
		domain = fmt.Sprintf("%s.", domain)
	}
	ks := kube2sky{
		domain:              domain,
		etcdMutationTimeout: *argEtcdMutationTimeout,
	}
	if ks.etcdClient, err = newEtcdClient(*argEtcdServer); err != nil {
		glog.Fatalf("Failed to create etcd client - %v", err)
	}

	kubeClient, err := newKubeClient()
	if err != nil {
		glog.Fatalf("Failed to create a kubernetes client: %v", err)
	}

	ks.endpointsStore = watchEndpoints(kubeClient, &ks)
	ks.servicesStore = watchForServices(kubeClient, &ks)
	ks.podsStore = watchPods(kubeClient, &ks)

	select {}
}
Example #20
func runReplicationControllerTest(kubeClient *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	var controllerRequest api.ReplicationController
	if err := json.Unmarshal(data, &controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := kubeClient.CreateReplicationController(controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods
	time.Sleep(time.Second * 10)

	// Validate that they're truly up.
	pods, err := kubeClient.ListPods(labels.Set(controllerRequest.DesiredState.ReplicaSelector).AsSelector())
	if err != nil || len(pods.Items) != controllerRequest.DesiredState.Replicas {
		glog.Fatalf("FAILED: %#v", pods.Items)
	}
	glog.Infof("Replication controller produced:\n\n%#v\n\n", pods)
}
Example #21
func main() {
	flag.Parse()
	if len(*userWhitelist) == 0 {
		glog.Fatalf("--user-whitelist is required.")
	}
	if len(*jenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}
	client := github.MakeClient(*token)

	users, err := loadWhitelist(*userWhitelist)
	if err != nil {
		glog.Fatalf("error loading user whitelist: %v", err)
	}
	requiredContexts := strings.Split(*requiredContexts, ",")
	config := &github.FilterConfig{
		MinPRNumber:            *minPRNumber,
		UserWhitelist:          users,
		RequiredStatusContexts: requiredContexts,
		WhitelistOverride:      *whitelistOverride,
	}
	for {
		if err := github.ForEachCandidatePRDo(client, org, project, runE2ETests, *oneOff, config); err != nil {
			glog.Fatalf("Error getting candidate PRs: %v", err)
		}
		if *oneOff {
			break
		}
	}
}
Example #22
// Initialize will initialize the munger
func (b *BlockPath) Initialize(config *github.Config, features *features.Features) error {
	if len(b.Path) == 0 {
		glog.Fatalf("--block-path-config is required with the block-path munger")
	}
	file, err := os.Open(b.Path)
	if err != nil {
		glog.Fatalf("Failed to load block-path config: %v", err)
	}
	defer file.Close()

	c := &configBlockPath{}
	if err := yaml.NewYAMLToJSONDecoder(file).Decode(c); err != nil {
		glog.Fatalf("Failed to decode the block-path config: %v", err)
	}

	b.blockRegexp = []regexp.Regexp{}
	for _, str := range c.BlockRegexp {
		reg, err := regexp.Compile(str)
		if err != nil {
			return err
		}
		b.blockRegexp = append(b.blockRegexp, *reg)
	}

	b.doNotBlockRegexp = []regexp.Regexp{}
	for _, str := range c.DoNotBlockRegexp {
		reg, err := regexp.Compile(str)
		if err != nil {
			return err
		}
		b.doNotBlockRegexp = append(b.doNotBlockRegexp, *reg)
	}
	return nil
}
Example #23
func runMasterServiceTest(client *client.Client) {
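	// Give the master a moment to create its default services before listing them.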
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
Example #24
// getClusterNodesIP returns the IP address of each node in the kubernetes cluster
func getClusterNodesIP(kubeClient *unversioned.Client, nodeSelector string) (clusterNodes []string) {
	listOpts := api.ListOptions{}

	if nodeSelector != "" {
		label, err := labels.Parse(nodeSelector)
		if err != nil {
			glog.Fatalf("'%v' is not a valid selector: %v", nodeSelector, err)
		}
		listOpts.LabelSelector = label
	}

	nodes, err := kubeClient.Nodes().List(listOpts)
	if err != nil {
		glog.Fatalf("Error getting running nodes: %v", err)
	}

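	// Collect the host IP of every node that reports one; nodes without a resolvable host IP are skipped.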
	for _, nodo := range nodes.Items {
		nodeIP, err := node.GetNodeHostIP(&nodo)
		if err == nil {
			clusterNodes = append(clusterNodes, nodeIP.String())
		}
	}
	sort.Strings(clusterNodes)

	return
}
Example #25
// getGrantHandler returns the object that handles approving or rejecting grant requests
func (c *AuthConfig) getGrantHandler(mux cmdutil.Mux, auth authenticator.Request, clientregistry clientregistry.Getter, authregistry clientauthregistry.Registry) handlers.GrantHandler {
	startGrantServer := false

	var saGrantHandler handlers.GrantHandler
	switch c.Options.GrantConfig.ServiceAccountMethod {
	case configapi.GrantHandlerDeny:
		saGrantHandler = handlers.NewEmptyGrant()
	case configapi.GrantHandlerPrompt:
		startGrantServer = true
		saGrantHandler = handlers.NewRedirectGrant(OpenShiftApprovePrefix)
	default:
		glog.Fatalf("No grant handler found that matches %v.  The oauth server cannot start!", c.Options.GrantConfig.ServiceAccountMethod)
	}

	var standardGrantHandler handlers.GrantHandler
	switch c.Options.GrantConfig.Method {
	case configapi.GrantHandlerDeny:
		standardGrantHandler = handlers.NewEmptyGrant()
	case configapi.GrantHandlerAuto:
		standardGrantHandler = handlers.NewAutoGrant()
	case configapi.GrantHandlerPrompt:
		startGrantServer = true
		standardGrantHandler = handlers.NewRedirectGrant(OpenShiftApprovePrefix)
	default:
		glog.Fatalf("No grant handler found that matches %v.  The oauth server cannot start!", c.Options.GrantConfig.Method)
	}

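	// The redirect-based grant handlers point at the approval endpoint, so the grant server must be mounted on the mux.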
	if startGrantServer {
		grantServer := grant.NewGrant(c.getCSRF(), auth, grant.DefaultFormRenderer, clientregistry, authregistry)
		grantServer.Install(mux, OpenShiftApprovePrefix)
	}

	return handlers.NewServiceAccountAwareGrant(standardGrantHandler, saGrantHandler)
}
Example #26
func main() {
	flag.Parse()
	if len(*org) == 0 {
		glog.Fatalf("--organization is required.")
	}
	if len(*project) == 0 {
		glog.Fatalf("--project is required.")
	}
	tokenData := *token
	if len(tokenData) == 0 && len(*tokenFile) != 0 {
		data, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			glog.Fatalf("error reading token file: %v", err)
		}
		tokenData = string(data)
	}
	client := github.MakeClient(tokenData)

	if len(*issueMungers) > 0 {
		glog.Infof("Running issue mungers")
		if err := issues.MungeIssues(client, *issueMungers, *org, *project, *minIssueNumber, *dryrun); err != nil {
			glog.Errorf("Error munging issues: %v", err)
		}
	}
	if len(*prMungers) > 0 {
		glog.Infof("Running PR mungers")
		if err := pulls.MungePullRequests(client, *prMungers, *org, *project, *minPRNumber, *dryrun); err != nil {
			glog.Errorf("Error munging PRs: %v", err)
		}
	}
}
Example #27
func main() {
	flag.Parse()

	storageDriver, err := NewStorageDriver(*argDbDriver)
	if err != nil {
		glog.Fatalf("Failed to connect to database: %s", err)
	}

	containerManager, err := manager.New(storageDriver)
	if err != nil {
		glog.Fatalf("Failed to create a Container Manager: %s", err)
	}

	// Register Docker.
	if err := docker.Register(containerManager); err != nil {
		glog.Errorf("Docker registration failed: %v.", err)
	}

	// Register the raw driver.
	if err := raw.Register(containerManager); err != nil {
		glog.Fatalf("raw registration failed: %v.", err)
	}

	// Handler for static content.
	http.HandleFunc(static.StaticResource, func(w http.ResponseWriter, r *http.Request) {
		err := static.HandleRequest(w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	// Register API handler.
	if err := api.RegisterHandlers(containerManager); err != nil {
		glog.Fatalf("failed to register API handlers: %s", err)
	}

	// Redirect / to containers page.
	http.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))

	// Register the handler for the containers page.
	http.HandleFunc(pages.ContainersPage, func(w http.ResponseWriter, r *http.Request) {
		err := pages.ServerContainersPage(containerManager, w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	defer glog.Flush()

	go func() {
		glog.Fatal(containerManager.Start())
	}()

	glog.Infof("Starting cAdvisor version: %q", info.VERSION)
	glog.Infof("About to serve on port %d", *argPort)

	addr := fmt.Sprintf(":%v", *argPort)

	glog.Fatal(http.ListenAndServe(addr, nil))
}
Example #28
// connectGossip connects to gossip network and reads cluster ID. If
// this node is already part of a cluster, the cluster ID is verified
// for a match. If not part of a cluster, the cluster ID is set. The
// node's address is gossipped with node ID as the gossip key.
func (n *Node) connectGossip() {
	glog.Infof("connecting to gossip network to verify cluster ID...")
	<-n.gossip.Connected

	val, err := n.gossip.GetInfo(gossip.KeyClusterID)
	if err != nil || val == nil {
		glog.Fatalf("unable to ascertain cluster ID from gossip network: %v", err)
	}
	gossipClusterID := val.(string)

	if n.ClusterID == "" {
		n.ClusterID = gossipClusterID
	} else if n.ClusterID != gossipClusterID {
		glog.Fatalf("node %d belongs to cluster %q but is attempting to connect to a gossip network for cluster %q",
			n.Descriptor.NodeID, n.ClusterID, gossipClusterID)
	}
	glog.Infof("node connected via gossip and verified as part of cluster %q", gossipClusterID)

	// Gossip node address keyed by node ID.
	if n.Descriptor.NodeID != 0 {
		nodeIDKey := gossip.MakeNodeIDGossipKey(n.Descriptor.NodeID)
		if err := n.gossip.AddInfo(nodeIDKey, n.Descriptor.Address, ttlNodeIDGossip); err != nil {
			glog.Errorf("couldn't gossip address for node %d: %v", n.Descriptor.NodeID, err)
		}
	}
}
Example #29
// Start begins the core controller loops that must exist for bootstrapping
// a cluster.
func (c *Controller) Start() {
	if c.runner != nil {
		return
	}

	repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceRegistry, c.ServiceClusterIPRange, c.ServiceClusterIPRegistry)
	repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePortRange, c.ServiceNodePortRegistry)

	// run all of the controllers once prior to returning from Start.
	if err := repairClusterIPs.RunOnce(); err != nil {
		// If we fail to repair cluster IPs apiserver is useless. We should restart and retry.
		glog.Fatalf("Unable to perform initial IP allocation check: %v", err)
	}
	if err := repairNodePorts.RunOnce(); err != nil {
		// If we fail to repair node ports apiserver is useless. We should restart and retry.
		glog.Fatalf("Unable to perform initial service nodePort check: %v", err)
	}
	// Service definition is reconciled during first run to correct port and type per expectations.
	if err := c.UpdateKubernetesService(true); err != nil {
		glog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err)
	}

	c.runner = util.NewRunner(c.RunKubernetesService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil)
	c.runner.Start()
}
Example #30
func loadClientOrDie() *client.Client {
	config := client.Config{
		Host: *host,
	}
	info, err := clientauth.LoadFromFile(*authConfig)
	if err != nil {
		glog.Fatalf("Error loading auth: %v", err)
	}
	// If the certificate directory is provided, set the cert paths to be there.
	if *certDir != "" {
		glog.Infof("Expecting certs in %v.", *certDir)
		info.CAFile = filepath.Join(*certDir, "ca.crt")
		info.CertFile = filepath.Join(*certDir, "kubecfg.crt")
		info.KeyFile = filepath.Join(*certDir, "kubecfg.key")
	}
	config, err = info.MergeWithConfig(config)
	if err != nil {
		glog.Fatalf("Error merging auth info with config: %v", err)
	}
	c, err := client.New(&config)
	if err != nil {
		glog.Fatalf("Error creating client: %v", err)
	}
	return c
}