Example No. 1
func Warningf(ctx context.Context, format string, args ...interface{}) {
	if ctx == nil || !hasTraceKey(ctx) {
		glog.Warningf(format, args...)
		return
	}
	glog.Warningf(prependFormat(format), prependParam(args, ctx)...)
}
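A note on the variadic forwarding fixed above: without the ... expansion, the whole args slice is passed as a single value and the remaining format verbs go unmatched. A minimal standalone illustration using only the standard library:

package main

import "fmt"

func main() {
	args := []interface{}{"disk", 3}
	// Without expansion the slice is treated as one argument and %d is left unmatched.
	fmt.Println(fmt.Sprintf("%s failed %d times", args))
	// With ... each element is forwarded as its own argument: "disk failed 3 times".
	fmt.Println(fmt.Sprintf("%s failed %d times", args...))
}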
Example No. 2
// terminateHealthChecks is called when we enter lame duck mode.
// We will clean up our state, and shut down query service.
// We only do something if we are in targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType pbt.TabletType) {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()
	log.Info("agent.terminateHealthChecks is starting")

	// read the current tablet record
	tablet := agent.Tablet()
	if tablet.Type != targetTabletType {
		log.Infof("Tablet in state %v, not changing it", tablet.Type)
		return
	}

	// Change the Type to spare, update the health. Note we pass in a map
	// that's not nil, meaning we will clear it.
	if err := topotools.ChangeType(agent.batchCtx, agent.TopoServer, tablet.Alias, pbt.TabletType_SPARE, make(map[string]string)); err != nil {
		log.Infof("Error updating tablet record: %v", err)
		return
	}

	// Update the serving graph in our cell, only if we're dealing with
	// a serving type
	if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
		log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
	}

	// We've already rebuilt the shard, which is the only reason we registered
	// ourselves as OnTermSync (synchronous). The rest can be done asynchronously.
	go func() {
		// Run the post action callbacks (let them shutdown the query service)
		if err := agent.refreshTablet(agent.batchCtx, "terminatehealthcheck"); err != nil {
			log.Warningf("refreshTablet failed: %v", err)
		}
	}()
}
Example No. 3
// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
func UnmountPath(mountPath string, mounter mount.Interface) error {
	if pathExists, pathErr := PathExists(mountPath); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
		return nil
	}

	notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
	if err != nil {
		return err
	}
	if notMnt {
		glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
		return os.Remove(mountPath)
	}

	// Unmount the mount path
	if err := mounter.Unmount(mountPath); err != nil {
		return err
	}
	notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
	if mntErr != nil {
		return mntErr
	}
	if notMnt {
		glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
		return os.Remove(mountPath)
	}
	return nil
}
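PathExists is a helper from the same package; a minimal standalone sketch of such an existence check (an assumed equivalent, not the actual helper) looks like this:

package main

import (
	"fmt"
	"os"
)

// pathExists is a hypothetical stand-in for the package's PathExists helper:
// it reports "false, nil" for a missing path and returns other stat errors as-is.
func pathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

func main() {
	exists, err := pathExists("/var/lib/kubelet/pods/example")
	fmt.Println(exists, err)
}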
Example No. 4
func warnNoRsync() {
	if isWindows() {
		glog.Warningf(noRsyncWindowsWarning)
		return
	}
	glog.Warningf(noRsyncUnixWarning)
}
Example No. 5
// generateEvents is a helper function that generates some container
// life cycle events for containers in a pod.
func (r *runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, failure error) {
	// Set up container references.
	for _, c := range runtimePod.Containers {
		containerID := string(c.ID)
		id, err := parseContainerID(containerID)
		if err != nil {
			glog.Warningf("Invalid container ID %q", containerID)
			continue
		}

		ref, ok := r.containerRefManager.GetRef(containerID)
		if !ok {
			glog.Warningf("No ref for container %q", containerID)
			continue
		}

		// Note that 'rkt id' is the pod id.
		uuid := util.ShortenString(id.uuid, 8)
		switch reason {
		case "Created":
			r.recorder.Eventf(ref, "Created", "Created with rkt id %v", uuid)
		case "Started":
			r.recorder.Eventf(ref, "Started", "Started with rkt id %v", uuid)
		case "Failed":
			r.recorder.Eventf(ref, "Failed", "Failed to start with rkt id %v with error %v", uuid, failure)
		case "Killing":
			r.recorder.Eventf(ref, "Killing", "Killing with rkt id %v", uuid)
		default:
			glog.Errorf("rkt: Unexpected event %q", reason)
		}
	}
	return
}
Example No. 6
func SendAppMessage(amsg *AppMessage, uid int64) bool {
	channel := GetChannel(uid)
	channel.Publish(amsg)

	route := app_route.FindRoute(amsg.appid)
	if route == nil {
		log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
		return false
	}
	clients := route.FindClientSet(uid)
	if len(clients) == 0 {
		log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
		return false
	}
	for c := range clients {
		if amsg.msgid > 0 {
			c.ewt <- &EMessage{msgid: amsg.msgid, msg: amsg.msg}
		} else {
			c.wt <- amsg.msg
		}
	}
	return true
}
Example No. 7
func (wr *Wrangler) setKeyspaceShardingInfo(keyspace, shardingColumnName string, shardingColumnType key.KeyspaceIdType, force bool) error {
	ki, err := wr.ts.GetKeyspace(keyspace)
	if err != nil {
		return err
	}

	if ki.ShardingColumnName != "" && ki.ShardingColumnName != shardingColumnName {
		if force {
			log.Warningf("Forcing keyspace ShardingColumnName change from %v to %v", ki.ShardingColumnName, shardingColumnName)
		} else {
			return fmt.Errorf("Cannot change ShardingColumnName from %v to %v (use -force to override)", ki.ShardingColumnName, shardingColumnName)
		}
	}

	if ki.ShardingColumnType != key.KIT_UNSET && ki.ShardingColumnType != shardingColumnType {
		if force {
			log.Warningf("Forcing keyspace ShardingColumnType change from %v to %v", ki.ShardingColumnType, shardingColumnType)
		} else {
			return fmt.Errorf("Cannot change ShardingColumnType from %v to %v (use -force to override)", ki.ShardingColumnType, shardingColumnType)
		}
	}

	ki.ShardingColumnName = shardingColumnName
	ki.ShardingColumnType = shardingColumnType
	return topo.UpdateKeyspace(wr.ts, ki)
}
Example No. 8
// executorRefs returns a slice of references to running executors known to this framework
func (k *framework) executorRefs() []executorRef {
	slaves := k.slaveHostNames.SlaveIDs()
	refs := make([]executorRef, 0, len(slaves))

	for _, slaveID := range slaves {
		hostname := k.slaveHostNames.HostName(slaveID)
		if hostname == "" {
			log.Warningf("hostname lookup for slaveID %q failed", slaveID)
			continue
		}

		node := k.lookupNode(hostname)
		if node == nil {
			log.Warningf("node lookup for slaveID %q failed", slaveID)
			continue
		}

		eid, ok := node.Annotations[meta.ExecutorIdKey]
		if !ok {
			log.Warningf("unable to find %q annotation for node %v", meta.ExecutorIdKey, node)
			continue
		}

		refs = append(refs, executorRef{
			executorID: mutil.NewExecutorID(eid),
			slaveID:    mutil.NewSlaveID(slaveID),
		})
	}

	return refs
}
Example No. 9
// ListRuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module.
func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) {
	ingList, err := lbc.ingLister.List()
	if err != nil {
		return lbs, err
	}
	for _, ing := range ingList.Items {
		k, err := keyFunc(&ing)
		if err != nil {
			glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
			continue
		}
		tls, err := lbc.tlsLoader.load(&ing)
		if err != nil {
			glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
		}
		annotations := ingAnnotations(ing.ObjectMeta.Annotations)
		lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
			Name:         k,
			TLS:          tls,
			AllowHTTP:    annotations.allowHTTP(),
			StaticIPName: annotations.staticIPName(),
		})
	}
	return lbs, nil
}
Example No. 10
// RegisterTemp creates an ephemeral node and watches it; if the node is dropped, it sends a SIGQUIT to the current process.
func RegisterTemp(conn *zk.Conn, fpath, data string) error {
	tpath, err := conn.Create(path.Join(fpath)+"/", []byte(data), zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	if err != nil {
		glog.Errorf("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, data, err)
		return err
	}
	glog.V(1).Infof("create a zookeeper node:%s", tpath)
	// watch self
	go func() {
		for {
			glog.Infof("zk path: \"%s\" set a watch", tpath)
			exist, _, watch, err := conn.ExistsW(tpath)
			if err != nil {
				glog.Errorf("zk.ExistsW(\"%s\") error(%v)", tpath, err)
				glog.Warningf("zk path: \"%s\" set watch failed, kill itself", tpath)
				killSelf()
				return
			}
			if !exist {
				glog.Warningf("zk path: \"%s\" not exist, kill itself", tpath)
				killSelf()
				return
			}
			event := <-watch
			glog.Infof("zk path: \"%s\" receive a event %v", tpath, event)
		}
	}()
	return nil
}
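killSelf is not shown here; according to the doc comment it sends a SIGQUIT to the current process. A hedged standalone sketch of such a helper on Unix (an assumption, not the actual implementation):

package main

import "syscall"

// killSelf is a hypothetical implementation matching the doc comment above:
// deliver SIGQUIT to our own PID so the process dumps its goroutines and exits.
func killSelf() {
	syscall.Kill(syscall.Getpid(), syscall.SIGQUIT)
}

func main() {
	killSelf()
}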
Example No. 11
func (im *realImageGCManager) Start() error {
	go wait.Until(func() {
		// On the initial detection, leave ts as the zero time so the detected time reads as "unknown" (in the past).
		var ts time.Time
		if im.initialized {
			ts = time.Now()
		}
		err := im.detectImages(ts)
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
		} else {
			im.initialized = true
		}
	}, 5*time.Minute, wait.NeverStop)

	// Start a goroutine that periodically updates the image cache.
	// TODO(random-liu): Merge this with the previous loop.
	go wait.Until(func() {
		images, err := im.runtime.ListImages()
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
		} else {
			im.imageCache.set(images)
		}
	}, 30*time.Second, wait.NeverStop)

	return nil
}
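wait.Until runs the closure on the given period until the stop channel closes, and wait.NeverStop keeps it running forever. A simplified standalone equivalent of that pattern using only the standard library (an illustration, not the real helper):

package main

import (
	"fmt"
	"time"
)

// until is a simplified stand-in for wait.Until: run f, wait for the period,
// and repeat until stop is closed.
func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		f()
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() { fmt.Println("refreshing image cache") }, 50*time.Millisecond, stop)
	time.Sleep(200 * time.Millisecond)
	close(stop)
}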
Example No. 12
// PDNS will query for "ANY" no matter what record type the client
// has asked for. Thus, we need to return data for all record
// types. PDNS will then filter for what the client needs.  PDNS is
// sensitive to the order in which records are returned.  If you
// return a CNAME first, it returns the CNAME for all queries.
// The DNS spec says you should not have conflicts between
// CNAME/SRV records, so this really shouldn't be an issue.
func (pd *pdns) handleQReq(req *pdnsReq) (lines []string, err error) {
	// The default search is for "ANY", however we do not need to
	// explicitly search for CNAME since that is implicitly handled in
	// an A request.
	qtypes := []string{"SOA", "SRV", "A"}
	if req.qtype != "ANY" {
		qtypes = []string{req.qtype}
	}
	lines = make([]string, 0, 16)
	for _, qtype := range qtypes {
		replies, err := pd.zr.getResult(qtype, req.qname)
		if err != nil {
			// If we aren't even the authority, we might as well give up.
			if qtype == "SOA" {
				return nil, err
			}
			log.Warningf("query failed %v %v: %v", qtype, req.qname, err)
			continue
		}
		for _, reply := range replies {
			lines = append(lines, reply.fmtReply())
		}
	}
	if len(lines) == 0 {
		emptyCount.Add(1)
		log.Warningf("no results for %v %v", req.qtype, req.qname)
	}
	return lines, nil
}
Example No. 13
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) {
	// The hairpin mode setting doesn't matter if:
	// - We're not using a bridge network. This is hard to check because we might
	//   be using a plugin. It matters if --configure-cbr0=true, and we currently
	//   don't pipe it down to any plugins.
	// - It's set to hairpin-veth for a container runtime that doesn't know how
	//   to set the hairpin flag on the veth's of containers. Currently the
	//   docker runtime is the only one that understands this.
	// - It's set to "none".
	if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth {
		// Only on docker.
		if containerRuntime != "docker" {
			glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
			return componentconfig.HairpinNone, nil
		}
		if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" {
			// This is not a valid combination.  Users might be using the
			// default values (from before the hairpin-mode flag existed) and we
			// should keep the old behavior.
			glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
			return componentconfig.HairpinVeth, nil
		}
	} else if hairpinMode == componentconfig.HairpinNone {
		if configureCBR0 {
			glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode)
		}
	} else {
		return "", fmt.Errorf("unknown value: %q", hairpinMode)
	}
	return hairpinMode, nil
}
Example No. 14
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	client := f.Client
	podClient := client.Pods(config.namespace)

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
Example No. 15
// LoadVSchema loads the VSchema from the topo. The function does
// not return an error. It instead logs warnings on failure.
func (plr *Planner) LoadVSchema(ctx context.Context) {
	formal := &vindexes.VSchemaFormal{
		Keyspaces: make(map[string]vindexes.KeyspaceFormal),
	}
	keyspaces, err := plr.serv.GetSrvKeyspaceNames(ctx, plr.cell)
	if err != nil {
		log.Warningf("Error loading vschema: could not read keyspaces: %v", err)
		return
	}
	for _, keyspace := range keyspaces {
		formal.Keyspaces[keyspace] = vindexes.KeyspaceFormal{}
		kschema, err := plr.serv.GetVSchema(context.TODO(), keyspace)
		if err != nil {
			log.Warningf("Error loading vschema for keyspace: %s: %v", keyspace, err)
			continue
		}
		var kformal vindexes.KeyspaceFormal
		err = json.Unmarshal([]byte(kschema), &kformal)
		if err != nil {
			log.Warningf("Error unmarshalling vschema for keyspace: %s: %v", keyspace, err)
			continue
		}
		formal.Keyspaces[keyspace] = kformal
	}
	vschema, err := vindexes.BuildVSchema(formal)
	if err != nil {
		log.Warningf("Error creating VSchema: %v", err)
		return
	}
	plr.mu.Lock()
	plr.vschema = vschema
	plr.mu.Unlock()
}
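Each keyspace's VSchema arrives as a JSON string and is unmarshalled into the formal struct; on failure the keyspace keeps its empty placeholder. A minimal standalone sketch of that unmarshalling step, using a hypothetical struct in place of vindexes.KeyspaceFormal:

package main

import (
	"encoding/json"
	"log"
)

// keyspaceFormal is a hypothetical stand-in for vindexes.KeyspaceFormal.
type keyspaceFormal struct {
	Sharded bool `json:"Sharded"`
}

func main() {
	kschema := `{"Sharded": true}`
	var kformal keyspaceFormal
	if err := json.Unmarshal([]byte(kschema), &kformal); err != nil {
		log.Printf("Error unmarshalling vschema: %v", err)
		return
	}
	log.Printf("parsed keyspace: %+v", kformal)
}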
Example No. 16
// getScriptsURL finds a scripts url label in the image metadata
func getScriptsURL(image *docker.Image) string {
	scriptsURL := getLabel(image, ScriptsURLLabel)

	// For backward compatibility, support the old label schema
	if len(scriptsURL) == 0 {
		scriptsURL = getLabel(image, "io.s2i.scripts-url")
		if len(scriptsURL) > 0 {
			glog.Warningf("The 'io.s2i.scripts-url' label is deprecated. Use %q instead.", ScriptsURLLabel)
		}
	}
	if len(scriptsURL) == 0 {
		scriptsURL = getVariable(image, ScriptsURLEnvironment)
		if len(scriptsURL) != 0 {
			glog.Warningf("BuilderImage uses deprecated environment variable %s, please migrate it to %s label instead!",
				ScriptsURLEnvironment, ScriptsURLLabel)
		}
	}
	if len(scriptsURL) == 0 {
		glog.Warningf("Image does not contain a value for the %s label", ScriptsURLLabel)
	} else {
		glog.V(2).Infof("Image contains %s set to '%s'", ScriptsURLLabel, scriptsURL)
	}

	return scriptsURL
}
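The lookup above is a fallback chain: the current label, then the deprecated label, then the deprecated environment variable, with a warning at each degraded step. A standalone sketch of that first-non-empty pattern (names are illustrative):

package main

import "fmt"

// firstNonEmpty mirrors the label -> deprecated label -> env var fallback:
// it returns the first candidate that is set.
func firstNonEmpty(candidates ...string) string {
	for _, c := range candidates {
		if c != "" {
			return c
		}
	}
	return ""
}

func main() {
	label, deprecatedLabel, envVar := "", "", "https://example.com/s2i/bin"
	fmt.Println(firstNonEmpty(label, deprecatedLabel, envVar))
}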
Example No. 17
func (storage *PodRegistryStorage) fillPodInfo(pod *api.Pod) {
	// Get cached info for the list currently.
	// TODO: Optionally use fresh info
	if storage.podCache != nil {
		info, err := storage.podCache.GetPodInfo(pod.CurrentState.Host, pod.ID)
		if err != nil {
			glog.Errorf("Error getting container info from cache: %#v", err)
			if storage.podInfoGetter != nil {
				info, err = storage.podInfoGetter.GetPodInfo(pod.CurrentState.Host, pod.ID)
			}
			if err != nil {
				glog.Errorf("Error getting fresh container info: %#v", err)
				return
			}
		}
		pod.CurrentState.Info = info
		netContainerInfo, ok := info["net"]
		if ok {
			if netContainerInfo.NetworkSettings != nil {
				pod.CurrentState.PodIP = netContainerInfo.NetworkSettings.IPAddress
			} else {
				glog.Warningf("No network settings: %#v", netContainerInfo)
			}
		} else {
			glog.Warningf("Couldn't find network container in %v", info)
		}
	}
}
Example No. 18
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, networkPlugin string) (componentconfig.HairpinMode, error) {
	// The hairpin mode setting doesn't matter if:
	// - We're not using a bridge network. This is hard to check because we might
	//   be using a plugin.
	// - It's set to hairpin-veth for a container runtime that doesn't know how
	//   to set the hairpin flag on the veth's of containers. Currently the
	//   docker runtime is the only one that understands this.
	// - It's set to "none".
	if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth {
		// Only on docker.
		if containerRuntime != "docker" {
			glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
			return componentconfig.HairpinNone, nil
		}
		if hairpinMode == componentconfig.PromiscuousBridge && networkPlugin != "kubenet" {
			// This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the
			// default values (from before the hairpin-mode flag existed) and we
			// should keep the old behavior.
			glog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
			return componentconfig.HairpinVeth, nil
		}
	} else if hairpinMode != componentconfig.HairpinNone {
		return "", fmt.Errorf("unknown value: %q", hairpinMode)
	}
	return hairpinMode, nil
}
Example No. 19
func DispatchAppMessage(amsg *AppMessage) {
	log.Info("dispatch app message:", Command(amsg.msg.cmd))

	route := app_route.FindRoute(amsg.appid)
	if route == nil {
		log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
		return
	}
	clients := route.FindClientSet(amsg.receiver)
	if len(clients) == 0 {
		log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
		return
	}
	for c := range clients {
		// Do not deliver the message back to the same device the sender posted it from.
		if amsg.msg.cmd == MSG_IM || amsg.msg.cmd == MSG_GROUP_IM {
			m := amsg.msg.body.(*IMMessage)
			if m.sender == amsg.receiver && amsg.device_id == c.device_ID {
				continue
			}
		}

		if amsg.msgid > 0 {
			c.ewt <- &EMessage{msgid: amsg.msgid, msg: amsg.msg}
		} else {
			c.wt <- amsg.msg
		}
	}
}
Example No. 20
// InstanceID returns the cloud provider ID of the node with the specified Name.
func (i *Instances) InstanceID(nodeName k8stypes.NodeName) (string, error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	vm, err := getVirtualMachineByName(i.cfg, ctx, i.client, nodeName)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); ok {
			return "", cloudprovider.InstanceNotFound
		}
		return "", err
	}

	var mvm mo.VirtualMachine
	err = getVirtualMachineManagedObjectReference(ctx, i.client, vm, "summary", &mvm)
	if err != nil {
		return "", err
	}

	if mvm.Summary.Runtime.PowerState == ActivePowerState {
		return "/" + vm.InventoryPath, nil
	}

	if !mvm.Summary.Config.Template {
		glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState)
	} else {
		glog.Warningf("VM %s, is a template", nodeName)
	}

	return "", cloudprovider.InstanceNotFound
}
Example No. 21
// SetBlacklistedTablesByShard sets the blacklisted table list of all
// tablets of a given type in a shard. It would work for the master,
// but it wouldn't be very efficient.
func (wr *Wrangler) SetBlacklistedTablesByShard(keyspace, shard string, tabletType topo.TabletType, tables []string) error {
	tabletMap, err := topo.GetTabletMapForShard(wr.ts, keyspace, shard)
	switch err {
	case nil:
		// keep going
	case topo.ErrPartialResult:
		log.Warningf("SetBlacklistedTablesByShard: got partial result, may not blacklist everything everywhere")
	default:
		return err
	}

	// ignore errors in this phase
	wg := sync.WaitGroup{}
	for _, ti := range tabletMap {
		if ti.Type != tabletType {
			continue
		}

		wg.Add(1)
		go func(ti *topo.TabletInfo) {
			if err := wr.ai.SetBlacklistedTables(ti, tables, wr.ActionTimeout()); err != nil {
				log.Warningf("SetBlacklistedTablesByShard: failed to set tables for %v: %v", ti.Alias, err)
			}
			wg.Done()
		}(ti)
	}
	wg.Wait()

	return nil
}
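The loop above fans out one goroutine per tablet and passes ti as an argument so each goroutine gets its own copy of the loop variable. A standalone sketch of the same WaitGroup fan-out pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	tablets := []string{"cell-0000000100", "cell-0000000101", "cell-0000000102"}
	var wg sync.WaitGroup
	for _, t := range tablets {
		wg.Add(1)
		// Pass the loop variable as a parameter so each goroutine works on
		// its own copy (important before Go 1.22's per-iteration variables).
		go func(alias string) {
			defer wg.Done()
			fmt.Println("setting blacklisted tables for", alias)
		}(t)
	}
	wg.Wait()
}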
Example No. 22
func (m *Meta) handleAppConfigChanged(watch <-chan zookeeper.Event) {
	for {
		event := <-watch
		if event.Type == zookeeper.EventNodeDataChanged {
			a, w, err := m.FetchAppConfig()
			if err == nil {
				if a.MigrateKeysEachTime == 0 {
					a.MigrateKeysEachTime = DEFAULT_MIGRATE_KEYS_EACH_TIME
				}
				if a.MigrateTimeout == 0 {
					a.MigrateTimeout = DEFAULT_MIGRATE_TIMEOUT
				}
				if a.AutoFailoverInterval == 0 {
					a.AutoFailoverInterval = DEFAULT_AUTOFAILOVER_INTERVAL
				}
				if a.FetchClusterNodesInterval == 0 {
					a.FetchClusterNodesInterval = DEFAULT_FETCH_CLUSTER_NODES_INTERVAL
				}
				if a.MigrateConcurrency == 0 {
					a.MigrateConcurrency = DEFAULT_MIGRATE_CONCURRENCY
				}
				m.appConfig.Store(a)
				glog.Warning("meta: app config changed.", a)
			} else {
				glog.Warningf("meta: fetch app config failed, %v", err)
			}
			watch = w
		} else {
			glog.Warningf("meta: unexpected event coming, %v", event)
			break
		}
	}
}
Example No. 23
// terminateHealthChecks is called when we enter lame duck mode.
// We will clean up our state, and shut down query service.
// We only do something if we are in targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType topo.TabletType) {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()
	log.Info("agent.terminateHealthChecks is starting")

	// read the current tablet record
	tablet := agent.Tablet()
	if tablet.Type != targetTabletType {
		log.Infof("Tablet in state %v, not changing it", tablet.Type)
		return
	}

	// Change the Type to spare, update the health. Note we pass in a map
	// that's not nil, meaning we will clear it.
	if err := topotools.ChangeType(agent.TopoServer, tablet.Alias, topo.TYPE_SPARE, make(map[string]string), true /*runHooks*/); err != nil {
		log.Infof("Error updating tablet record: %v", err)
		return
	}

	// Rebuild the serving graph in our cell, only if we're dealing with
	// a serving type
	if err := agent.rebuildShardIfNeeded(tablet, targetTabletType); err != nil {
		log.Warningf("rebuildShardIfNeeded failed (will still run post action callbacks, serving graph might be out of date): %v", err)
	}

	// Run the post action callbacks (let them shutdown the query service)
	if err := agent.refreshTablet("terminatehealthcheck"); err != nil {
		log.Warningf("refreshTablet failed: %v", err)
	}
}
Example No. 24
// RunDNSServer starts the DNS server
func (c *MasterConfig) RunDNSServer() {
	config, err := dns.NewServerDefaults()
	if err != nil {
		glog.Fatalf("Could not start DNS: %v", err)
	}
	config.DnsAddr = c.Options.DNSConfig.BindAddress
	config.NoRec = true // do not want to deploy an open resolver

	_, port, err := net.SplitHostPort(c.Options.DNSConfig.BindAddress)
	if err != nil {
		glog.Fatalf("Could not start DNS: %v", err)
	}
	if port != "53" {
		glog.Warningf("Binding DNS on port %v instead of 53 (you may need to run as root and update your config), using %s which will not resolve from all locations", port, c.Options.DNSConfig.BindAddress)
	}

	if ok, err := cmdutil.TryListen(c.Options.DNSConfig.BindAddress); !ok {
		glog.Warningf("Could not start DNS: %v", err)
		return
	}

	go func() {
		err := dns.ListenAndServe(config, c.DNSServerClient(), c.EtcdHelper.Client.(*etcdclient.Client))
		glog.Fatalf("Could not start DNS: %v", err)
	}()

	cmdutil.WaitForSuccessfulDial(false, "tcp", c.Options.DNSConfig.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)

	glog.Infof("DNS listening at %s", c.Options.DNSConfig.BindAddress)
}
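The port warning above comes from splitting the configured bind address with net.SplitHostPort. A small standalone illustration of that check (the address is hypothetical):

package main

import (
	"fmt"
	"net"
)

func main() {
	bindAddress := "0.0.0.0:8053" // hypothetical non-default DNS bind address
	_, port, err := net.SplitHostPort(bindAddress)
	if err != nil {
		fmt.Println("could not parse bind address:", err)
		return
	}
	if port != "53" {
		fmt.Printf("Binding DNS on port %v instead of 53\n", port)
	}
}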
Example No. 25
func (ga *GoogleAuth) validateServerToken(user string) (*TokenDBValue, error) {
	v, err := ga.getDBValue(user)
	if err != nil {
		return nil, err
	}
	if time.Now().After(v.ValidUntil) {
		glog.V(2).Infof("Refreshing token for %s", user)
		rtr, err := ga.refreshAccessToken(v.RefreshToken)
		if err != nil {
			glog.Warningf("Failed to refresh token for %q: %s", user, err)
			return nil, fmt.Errorf("failed to refresh token: %s", err)
		}
		v.AccessToken = rtr.AccessToken
		v.ValidUntil = time.Now().Add(time.Duration(rtr.ExpiresIn-30) * time.Second)
		glog.Infof("Refreshed auth token for %s (exp %d)", user, rtr.ExpiresIn)
		err = ga.setServerToken(user, v)
		if err != nil {
			glog.Errorf("Failed to record refreshed token: %s", err)
			return nil, fmt.Errorf("failed to record refreshed token: %s", err)
		}
	}
	tokenUser, err := ga.validateAccessToken(v.TokenType, v.AccessToken)
	if err != nil {
		glog.Warningf("Token for %q failed validation: %s", user, err)
		return nil, fmt.Errorf("server token invalid: %s", err)
	}
	if tokenUser != user {
		glog.Errorf("token for wrong user: expected %s, found %s", user, tokenUser)
		return nil, fmt.Errorf("found token for wrong user")
	}
	texp := v.ValidUntil.Sub(time.Now())
	glog.V(1).Infof("Validated Google auth token for %s (exp %d)", user, int(texp.Seconds()))
	return v, nil
}
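The refresh above shortens the provider-reported lifetime by 30 seconds so the stored expiry falls safely before the real one. A standalone illustration of that expiry arithmetic (values are hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	expiresIn := int64(3600) // hypothetical lifetime in seconds from the token endpoint
	validUntil := time.Now().Add(time.Duration(expiresIn-30) * time.Second)
	fmt.Println("token considered valid until:", validUntil)
	fmt.Println("needs refresh now?", time.Now().After(validUntil))
}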
Example No. 26
// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(i.cfg, ctx)
	if err != nil {
		return "", err
	}
	defer c.Logout(ctx)

	vm, err := getVirtualMachineByName(i.cfg, ctx, c, name)
	if err != nil {
		return "", err
	}

	var mvm mo.VirtualMachine
	err = getVirtualMachineManagedObjectReference(ctx, c, vm, "summary", &mvm)
	if err != nil {
		return "", err
	}

	if mvm.Summary.Runtime.PowerState == ActivePowerState {
		return "/" + vm.InventoryPath, nil
	}

	if !mvm.Summary.Config.Template {
		glog.Warningf("VM %s, is not in %s state", name, ActivePowerState)
	} else {
		glog.Warningf("VM %s, is a template", name)
	}

	return "", cloudprovider.InstanceNotFound
}
Example No. 27
func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	glog.V(4).Info("openstack.LoadBalancer() called")

	// TODO: Search for and support Rackspace loadbalancer API, and others.
	network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find neutron endpoint: %v", err)
		return nil, false
	}

	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find compute endpoint: %v", err)
		return nil, false
	}

	glog.V(1).Info("Claiming to support LoadBalancer")

	if os.lbOpts.LBVersion == "v2" {
		return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true
	} else {

		return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true
	}
}
Example No. 28
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Find VM to detach disk from
	var vSphereInstance string
	if nodeName == "" {
		vSphereInstance = vs.localInstanceID
	} else {
		vSphereInstance = nodeName
	}

	nodeExist, err := vs.NodeExists(c, vSphereInstance)

	if err != nil {
		glog.Errorf("Failed to check whether node exist. err: %s.", err)
		return err
	}

	if !nodeExist {
		glog.Warningf(
			"Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.",
			vSphereInstance,
			volPath)
		return nil
	}

	vm, vmDevices, _, dc, err := getVirtualMachineDevices(vs.cfg, ctx, c, vSphereInstance)
	if err != nil {
		return err
	}

	diskID, err := getVirtualDiskID(volPath, vmDevices, dc, c)
	if err != nil {
		glog.Warningf("disk ID not found for %v ", volPath)
		return err
	}

	// Gets virtual disk device
	device := vmDevices.Find(diskID)
	if device == nil {
		return fmt.Errorf("device '%s' not found", diskID)
	}

	// Detach disk from VM
	err = vm.RemoveDevice(ctx, true, device)
	if err != nil {
		return err
	}

	return nil
}
Example No. 29
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
	// For some reason docker appears to be appending '/' to names.
	// If it's there, strip it.
	name = strings.TrimPrefix(name, "/")
	parts := strings.Split(name, "_")
	if len(parts) == 0 || parts[0] != containerNamePrefix {
		err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
		return nil, 0, err
	}
	if len(parts) < 6 {
		// We expect at least 6 fields.  We may have more in the future.
		// Anything with fewer fields than this is not something we can
		// manage.
		glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
		err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
		return nil, 0, err
	}

	nameParts := strings.Split(parts[1], ".")
	containerName := nameParts[0]
	if len(nameParts) > 1 {
		hash, err = strconv.ParseUint(nameParts[1], 16, 32)
		if err != nil {
			glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
		}
	}

	podFullName := parts[2] + "_" + parts[3]
	podUID := types.UID(parts[4])

	return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
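A standalone sketch of how a name in this layout splits into its fields, using a hypothetical container name; the real parser above also validates the prefix and the part count:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical name in the "<prefix>_<container>.<hash>_<pod>_<namespace>_<uid>_<attempt>" layout.
	name := "/k8s_nginx.a1b2c3d4_web-1_default_1234-5678_0"
	parts := strings.Split(strings.TrimPrefix(name, "/"), "_")
	nameParts := strings.Split(parts[1], ".")
	hash, _ := strconv.ParseUint(nameParts[1], 16, 32)

	fmt.Println("container name:", nameParts[0])         // nginx
	fmt.Printf("container hash: %x\n", hash)             // a1b2c3d4
	fmt.Println("pod full name:", parts[2]+"_"+parts[3]) // web-1_default
	fmt.Println("pod UID:", parts[4])                     // 1234-5678
}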
Example No. 30
// WatchControllers begins watching for new, changed, or deleted controllers.
func (r *Registry) WatchControllers(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
	if !field.Empty() {
		return nil, fmt.Errorf("field selectors are not supported on replication controllers")
	}
	version, err := tools.ParseWatchResourceVersion(resourceVersion, "replicationControllers")
	if err != nil {
		return nil, err
	}
	key := makeControllerListKey(ctx)
	return r.WatchList(key, version, func(obj runtime.Object) bool {
		controller, ok := obj.(*api.ReplicationController)
		if !ok {
			// Must be an error: return true to propagate to upper level.
			return true
		}
		match := label.Matches(labels.Set(controller.Labels))
		if match {
			pods, err := r.pods.ListPods(ctx, labels.Set(controller.Spec.Selector).AsSelector())
			if err != nil {
				glog.Warningf("Error listing pods: %v", err)
				// No object that's useable so drop it on the floor
				return false
			}
			if pods == nil {
				glog.Warningf("Pods list is nil.  This should never happen...")
				// No object that's useable so drop it on the floor
				return false
			}
			controller.Status.Replicas = len(pods.Items)
		}
		return match
	})
}