Example #1
// OnUpdate updates the registered endpoints with the new
// endpoint information and removes any registered endpoints
// that are no longer present in the provided endpoints.
func (impl LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
	tmp := make(map[string]bool)
	impl.lock.Lock()
	defer impl.lock.Unlock()
	// First update / add all new endpoints for services.
	for _, value := range endpoints {
		existingEndpoints, exists := impl.endpointsMap[value.Name]
		validEndpoints := impl.filterValidEndpoints(value.Endpoints)
		if !exists || !reflect.DeepEqual(existingEndpoints, validEndpoints) {
			glog.Infof("LoadBalancerRR: Setting endpoints for %s to %+v", value.Name, value.Endpoints)
			impl.endpointsMap[value.Name] = validEndpoints
			// Start RR from the beginning if added or updated.
			impl.rrIndex[value.Name] = 0
		}
		tmp[value.Name] = true
	}
	// Then remove any endpoints no longer relevant
	for key, value := range impl.endpointsMap {
		_, exists := tmp[key]
		if !exists {
			glog.Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", key, value)
			delete(impl.endpointsMap, key)
		}
	}
}
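The example above reconciles a shared endpoints map under a mutex and resets the round-robin index whenever a service's endpoints change. Below is a minimal, self-contained sketch of the same pattern using only the standard library; the rrTable type and its fields are illustrative stand-ins, not part of the original package.
package main

import (
	"fmt"
	"reflect"
	"sync"
)

type rrTable struct {
	mu        sync.Mutex
	endpoints map[string][]string
	rrIndex   map[string]int
}

// onUpdate replaces the stored endpoints, resets the round-robin index for any
// service whose endpoint list changed, and drops services that are gone.
func (t *rrTable) onUpdate(update map[string][]string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	seen := make(map[string]bool)
	for name, eps := range update {
		if !reflect.DeepEqual(t.endpoints[name], eps) {
			t.endpoints[name] = eps
			t.rrIndex[name] = 0 // start round-robin from the beginning
		}
		seen[name] = true
	}
	for name := range t.endpoints {
		if !seen[name] {
			delete(t.endpoints, name)
			delete(t.rrIndex, name)
		}
	}
}

func main() {
	t := &rrTable{endpoints: map[string][]string{}, rrIndex: map[string]int{}}
	t.onUpdate(map[string][]string{"svc": {"10.0.0.1:80", "10.0.0.2:80"}})
	fmt.Println(t.endpoints)
}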
Example #2
// Open must be called before sending requests to QueryEngine.
func (qe *QueryEngine) Open(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride) {
	qe.dbconfigs = dbconfigs
	appParams := dbconfigs.App.ConnParams
	// Create dba params based on App connection params
	// and Dba credentials.
	dbaParams := dbconfigs.App.ConnParams
	if dbconfigs.Dba.Uname != "" {
		dbaParams.Uname = dbconfigs.Dba.Uname
		dbaParams.Pass = dbconfigs.Dba.Pass
	}

	strictMode := false
	if qe.strictMode.Get() != 0 {
		strictMode = true
	}
	if !strictMode && dbconfigs.App.EnableRowcache {
		panic(NewTabletError(ErrFatal, vtrpc.ErrorCode_INTERNAL_ERROR, "Rowcache cannot be enabled when queryserver-config-strict-mode is false"))
	}
	if dbconfigs.App.EnableRowcache {
		qe.cachePool.Open()
		log.Infof("rowcache is enabled")
	} else {
		log.Infof("rowcache is not enabled")
	}

	start := time.Now()
	// schemaInfo depends on cachePool. Every table that has a rowcache
	// points to the cachePool.
	qe.schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, qe.cachePool, strictMode)
	log.Infof("Time taken to load the schema: %v", time.Now().Sub(start))

	qe.connPool.Open(&appParams, &dbaParams)
	qe.streamConnPool.Open(&appParams, &dbaParams)
	qe.txPool.Open(&appParams, &dbaParams)
}
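One detail worth noting above: dbaParams is initialised by assigning dbconfigs.App.ConnParams, which copies the struct, so overriding the DBA credentials leaves the app parameters untouched. A small sketch of that value-copy behaviour, with an illustrative connParams type rather than the real Vitess one:
package main

import "fmt"

type connParams struct {
	Host  string
	Uname string
	Pass  string
}

func main() {
	app := connParams{Host: "localhost", Uname: "app_user", Pass: "app_pw"}

	dba := app // assignment copies the struct value, not a reference
	if dbaUser := "dba_user"; dbaUser != "" {
		dba.Uname = dbaUser
		dba.Pass = "dba_pw"
	}

	fmt.Println(app.Uname) // app_user: the original is unchanged
	fmt.Println(dba.Uname) // dba_user
}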
Example #3
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
Example #4
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()

	log.Infof("action dispatch %v", actionPath)
	actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
	if err != nil {
		log.Errorf("action decode failed: %v %v", actionPath, err)
		return nil
	}

	cmd := []string{
		agent.vtActionBinFile,
		"-action", actionNode.Action,
		"-action-node", actionPath,
		"-action-guid", actionNode.ActionGuid,
	}
	cmd = append(cmd, logutil.GetSubprocessFlags()...)
	cmd = append(cmd, topo.GetSubprocessFlags()...)
	cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
	cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
	log.Infof("action launch %v", cmd)
	vtActionCmd := exec.Command(cmd[0], cmd[1:]...)

	stdOut, vtActionErr := vtActionCmd.CombinedOutput()
	if vtActionErr != nil {
		log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
		// If the action failed, preserve single execution path semantics.
		return vtActionErr
	}

	log.Infof("Agent action completed %v %s", actionPath, stdOut)
	agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
	return nil
}
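The dispatch above builds an argv slice, appends subprocess flags, and runs the command while capturing combined stdout and stderr. A self-contained sketch of that shape, using echo and placeholder flags instead of the real vtaction binary:
package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Build the argv slice first, then append any extra flags.
	cmd := []string{"echo", "-action", "Ping", "-action-guid", "demo-guid"}
	cmd = append(cmd, "extra-flag")

	// Run the subprocess and capture stdout and stderr together.
	out, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput()
	if err != nil {
		log.Fatalf("subprocess failed: %v\n%s", err, out)
	}
	fmt.Printf("subprocess output: %s", out)
}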
Example #5
func (_ *UserTask) RenderLocal(t *local.LocalTarget, a, e, changes *UserTask) error {
	if a == nil {
		args := buildUseraddArgs(e)
		glog.Infof("Creating user %q", e.Name)
		cmd := exec.Command("useradd", args...)
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("error creating user: %v\nOutput: %s", err, output)
		}
	} else {
		var args []string

		if changes.Shell != "" {
			args = append(args, "-s", e.Shell)
		}
		if changes.Home != "" {
			args = append(args, "-d", e.Home)
		}

		if len(args) != 0 {
			args = append(args, e.Name)
			glog.Infof("Reconfiguring user %q", e.Name)
			cmd := exec.Command("usermod", args...)
			output, err := cmd.CombinedOutput()
			if err != nil {
				return fmt.Errorf("error reconfiguring user: %v\nOutput: %s", err, output)
			}
		}
	}

	return nil
}
Example #6
// Add adds a healthcheck if one for the same port doesn't already exist.
func (h *HealthChecks) Add(port int64, path string) error {
	hc, _ := h.Get(port)
	name := h.namer.BeName(port)
	if path == "" {
		path = h.defaultPath
	}
	if hc == nil {
		glog.Infof("Creating health check %v", name)
		if err := h.cloud.CreateHttpHealthCheck(
			&compute.HttpHealthCheck{
				Name:        name,
				Port:        port,
				RequestPath: path,
				Description: "Default kubernetes L7 Loadbalancing health check.",
				// How often to health check.
				CheckIntervalSec: 1,
				// How long to wait before claiming failure of a health check.
				TimeoutSec: 1,
				// Number of healthchecks to pass for a vm to be deemed healthy.
				HealthyThreshold: 1,
				// Number of healthchecks to fail before the vm is deemed unhealthy.
				UnhealthyThreshold: 10,
			}); err != nil {
			return err
		}
	} else {
		// TODO: Does this health check need an edge hop?
		glog.Infof("Health check %v already exists", hc.Name)
	}
	return nil
}
Example #7
// nodeSyncLoop handles updating the hosts pointed to by all load
// balancers whenever the set of nodes in the cluster changes.
func (s *ServiceController) nodeSyncLoop(period time.Duration) {
	var prevHosts []string
	var servicesToUpdate []*cachedService
	for range time.Tick(period) {
		nodes, err := s.nodeLister.NodeCondition(getNodeConditionPredicate()).List()
		if err != nil {
			glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
			continue
		}
		newHosts := hostsFromNodeSlice(nodes)
		if stringSlicesEqual(newHosts, prevHosts) {
			// The set of nodes in the cluster hasn't changed, but we can retry
			// updating any services that we failed to update last time around.
			servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
			continue
		}
		glog.Infof("Detected change in list of current cluster nodes. New node set: %v", newHosts)

		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.cache.allServices()
		numServices := len(servicesToUpdate)
		servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
		glog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
			numServices-len(servicesToUpdate), numServices)

		prevHosts = newHosts
	}
}
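The loop above polls on a ticker, skips the expensive update when the observed host set is unchanged, and keeps a list of services to retry. A minimal standard-library sketch of that polling shape, with illustrative names and a bounded number of rounds so it terminates:
package main

import (
	"fmt"
	"reflect"
	"time"
)

// pollHosts runs a bounded version of the loop above: on every tick it compares
// the freshly listed hosts with the previous snapshot and only does the full
// update when the set changed.
func pollHosts(list func() []string, rounds int) {
	var prev []string
	ticker := time.NewTicker(20 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < rounds; i++ {
		<-ticker.C
		hosts := list()
		if reflect.DeepEqual(hosts, prev) {
			continue // unchanged: only retry previously failed work here
		}
		fmt.Println("host set changed:", hosts)
		prev = hosts
	}
}

func main() {
	calls := 0
	pollHosts(func() []string {
		calls++
		if calls > 2 {
			return []string{"node-a", "node-b"}
		}
		return []string{"node-a"}
	}, 4)
}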
Example #8
// terminateHealthChecks is called when we enter lame duck mode.
// We will clean up our state, and shut down query service.
// We only do something if we are in targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType pbt.TabletType) {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()
	log.Info("agent.terminateHealthChecks is starting")

	// read the current tablet record
	tablet := agent.Tablet()
	if tablet.Type != targetTabletType {
		log.Infof("Tablet in state %v, not changing it", tablet.Type)
		return
	}

	// Change the Type to spare, update the health. Note we pass in a map
	// that's not nil, meaning we will clear it.
	if err := topotools.ChangeType(agent.batchCtx, agent.TopoServer, tablet.Alias, pbt.TabletType_SPARE, make(map[string]string)); err != nil {
		log.Infof("Error updating tablet record: %v", err)
		return
	}

	// Update the serving graph in our cell, only if we're dealing with
	// a serving type
	if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
		log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
	}

	// We've already rebuilt the shard, which is the only reason we registered
	// ourself as OnTermSync (synchronous). The rest can be done asynchronously.
	go func() {
		// Run the post action callbacks (let them shutdown the query service)
		if err := agent.refreshTablet(agent.batchCtx, "terminatehealthcheck"); err != nil {
			log.Warningf("refreshTablet failed: %v", err)
		}
	}()
}
Example #9
// Start launches a master. It returns an error where possible, but some background processes may still
// be running and the process should exit after it finishes.
func (m *Master) Start() error {
	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: true,
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostPIDSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostIPCSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
		},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*m.config)
	if err != nil {
		return err
	}

	kubeMasterConfig, err := BuildKubernetesMasterConfig(openshiftConfig)
	if err != nil {
		return err
	}

	switch {
	case m.api:
		glog.Infof("Starting master on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		glog.Infof("Public master address is %s", m.config.AssetConfig.MasterPublicURL)
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := StartAPI(openshiftConfig, kubeMasterConfig); err != nil {
			return err
		}

	case m.controllers:
		glog.Infof("Starting controllers on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := startHealth(openshiftConfig); err != nil {
			return err
		}
	}

	if m.controllers {
		// run controllers asynchronously (not required to be "ready")
		go func() {
			if err := startControllers(openshiftConfig, kubeMasterConfig); err != nil {
				glog.Fatal(err)
			}

			openshiftConfig.Informers.Start(utilwait.NeverStop)
		}()
	}

	return nil
}
Example #10
// runOnce runs a given set of pods and returns their status.
func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
	ch := make(chan RunPodResult)
	admitted := []*api.Pod{}
	for _, pod := range pods {
		// Check if we can admit the pod.
		if ok, reason, message := kl.canAdmitPod(append(admitted, pod), pod); !ok {
			kl.rejectPod(pod, reason, message)
		} else {
			admitted = append(admitted, pod)
		}
		go func(pod *api.Pod) {
			err := kl.runPod(pod, retryDelay)
			ch <- RunPodResult{pod, err}
		}(pod)
	}

	glog.Infof("waiting for %d pods", len(pods))
	failedPods := []string{}
	for i := 0; i < len(pods); i++ {
		res := <-ch
		results = append(results, res)
		if res.Err != nil {
			// TODO(proppy): report which containers failed the pod.
			glog.Infof("failed to start pod %q: %v", res.Pod.Name, res.Err)
			failedPods = append(failedPods, res.Pod.Name)
		} else {
			glog.Infof("started pod %q", res.Pod.Name)
		}
	}
	if len(failedPods) > 0 {
		return results, fmt.Errorf("error running pods: %v", failedPods)
	}
	glog.Infof("%d pods started", len(pods))
	return results, err
}
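runOnce fans out one goroutine per pod and gathers the results over an unbuffered channel, collecting the names of the failures. A self-contained sketch of that fan-out, with a placeholder work function:
package main

import (
	"fmt"
	"strings"
)

type result struct {
	name string
	err  error
}

func main() {
	items := []string{"pod-a", "pod-b", "bad-pod"}
	ch := make(chan result)
	for _, name := range items {
		// Pass name in as a parameter so each goroutine gets its own copy.
		go func(name string) {
			var err error
			if strings.HasPrefix(name, "bad") {
				err = fmt.Errorf("refused to start")
			}
			ch <- result{name, err}
		}(name)
	}

	var failed []string
	for range items {
		r := <-ch
		if r.err != nil {
			failed = append(failed, r.name)
		}
	}
	if len(failed) > 0 {
		fmt.Println("failed:", failed)
	}
}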
Example #11
func (agent *ActionAgent) initHealthCheck() {
	if !agent.IsRunningHealthCheck() {
		log.Infof("No target_tablet_type specified, disabling any health check")
		return
	}

	tt, err := topoproto.ParseTabletType(*targetTabletType)
	if err != nil {
		log.Fatalf("Invalid target tablet type %v: %v", *targetTabletType, err)
	}

	log.Infof("Starting periodic health check every %v with target_tablet_type=%v", *healthCheckInterval, *targetTabletType)
	t := timer.NewTimer(*healthCheckInterval)
	servenv.OnTermSync(func() {
		// When we enter lameduck mode, we want to not call
		// the health check any more. After this returns, we
		// are guaranteed to not call it.
		log.Info("Stopping periodic health check timer")
		t.Stop()

		// Now we can finish up and force ourselves to not healthy.
		agent.terminateHealthChecks(tt)
	})
	t.Start(func() {
		agent.runHealthCheck(tt)
	})
	t.Trigger()
}
Example #12
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting HPA Controller")
	go a.controller.Run(stopCh)
	<-stopCh
	glog.Infof("Shutting down HPA Controller")
}
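Run here follows the common stop-channel shape: start background work, then block until the caller closes stopCh. A minimal sketch of that pattern with an illustrative controller type:
package main

import (
	"fmt"
	"time"
)

type controller struct{}

func (c *controller) Run(stopCh <-chan struct{}) {
	fmt.Println("starting controller")
	go func() { // background worker; real code would also watch stopCh
		<-stopCh
	}()
	<-stopCh
	fmt.Println("shutting down controller")
}

func main() {
	stop := make(chan struct{})
	go (&controller{}).Run(stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
	time.Sleep(10 * time.Millisecond) // give Run a moment to print its shutdown line
}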
Example #13
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
	// TODO: Do we want to allow containers to access public services?  Probably yes.
	// TODO: We could refactor this to be the same code as portal, but with IP == nil

	err := proxier.claimNodePort(nil, nodePort, protocol, name)
	if err != nil {
		return err
	}

	// Handle traffic from containers.
	args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
	if err != nil {
		glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
		return err
	}
	if !existed {
		glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
	}

	// Handle traffic from the host.
	args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
	if err != nil {
		glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
		return err
	}
	if !existed {
		glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
	}
	return nil
}
Example #14
// fetchNextState fetches the key (or waits for a change to a key) and then returns
// the nextIndex to read. It will watch no longer than s.waitDuration and then return.
func (s *SourceEtcd) fetchNextState(fromIndex uint64) (nextIndex uint64, err error) {
	var response *etcd.Response

	if fromIndex == 0 {
		response, err = s.client.Get(s.key, true, false)
	} else {
		response, err = s.client.Watch(s.key, fromIndex, false, nil, stopChannel(s.waitDuration))
		if tools.IsEtcdWatchStoppedByUser(err) {
			return fromIndex, nil
		}
	}
	if err != nil {
		return fromIndex, err
	}

	pods, err := responseToPods(response)
	if err != nil {
		glog.Infof("Response was in error: %#v", response)
		return 0, fmt.Errorf("error parsing response: %#v", err)
	}

	glog.Infof("Got state from etcd: %+v", pods)
	s.updates <- kubelet.PodUpdate{pods, kubelet.SET}

	return response.Node.ModifiedIndex + 1, nil
}
Example #15
func main() {
	flag.Parse()

	storageDriver, err := NewStorageDriver(*argDbDriver)
	if err != nil {
		glog.Fatalf("Failed to connect to database: %s", err)
	}

	containerManager, err := manager.New(storageDriver)
	if err != nil {
		glog.Fatalf("Failed to create a Container Manager: %s", err)
	}

	// Register Docker.
	if err := docker.Register(containerManager); err != nil {
		glog.Errorf("Docker registration failed: %v.", err)
	}

	// Register the raw driver.
	if err := raw.Register(containerManager); err != nil {
		glog.Fatalf("raw registration failed: %v.", err)
	}

	// Handler for static content.
	http.HandleFunc(static.StaticResource, func(w http.ResponseWriter, r *http.Request) {
		err := static.HandleRequest(w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	// Register API handler.
	if err := api.RegisterHandlers(containerManager); err != nil {
		glog.Fatalf("failed to register API handlers: %s", err)
	}

	// Redirect / to containers page.
	http.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))

	// Register the handler for the containers page.
	http.HandleFunc(pages.ContainersPage, func(w http.ResponseWriter, r *http.Request) {
		err := pages.ServerContainersPage(containerManager, w, r.URL)
		if err != nil {
			fmt.Fprintf(w, "%s", err)
		}
	})

	defer glog.Flush()

	go func() {
		glog.Fatal(containerManager.Start())
	}()

	glog.Infof("Starting cAdvisor version: %q", info.VERSION)
	glog.Infof("About to serve on port ", *argPort)

	addr := fmt.Sprintf(":%v", *argPort)

	glog.Fatal(http.ListenAndServe(addr, nil))
}
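main wires up HTTP handlers, redirects /, and then blocks in ListenAndServe. A self-contained sketch of that wiring with illustrative paths and an assumed port:
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Register a handler for a specific path.
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	// Redirect the root path to the handler above.
	http.Handle("/", http.RedirectHandler("/healthz", http.StatusTemporaryRedirect))

	addr := ":8080" // assumed port for this sketch
	log.Printf("about to serve on %s", addr)
	log.Fatal(http.ListenAndServe(addr, nil))
}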
Example #16
func (l *L7) checkProxy() (err error) {
	if l.um == nil {
		return fmt.Errorf("Cannot create proxy without urlmap.")
	}
	proxyName := truncate(fmt.Sprintf("%v-%v", targetProxyPrefix, l.Name))
	proxy, _ := l.cloud.GetTargetHttpProxy(proxyName)
	if proxy == nil {
		glog.Infof("Creating new http proxy for urlmap %v", l.um.Name)
		proxy, err = l.cloud.CreateTargetHttpProxy(l.um, proxyName)
		if err != nil {
			return err
		}
		l.tp = proxy
		return nil
	}
	if !compareLinks(proxy.UrlMap, l.um.SelfLink) {
		glog.Infof("Proxy %v has the wrong url map, setting %v overwriting %v",
			proxy.Name, l.um, proxy.UrlMap)
		if err := l.cloud.SetUrlMapForTargetHttpProxy(proxy, l.um); err != nil {
			return err
		}
	}
	l.tp = proxy
	return nil
}
Example #17
func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *containerData) error {
	for k, v := range collectorConfigs {
		configFile, err := cont.ReadFile(v, m.inHostNamespace)
		if err != nil {
			return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
		}
		glog.V(3).Infof("Got config from %q: %q", v, configFile)

		if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
			newCollector, err := collector.NewPrometheusCollector(k, configFile)
			if err != nil {
				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
				return err
			}
			err = cont.collectorManager.RegisterCollector(newCollector)
			if err != nil {
				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
				return err
			}
		} else {
			newCollector, err := collector.NewCollector(k, configFile)
			if err != nil {
				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
				return err
			}
			err = cont.collectorManager.RegisterCollector(newCollector)
			if err != nil {
				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
				return err
			}
		}
	}
	return nil
}
Example #18
func (l *L7) checkForwardingRule() (err error) {
	if l.tp == nil {
		return fmt.Errorf("Cannot create forwarding rule without proxy.")
	}

	forwardingRuleName := truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name))
	fw, _ := l.cloud.GetGlobalForwardingRule(forwardingRuleName)
	if fw == nil {
		glog.Infof("Creating forwarding rule for proxy %v", l.tp.Name)
		fw, err = l.cloud.CreateGlobalForwardingRule(
			l.tp, forwardingRuleName, defaultPortRange)
		if err != nil {
			return err
		}
		l.fw = fw
		return nil
	}
	// TODO: If the port range and protocol don't match, recreate the rule
	if compareLinks(fw.Target, l.tp.SelfLink) {
		glog.Infof("Forwarding rule %v already exists", fw.Name)
		l.fw = fw
		return nil
	}
	glog.Infof("Forwarding rule %v has the wrong proxy, setting %v overwriting %v",
		fw.Name, fw.Target, l.tp.SelfLink)
	if err := l.cloud.SetProxyForGlobalForwardingRule(fw, l.tp); err != nil {
		return err
	}
	l.fw = fw
	return nil
}
Example #19
func (s *Store) execute(req string) string {
	request := strings.Split(req, " ")

	switch request[0] {
	case "update":
		if len(request) != 3 {
			return "not reconised"
		}
		glog.Infof("Updating %s to %s", request[1], request[2])
		(*s)[request[1]] = request[2]
		return "OK"
	case "get":
		if len(request) != 2 {
			return "not reconised"
		}
		glog.Infof("Getting %s", request[1])
		value, ok := (*s)[request[1]]
		if ok {
			return value
		} else {
			return "key not found"
		}
	default:
		return "not reconised"
	}
}
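execute splits the request on spaces and dispatches on the first token. A runnable sketch of the same dispatch with an illustrative kvStore type (the original Store type is not shown in this snippet):
package main

import (
	"fmt"
	"strings"
)

type kvStore map[string]string

func (s kvStore) execute(req string) string {
	parts := strings.Split(req, " ")
	switch parts[0] {
	case "update":
		if len(parts) != 3 {
			return "not recognised"
		}
		s[parts[1]] = parts[2]
		return "OK"
	case "get":
		if len(parts) != 2 {
			return "not recognised"
		}
		if v, ok := s[parts[1]]; ok {
			return v
		}
		return "key not found"
	default:
		return "not recognised"
	}
}

func main() {
	s := kvStore{}
	fmt.Println(s.execute("update color blue")) // OK
	fmt.Println(s.execute("get color"))         // blue
	fmt.Println(s.execute("delete color"))      // not recognised
}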
Example #20
// Cleanup deletes resources specific to this l7 in the right order.
// forwarding rule -> target proxy -> url map
// This leaves backends and health checks, which are shared across loadbalancers.
func (l *L7) Cleanup() error {
	if l.fw != nil {
		glog.Infof("Deleting global forwarding rule %v", l.fw.Name)
		if err := l.cloud.DeleteGlobalForwardingRule(l.fw.Name); err != nil {
			if !isHTTPErrorCode(err, http.StatusNotFound) {
				return err
			}
		}
		l.fw = nil
	}
	if l.tp != nil {
		glog.Infof("Deleting target proxy %v", l.tp.Name)
		if err := l.cloud.DeleteTargetHttpProxy(l.tp.Name); err != nil {
			if !isHTTPErrorCode(err, http.StatusNotFound) {
				return err
			}
		}
		l.tp = nil
	}
	if l.um != nil {
		glog.Infof("Deleting url map %v", l.um.Name)
		if err := l.cloud.DeleteUrlMap(l.um.Name); err != nil {
			if !isHTTPErrorCode(err, http.StatusNotFound) {
				return err
			}
		}
		l.um = nil
	}
	return nil
}
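Cleanup deletes resources in dependency order and treats "not found" as success so repeated calls stay safe. A minimal sketch of that idempotent teardown, with an illustrative fake cloud and sentinel error in place of the real GCE client:
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type fakeCloud struct{ existing map[string]bool }

func (c *fakeCloud) delete(name string) error {
	if !c.existing[name] {
		return errNotFound
	}
	delete(c.existing, name)
	return nil
}

// cleanup deletes each named resource in order, ignoring "not found" so the
// whole operation can be retried safely.
func cleanup(c *fakeCloud, names ...string) error {
	for _, name := range names {
		if err := c.delete(name); err != nil && !errors.Is(err, errNotFound) {
			return err
		}
		fmt.Println("deleted (or already gone):", name)
	}
	return nil
}

func main() {
	c := &fakeCloud{existing: map[string]bool{"forwarding-rule": true, "url-map": true}}
	if err := cleanup(c, "forwarding-rule", "target-proxy", "url-map"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}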
Example #21
func runReplicationControllerTest(c *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := c.ReplicationControllers(api.NamespaceDefault).Create(&controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, &controller)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// wait for minions to indicate they have info about the desired pods
	pods, err := c.Pods(api.NamespaceDefault).List(labels.Set(controller.Spec.Selector).AsSelector())
	if err != nil {
		glog.Fatalf("FAILED: unable to get pods to list: %v", err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podsOnMinions(c, *pods)); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
Example #22
func (s *ServiceController) init() error {
	if s.federationName == "" {
		return fmt.Errorf("ServiceController should not be run without federationName.")
	}
	if s.zoneName == "" {
		return fmt.Errorf("ServiceController should not be run without zoneName.")
	}
	if s.dns == nil {
		return fmt.Errorf("ServiceController should not be run without a dnsprovider.")
	}
	zones, ok := s.dns.Zones()
	if !ok {
		return fmt.Errorf("the dns provider does not support zone enumeration, which is required for creating dns records.")
	}
	s.dnsZones = zones
	if _, err := getDnsZone(s.zoneName, s.dnsZones); err != nil {
		glog.Infof("DNS zone %q not found.  Creating DNS zone %q.", s.zoneName, s.zoneName)
		managedZone, err := s.dnsZones.New(s.zoneName)
		if err != nil {
			return err
		}
		zone, err := s.dnsZones.Add(managedZone)
		if err != nil {
			return err
		}
		glog.Infof("DNS zone %q successfully created.  Note that DNS resolution will not work until you have registered this name with "+
			"a DNS registrar and they have changed the authoritative name servers for your domain to point to your DNS provider.", zone.Name())
	}
	return nil
}
Example #23
// afterAction needs to be run after an action may have changed the current
// state of the tablet.
func (agent *ActionAgent) afterAction(context string, reloadSchema bool) {
	log.Infof("Executing post-action change callbacks")

	// Save the old tablet so callbacks can have a better idea of
	// the precise nature of the transition.
	oldTablet := agent.Tablet().Tablet

	// Actions should have side effects on the tablet, so reload the data.
	if err := agent.readTablet(); err != nil {
		log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err)
	} else {
		if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil {
			agent.mutex.Lock()
			agent._tablet = updatedTablet
			agent.mutex.Unlock()
		}

		agent.runChangeCallback(oldTablet, context)
	}

	// Maybe invalidate the schema.
	// This adds a dependency between tabletmanager and tabletserver,
	// so it's not ideal. But I (alainjobart) think it's better
	// to have up to date schema in vtocc.
	if reloadSchema {
		tabletserver.ReloadSchema()
	}
	log.Infof("Done with post-action change callbacks")
}
Example #24
// clusterSyncLoop observes changes to the set of running clusters, applies all services to newly
// added clusters, and adds DNS records for the changes
func (s *ServiceController) clusterSyncLoop() {
	var servicesToUpdate []*cachedService
	// should we remove the cache for a cluster that goes from ready to not ready? If not, remove the condition predicate.
	clusters, err := s.clusterStore.ClusterCondition(getClusterConditionPredicate()).List()
	if err != nil {
		glog.Infof("Fail to get cluster list")
		return
	}
	newClusters := clustersFromList(&clusters)
	var newSet, increase sets.String
	newSet = sets.NewString(newClusters...)
	if newSet.Equal(s.knownClusterSet) {
		// The set of cluster names in the services in the federation hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		return
	}
	glog.Infof("Detected change in list of cluster names. New  set: %v, Old set: %v", newSet, s.knownClusterSet)
	increase = newSet.Difference(s.knownClusterSet)
	// do nothing when cluster is removed.
	if increase != nil {
		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.serviceCache.allServices()
		numServices := len(servicesToUpdate)
		for newCluster := range increase {
			glog.Infof("New cluster observed %s", newCluster)
			s.updateAllServicesToCluster(servicesToUpdate, newCluster)
		}
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		glog.Infof("Successfully updated %d out of %d DNS records to direct traffic to the updated cluster",
			numServices-len(servicesToUpdate), numServices)
	}
	s.knownClusterSet = newSet
}
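The loop above compares the newly observed cluster names with the known set and only acts on the names that were added. A standard-library sketch of that set bookkeeping, with an illustrative stringSet type standing in for sets.String:
package main

import "fmt"

type stringSet map[string]struct{}

func newStringSet(items ...string) stringSet {
	s := stringSet{}
	for _, it := range items {
		s[it] = struct{}{}
	}
	return s
}

// difference returns the members of s that are not in other.
func (s stringSet) difference(other stringSet) []string {
	var out []string
	for it := range s {
		if _, ok := other[it]; !ok {
			out = append(out, it)
		}
	}
	return out
}

func main() {
	known := newStringSet("cluster-a")
	current := newStringSet("cluster-a", "cluster-b")
	fmt.Println("new clusters:", current.difference(known)) // [cluster-b]
}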
Example #25
// Delete deletes the given pet, if no other pet in the petset is blocking a
// scale event.
func (p *petSyncer) Delete(pet *pcb) error {
	if pet == nil {
		return nil
	}
	realPet, exists, err := p.Get(pet)
	if err != nil {
		return err
	}
	if !exists {
		return nil
	}
	if p.blockingPet != nil {
		glog.Infof("Delete of %v in PetSet %v blocked by unhealthy pet %v", realPet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name)
		return nil
	}
	// This is counted as a delete, even if it fails.
	// The returned error will force a requeue.
	p.blockingPet = realPet
	if !p.isDying(realPet.pod) {
		glog.Infof("PetSet %v deleting pet %v", pet.parent.Name, pet.pod.Name)
		return p.petClient.Delete(pet)
	}
	glog.Infof("PetSet %v waiting on pet %v to die in %v", pet.parent.Name, realPet.pod.Name, realPet.pod.DeletionTimestamp)
	return nil
}
Example #26
// registerHandlers services liveness probes and falls back to the static page handler for unmatched traffic.
func registerHandlers(s *staticPageHandler) {
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		// Delegate a check to the haproxy stats service.
		response, err := http.Get(fmt.Sprintf("http://localhost:%v", *statsPort))
		if err != nil {
			glog.Infof("Error %v", err)
			w.WriteHeader(http.StatusInternalServerError)
		} else {
			defer response.Body.Close()
			if response.StatusCode != http.StatusOK {
				contents, err := ioutil.ReadAll(response.Body)
				if err != nil {
					glog.Infof("Error reading resonse on receiving status %v: %v",
						response.StatusCode, err)
				}
				glog.Infof("%v\n", string(contents))
				w.WriteHeader(response.StatusCode)
			} else {
				w.WriteHeader(200)
				w.Write([]byte("ok"))
			}
		}
	})

	// handler for not matched traffic
	http.HandleFunc("/", s.Getfunc)

	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", lbApiPort), nil))
}
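The /healthz handler above delegates to another local endpoint and mirrors its status code. A self-contained sketch of that delegating health check, using an in-process test server instead of the real haproxy stats port (the serving port below is an assumption):
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in for the stats service being checked.
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer backend.Close()

	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		resp, err := http.Get(backend.URL)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			body, _ := io.ReadAll(resp.Body)
			log.Printf("backend unhealthy (%d): %s", resp.StatusCode, body)
			w.WriteHeader(resp.StatusCode)
			return
		}
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})

	fmt.Println("serving /healthz on :8081")
	log.Fatal(http.ListenAndServe(":8081", nil))
}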
Example #27
// WaitForClusterSizeFunc waits until the cluster size satisfies the given size function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node v1.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
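WaitForClusterSizeFunc is a poll-until-deadline loop: list, filter, test the predicate, sleep, repeat until the timeout passes. A minimal sketch of that wait loop with an illustrative predicate and interval:
package main

import (
	"fmt"
	"time"
)

// waitFor polls pred every interval until it returns true or timeout elapses.
func waitFor(pred func() bool, interval, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if pred() {
			return nil
		}
	}
	return fmt.Errorf("timeout waiting %v for condition", timeout)
}

func main() {
	ready := 0
	err := waitFor(func() bool {
		ready++
		return ready >= 3 // pretend the cluster reaches the desired size after a few polls
	}, 10*time.Millisecond, time.Second)
	fmt.Println("err:", err)
}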
Example #28
func main() {
	var (
		config *Config
		zk     *Zookeeper
		p      *Pitchfork
		err    error
	)
	flag.Parse()
	defer log.Flush()
	log.Infof("bfs pitchfork start")
	if config, err = NewConfig(configFile); err != nil {
		log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
		return
	}
	log.Infof("init zookeeper...")
	if zk, err = NewZookeeper(config.ZookeeperAddrs, config.ZookeeperTimeout, config.ZookeeperPitchforkRoot, config.ZookeeperStoreRoot,
		config.ZookeeperVolumeRoot); err != nil {
		log.Errorf("NewZookeeper() failed, Quit now")
		return
	}
	log.Infof("register pitchfork...")
	if p, err = NewPitchfork(zk, config); err != nil {
		log.Errorf("pitchfork NewPitchfork() failed, Quit now")
		return
	}
	log.Infof("starts probe stores...")
	go p.Probe()
	StartSignal()
	return
}
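StartSignal presumably blocks until the process is told to stop, letting the deferred log.Flush run on the way out. A sketch of what such a helper typically looks like; this is an assumption about its behaviour, not the actual bfs implementation:
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// startSignal blocks until SIGINT or SIGTERM arrives, so that main can return
// and run its deferred cleanup (the equivalent of log.Flush above).
func startSignal() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	sig := <-ch
	fmt.Println("received signal:", sig)
}

func main() {
	defer fmt.Println("flushing logs and exiting")
	fmt.Println("running; press Ctrl-C to stop")
	startSignal()
}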
Example #29
func (rm *ReplicationManager) syncReplicationController(controllerSpec api.ReplicationController) error {
	s := labels.Set(controllerSpec.DesiredState.ReplicaSelector).AsSelector()
	podList, err := rm.kubeClient.ListPods(s)
	if err != nil {
		return err
	}
	filteredList := rm.filterActivePods(podList.Items)
	diff := len(filteredList) - controllerSpec.DesiredState.Replicas
	if diff < 0 {
		diff *= -1
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.Infof("Too few replicas, creating %d\n", diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				rm.podControl.createReplica(controllerSpec)
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		glog.Infof("Too many replicas, deleting %d\n", diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				rm.podControl.deletePod(filteredList[ix].ID)
			}(i)
		}
		wait.Wait()
	}
	return nil
}
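Both branches above fan work out with a sync.WaitGroup, and the delete branch passes the loop index into the goroutine so the goroutines do not share the loop variable. A self-contained sketch of that WaitGroup fan-out:
package main

import (
	"fmt"
	"sync"
)

func main() {
	diff := 3
	var wg sync.WaitGroup
	wg.Add(diff)
	for i := 0; i < diff; i++ {
		go func(ix int) {
			defer wg.Done()
			fmt.Println("creating replica", ix)
		}(i)
	}
	wg.Wait()
	fmt.Println("all replicas created")
}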
Example #30
// Delete deletes the Backend for the given port.
func (b *Backends) Delete(port int64) (err error) {
	name := beName(port)
	glog.Infof("Deleting backend %v", name)
	defer func() {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			err = nil
		}
		if err == nil {
			b.pool.Delete(portKey(port))
		}
	}()
	// Try deleting health checks and instance groups, even if a backend is
	// not found. This guards against the case where we create one of the
	// other 2 and run out of quota creating the backend.
	if err = b.cloud.DeleteBackendService(name); err != nil &&
		!isHTTPErrorCode(err, http.StatusNotFound) {
		return err
	}
	if err = b.healthChecker.Delete(port); err != nil &&
		!isHTTPErrorCode(err, http.StatusNotFound) {
		return err
	}
	glog.Infof("Deleting instance group %v", name)
	if err = b.nodePool.DeleteInstanceGroup(name); err != nil {
		return err
	}
	return nil
}
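Because err is a named return value, the deferred closure in Delete can rewrite a "not found" failure into success before the function returns. A minimal sketch of that deferred error normalisation, with an illustrative sentinel error and deleter:
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// deleteBackend reports errNotFound internally, but the deferred closure turns
// that into success because a missing backend leaves nothing to delete.
func deleteBackend(name string, present bool) (err error) {
	defer func() {
		if errors.Is(err, errNotFound) {
			err = nil
		}
	}()
	if !present {
		return errNotFound
	}
	fmt.Println("deleted backend", name)
	return nil
}

func main() {
	fmt.Println(deleteBackend("be-80", true))   // <nil>
	fmt.Println(deleteBackend("be-443", false)) // <nil>, not-found swallowed
}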