func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return
	const watcherCount = 50
	runtime.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "nothing",
					Image: "kubernetes/pause",
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}

		go func(name, rv string) {
			w, err := client.Pods(ns).Watch(
				labels.Set{"watchlabel": name}.AsSelector(),
				fields.Everything(),
				rv,
			)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: "kubernetes/pause",
							}},
						},
					})
					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = "kubernetes/pause:1"
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't update %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}
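
// The "unrelated changes" blocks in TestMultiWatch use a fan-out worker pool:
// one goroutine feeds work indices into a channel, a fixed number of workers
// drain it, and a completion channel lets the main goroutine block until a
// minimum amount of work is done while the remainder continues in the
// background. The sketch below is a minimal, self-contained version of that
// pattern using only the standard library ("sync" and "time"); the counts and
// the Sleep stand in for the test's real API calls and are illustrative only.
func exampleWorkerPool() {
	const (
		totalWork   = 3000 // hypothetical total number of work items
		workerCount = 50   // hypothetical number of concurrent workers
		waitFor     = 2000 // block until at least this many items are done
	)

	var wg sync.WaitGroup
	toDo := make(chan int, totalWork)
	done := make(chan int, totalWork)

	// Producer: enqueue all work items, then close the channel so workers exit.
	go func() {
		for i := 0; i < totalWork; i++ {
			toDo <- i
		}
		close(toDo)
	}()

	// Workers: drain the queue and report each completed item.
	for w := 0; w < workerCount; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := range toDo {
				time.Sleep(time.Millisecond) // stand-in for the real work
				done <- i
			}
		}()
	}

	// Wait until at least waitFor items have completed; the rest keep running
	// in the background, just as the real test defers wg.Wait().
	for i := 0; i < waitFor; i++ {
		<-done
	}
	wg.Wait()
}
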
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}

	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}

	// define api config source
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	client, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Add event recorder
	Hostname := nodeutil.GetHostname(s.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: Hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))

	s.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      Hostname,
		UID:       types.UID(Hostname),
		Namespace: "",
	}

	// Birth Cry
	s.birthCry()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := utiliptables.ProtocolIpv4
	if s.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	var proxier proxy.ProxyProvider
	var endpointsHandler config.EndpointsConfigHandler

	// guaranteed false on error, error only necessary for debugging
	shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
	if err != nil {
		glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
	}
	if !s.ForceUserspaceProxy && shouldUseIptables {
		glog.V(2).Info("Using iptables Proxier.")

		proxierIptables, err := iptables.NewProxier(utiliptables.New(exec.New(), protocol))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(loadBalancer, s.BindAddress, utiliptables.New(exec.New(), protocol), s.PortRange)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire endpointsHandler to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(endpointsHandler)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	config.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}
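
// The ordering note in Run (register handlers before creating sources) matters
// because a source pushes its initial snapshot as soon as it starts; a handler
// registered afterwards never sees that first update. The toy sketch below
// uses made-up names (printHandler, initialSnapshotSource) rather than the
// real config package to illustrate that failure mode under those assumptions.
type printHandler struct{ name string }

func (h printHandler) OnUpdate(services []string) {
	fmt.Printf("%s saw: %v\n", h.name, services)
}

// initialSnapshotSource mimics a config source: it notifies every handler that
// is already registered with the initial snapshot, then returns.
func initialSnapshotSource(handlers []printHandler, snapshot []string) {
	for _, h := range handlers {
		h.OnUpdate(snapshot)
	}
}

func exampleHandlerOrdering() {
	early := printHandler{name: "registered-before-source"}
	late := printHandler{name: "registered-after-source"}

	// The source starts with only the early handler registered, so it alone
	// receives the initial snapshot.
	initialSnapshotSource([]printHandler{early}, []string{"svc-a", "svc-b"})

	// The late handler is added only now; it has missed the initial state and
	// would only be invoked for subsequent changes (none in this sketch).
	_ = late
}
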
func TestSingleWatch(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()

	ns := "blargh"
	deleteAllEtcdKeys()

	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	mkEvent := func(i int) *api.Event {
		name := fmt.Sprintf("event-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Namespace: ns,
				Name:      name,
			},
			InvolvedObject: api.ObjectReference{
				Namespace: ns,
				Name:      name,
			},
			Reason: fmt.Sprintf("event %v", i),
		}
	}

	rv1 := ""
	for i := 0; i < 10; i++ {
		event := mkEvent(i)
		got, err := client.Events(ns).Create(event)
		if err != nil {
			t.Fatalf("Failed creating event %#q: %v", event, err)
		}
		if rv1 == "" {
			rv1 = got.ResourceVersion
			if rv1 == "" {
				t.Fatal("did not get a resource version.")
			}
		}
		t.Logf("Created event %#v", got.ObjectMeta)
	}

	w, err := client.Get().
		Prefix("watch").
		NamespaceIfScoped(ns, len(ns) > 0).
		Resource("events").
		Name("event-9").
		Param("resourceVersion", rv1).
		Watch()
	if err != nil {
		t.Fatalf("Failed watch: %v", err)
	}
	defer w.Stop()

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("watch took longer than 5 seconds")
	case got, ok := <-w.ResultChan():
		if !ok {
			t.Fatal("Watch channel closed unexpectedly.")
		}

		// We expect to see an ADD of event-9 and only event-9. (This
		// catches a bug where all the events would have been sent down
		// the channel.)
		if e, a := watch.Added, got.Type; e != a {
			t.Errorf("Wanted %v, got %v", e, a)
		}
		switch o := got.Object.(type) {
		case *api.Event:
			if e, a := "event-9", o.Name; e != a {
				t.Errorf("Wanted %v, got %v", e, a)
			}
		default:
			t.Fatalf("Unexpected watch event containing object %#q", got)
		}
	}
}
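
// TestSingleWatch waits for exactly one event with a deadline by selecting
// between the watch's result channel and time.After. A minimal, generic
// version of that pattern is sketched below using only the standard library;
// waitForOne and its string channel are illustrative stand-ins, not part of
// the real watch package.
func waitForOne(ch <-chan string, timeout time.Duration) (string, error) {
	select {
	case <-time.After(timeout):
		return "", fmt.Errorf("timed out after %v waiting for an event", timeout)
	case v, ok := <-ch:
		if !ok {
			return "", fmt.Errorf("channel closed before an event arrived")
		}
		return v, nil
	}
}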