func TestUnschedulableNodes(t *testing.T) {
	helper, err := master.NewEtcdHelper(newEtcdClient(), testapi.Version())
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	deleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:        helper,
		KubeletClient:     client.FakeKubeletClient{},
		EnableLogsSupport: false,
		EnableUISupport:   false,
		EnableIndex:       true,
		APIPrefix:         "/api",
		Authorizer:        apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:  admit.NewAlwaysAdmit(),
	})

	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(client)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(client.Events(""))
	scheduler.New(schedulerConfig).Run()
	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, client)
}
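
// exampleUnschedulableNodeCheck is a minimal, hypothetical sketch of the kind
// of assertion DoTestUnschedulableNodes (defined elsewhere in this package)
// performs: mark a node unschedulable, create a pod, and confirm the scheduler
// leaves it unbound. The function name, the object names, the fixed sleep, and
// the use of pod.Spec.Host (renamed NodeName in later API versions) are
// assumptions for illustration, not the real helper's fixtures.
func exampleUnschedulableNodeCheck(t *testing.T, restClient *client.Client) {
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "example-unschedulable-node"},
		Spec:       api.NodeSpec{Unschedulable: true},
	}
	if _, err := restClient.Nodes().Create(node); err != nil {
		t.Fatalf("Failed to create node: %v", err)
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "example-pod"},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "nothing", Image: "kubernetes/pause"}},
		},
	}
	if _, err := restClient.Pods(api.NamespaceDefault).Create(pod); err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}
	// Give the scheduler a moment, then verify the pod was not assigned a host.
	time.Sleep(5 * time.Second)
	got, err := restClient.Pods(api.NamespaceDefault).Get("example-pod")
	if err != nil {
		t.Fatalf("Failed to get pod: %v", err)
	}
	if got.Spec.Host != "" {
		t.Errorf("Pod was scheduled onto %q even though every node is unschedulable", got.Spec.Host)
	}
}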

func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return
	const watcherCount = 50
	runtime.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "nothing",
					Image: "kubernetes/pause",
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}

		go func(name, rv string) {
			w, err := client.Pods(ns).Watch(
				labels.Set{"watchlabel": name}.AsSelector(),
				fields.Everything(),
				rv,
			)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: "kubernetes/pause",
							}},
						},
					})
					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 2000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = "kubernetes/pause:1"
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't update %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	// Fail deliberately so the collected per-watcher durations are reported in the test output.
	t.Errorf("durations: %v", dur)
}
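
// summarizeDurations is a small, illustrative helper (not part of the original
// test) showing one way to condense the per-watcher latency map that
// TestMultiWatch collects into min/mean/max figures instead of dumping the raw
// map. The helper name is an assumption for illustration.
func summarizeDurations(dur map[string]time.Duration) (min, mean, max time.Duration) {
	if len(dur) == 0 {
		return 0, 0, 0
	}
	var total time.Duration
	first := true
	for _, d := range dur {
		if first {
			min, max = d, d
			first = false
		}
		if d < min {
			min = d
		}
		if d > max {
			max = d
		}
		total += d
	}
	mean = total / time.Duration(len(dur))
	return min, mean, max
}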

func TestSingleWatch(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()

	ns := "blargh"
	deleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	mkEvent := func(i int) *api.Event {
		name := fmt.Sprintf("event-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Namespace: ns,
				Name:      name,
			},
			InvolvedObject: api.ObjectReference{
				Namespace: ns,
				Name:      name,
			},
			Reason: fmt.Sprintf("event %v", i),
		}
	}

	rv1 := ""
	for i := 0; i < 10; i++ {
		event := mkEvent(i)
		got, err := client.Events(ns).Create(event)
		if err != nil {
			t.Fatalf("Failed creating event %#q: %v", event, err)
		}
		if rv1 == "" {
			rv1 = got.ResourceVersion
			if rv1 == "" {
				t.Fatal("did not get a resource version.")
			}
		}
		t.Logf("Created event %#v", got.ObjectMeta)
	}

	w, err := client.Get().
		Prefix("watch").
		NamespaceIfScoped(ns, len(ns) > 0).
		Resource("events").
		Name("event-9").
		Param("resourceVersion", rv1).
		Watch()
	if err != nil {
		t.Fatalf("Failed watch: %v", err)
	}
	defer w.Stop()

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("watch took longer than 5 seconds")
	case got, ok := <-w.ResultChan():
		if !ok {
			t.Fatal("Watch channel closed unexpectedly.")
		}

		// We expect to see an ADD of event-9 and only event-9. (This
		// catches a bug where all the events would have been sent down
		// the channel.)
		if e, a := watch.Added, got.Type; e != a {
			t.Errorf("Wanted %v, got %v", e, a)
		}
		switch o := got.Object.(type) {
		case *api.Event:
			if e, a := "event-9", o.Name; e != a {
				t.Errorf("Wanted %v, got %v", e, a)
			}
		default:
			t.Fatalf("Unexpected watch event containing object %#q", got)
		}
	}
}
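
// expectNoMoreWatchEvents is a hedged companion sketch (not part of the
// original test): after TestSingleWatch has consumed the single expected ADD,
// a check like this could assert that nothing else arrives on the watch
// channel within a short window. The helper name and the timeout parameter
// are illustrative assumptions.
func expectNoMoreWatchEvents(t *testing.T, w watch.Interface, window time.Duration) {
	select {
	case e, ok := <-w.ResultChan():
		if ok {
			t.Errorf("expected no further watch events, got %v: %#v", e.Type, e.Object)
		}
	case <-time.After(window):
		// Nothing arrived within the window, which is the expected outcome.
	}
}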