func TestServiceAlloc(t *testing.T) {
	cfg := framework.NewIntegrationTestMasterConfig()
	_, cidr, err := net.ParseCIDR("192.168.0.0/30")
	if err != nil {
		t.Fatalf("bad cidr: %v", err)
	}
	cfg.ServiceClusterIPRange = cidr
	_, s := framework.RunAMaster(cfg)
	defer s.Close()

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	svc := func(i int) *api.Service {
		return &api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: fmt.Sprintf("svc-%v", i),
			},
			Spec: api.ServiceSpec{
				Type: api.ServiceTypeClusterIP,
				Ports: []api.ServicePort{
					{Port: 80},
				},
			},
		}
	}

	// Make a service.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(1)); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}

	// Make a second service. It will fail because we're out of cluster IPs.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
		if !strings.Contains(err.Error(), "range is full") {
			t.Errorf("unexpected error text: %v", err)
		}
	} else {
		t.Fatalf("unexpected success")
	}

	// Delete the first service.
	if err := client.Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}

	// This time creating the second service should work.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}
}
func main() {
	client := CreateClusterClient()
	serviceUpdater := CreateServiceUpdater(client)

	w, err := client.Services(api.NamespaceAll).Watch(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		log.Fatalln("Unable to watch services:", err)
	}

	log.Println("Watching services")
	for event := range w.ResultChan() {
		service, ok := event.Object.(*api.Service)
		if !ok {
			log.Println("Got a non-service object")
			continue
		}

		if event.Type == watch.Added || event.Type == watch.Modified {
			if k8svamprouter.ShouldUpdateServiceRoute(service) {
				serviceUpdater.UpdateServiceRouting(service)
			}
		} else if event.Type == watch.Deleted {
			serviceUpdater.RemoveServiceRouting(service)
		}
	}
}
// ShouldInitializeData tries to determine whether we're dealing with
// existing OpenShift data and config. It determines that data exists by
// checking for the existence of a docker-registry service.
func (c *ClientStartConfig) ShouldInitializeData() bool {
	if c.shouldInitializeData != nil {
		return *c.shouldInitializeData
	}

	result := func() bool {
		if !c.UseExistingConfig {
			return true
		}
		// For now, we determine if using existing etcd data by looking
		// for the registry service.
		_, kclient, err := c.Clients()
		if err != nil {
			glog.V(2).Infof("Cannot access OpenShift master: %v", err)
			return true
		}
		if _, err = kclient.Services(openshift.DefaultNamespace).Get(openshift.SvcDockerRegistry); err != nil {
			return true
		}
		// If a registry exists, then don't initialize data.
		return false
	}()
	c.shouldInitializeData = &result
	return result
}
func TestMasterService(t *testing.T) {
	_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer s.Close()

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
			}
		}
		if found {
			ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
func TestMasterService(t *testing.T) {
	// TODO: Limit the test to a single non-default namespace and clean this up at the end.
	framework.DeleteAllEtcdKeys()

	m, err := master.New(framework.NewIntegrationTestMasterConfig())
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}

	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
			}
		}
		if found {
			ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
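Both versions of TestMasterService above call a countEndpoints helper that the snippets do not include. A minimal sketch, assuming the helper simply tallies address/port combinations across the endpoint subsets (the real helper may differ):

// countEndpoints is assumed to count how many address/port combinations an
// Endpoints object exposes; zero means the service is not yet backed by anything.
func countEndpoints(eps *api.Endpoints) int {
	count := 0
	for i := range eps.Subsets {
		count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
	}
	return count
}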
func main() {
	kingpin.Parse()

	var oldBuf string

	// Create a client which we can use to connect to the remote Kubernetes cluster.
	config := &client.Config{
		Host: *cliMaster,
	}
	client, err := client.New(config)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// This ensures we don't overwhelm the Kubernetes API.
	t, err := time.ParseDuration(*cliRefresh)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// This is an ongoing process to pull in new services.
	limiter := time.Tick(t)
	for {
		<-limiter

		// Get the data from the Kubernetes service API.
		log.Println("Refreshing data...")

		// This is the object which we will populate with new data.
		haproxy := Config{
			Port: *cliPort,
		}

		// Get a list of all the services.
		svcs, err := client.Services("").List(api.ListOptions{})
		if err != nil {
			log.Warn(err)
			continue
		}

		// Filter the list down to only the services we need.
		for _, s := range svcs.Items {
			// Only register services which are being balanced internally.
			if s.Spec.Type != "LoadBalancer" {
				log.Printf("Skipped service: %s", s.ObjectMeta.Name)
				continue
			}

			// Ensure we have the "domain" label set.
			if val, ok := s.ObjectMeta.Labels["domain"]; ok {
				// Get a list of all the pods and their IPs to add to the HAProxy.
				pods, err := client.Pods(s.ObjectMeta.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(s.Spec.Selector))})
				if err != nil {
					log.Println(err)
					continue
				}

				// Start building the new listener object.
				l := Listener{
					Domain: val,
				}

				// Populate the list of pod IPs.
				for _, p := range pods.Items {
					if p.Status.Phase != api.PodRunning {
						continue
					}
					l.Servers = append(l.Servers, Server{Name: p.ObjectMeta.Name, Address: p.Status.PodIP + ":80"})
				}

				haproxy.Listeners = append(haproxy.Listeners, l)
				log.Printf("Added service: %s", s.ObjectMeta.Name)
			}
		}

		// Attempt to rebuild the HAProxy configuration.
		t := template.Must(template.New("haproxy").Parse(tpl))
		buf := new(bytes.Buffer)
		err = t.Execute(buf, haproxy)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Compare the new configuration against the previous one.
		fmt.Println("Current")
		fmt.Println(buf.String())
		fmt.Println("Old")
		fmt.Println(oldBuf)
		if buf.String() == oldBuf {
			log.Warn("Configuration has not changed")
			continue
		}

		// Determine the current running HAProxy process.
		pid, err := ioutil.ReadFile(*cliPid)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Write out the configuration to a file.
		err = ioutil.WriteFile(*cliConf, buf.Bytes(), 0644)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Trigger a graceful reload of the HAProxy service.
		if err := exec.Command("haproxy", "-f", *cliConf, "-p", *cliPid, "-D", "-sf", string(pid)).Run(); err != nil {
			log.Warn(err)
			continue
		}

		// Remember what we just rendered so the next iteration can detect changes.
		oldBuf = buf.String()
	}
}
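The loop above assumes Config, Listener, and Server types plus a tpl HAProxy template that are defined elsewhere in the project. A hypothetical minimal shape for them, only to make the rendering step concrete (the real definitions and template will differ, and the Port type depends on how the cliPort flag is declared):

// Hypothetical supporting types for the HAProxy generator above.
type Server struct {
	Name    string // pod name
	Address string // "ip:port" the pod serves on
}

type Listener struct {
	Domain  string   // value of the service's "domain" label
	Servers []Server // running pods backing this domain
}

type Config struct {
	Port      string // frontend bind port; assumes cliPort is a kingpin string flag
	Listeners []Listener
}

// A minimal template the loop could render; the actual tpl is not shown in the snippet.
const tpl = `
frontend http-in
    bind *:{{.Port}}
{{range .Listeners}}    acl {{.Domain}} hdr(host) -i {{.Domain}}
    use_backend be-{{.Domain}} if {{.Domain}}
{{end}}
{{range .Listeners}}backend be-{{.Domain}}
{{range .Servers}}    server {{.Name}} {{.Address}} check
{{end}}{{end}}`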
func TestServiceAlloc(t *testing.T) {
	cfg := framework.NewIntegrationTestMasterConfig()
	_, cidr, err := net.ParseCIDR("192.168.0.0/30")
	if err != nil {
		t.Fatalf("bad cidr: %v", err)
	}
	cfg.ServiceClusterIPRange = cidr
	_, s := framework.RunAMaster(cfg)
	defer s.Close()

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	svc := func(i int) *api.Service {
		return &api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: fmt.Sprintf("svc-%v", i),
			},
			Spec: api.ServiceSpec{
				Type: api.ServiceTypeClusterIP,
				Ports: []api.ServicePort{
					{Port: 80},
				},
			},
		}
	}

	// Wait until the default "kubernetes" service is created.
	if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
		_, err := client.Services(api.NamespaceDefault).Get("kubernetes")
		if err != nil && !errors.IsNotFound(err) {
			return false, err
		}
		return !errors.IsNotFound(err), nil
	}); err != nil {
		t.Fatalf("creating kubernetes service timed out")
	}

	// Make a service.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(1)); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}

	// Make a second service. It will fail because we're out of cluster IPs.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
		if !strings.Contains(err.Error(), "range is full") {
			t.Errorf("unexpected error text: %v", err)
		}
	} else {
		svcs, err := client.Services(api.NamespaceAll).List(api.ListOptions{})
		if err != nil {
			t.Fatalf("unexpected success, and error getting the services: %v", err)
		}
		allIPs := []string{}
		for _, s := range svcs.Items {
			allIPs = append(allIPs, s.Spec.ClusterIP)
		}
		t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 2 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
	}

	// Delete the first service.
	if err := client.Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}

	// This time creating the second service should work.
	if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
		t.Fatalf("got unexpected error: %v", err)
	}
}