Example #1
// create a headless service for the pod, so that we can use the service IP instead of the pod's volatile IP
// @param selector - selector that will find the corresponding pod; it should have the label "replica" set
// @return *api.Service - created service
// @return error
func (cluster *CouchdbCluster) CreatePodService(selector map[string]string) (*api.Service, error) {
	// service special label
	serviceLabels := make(map[string]string)
	for k, v := range selector {
		serviceLabels[k] = v
	}
	// add special label
	serviceLabels[LABEL_POD_SERVICE] = "true"

	// svc port
	svcPorts := api.ServicePort{Port: COUCHDB_PORT}
	// service specs
	serviceSpec := api.ServiceSpec{Selector: selector, Ports: []api.ServicePort{svcPorts} /* ClusterIP: "None"*/}
	// init service struct
	service := api.Service{Spec: serviceSpec}
	service.GenerateName = cluster.Tag + "-pod-"
	service.Labels = serviceLabels
	// get a new kube client
	c, err := KubeClient(KUBE_API)
	// check for errors
	if err != nil {
		ErrorLog("spawner_rc: CreatePodService: Cannot connect to Kubernetes api ")
		ErrorLog(err)
		return nil, err
	} else {
		// create service in namespace
		return c.Services(cluster.Namespace).Create(&service)
	}
}
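
A minimal usage sketch (not part of the original source), assuming the CouchdbCluster value exposes the Tag field used above, that pods carry an "app" label alongside the "replica" label required by CreatePodService, and that "fmt" and "strconv" are imported:

// Hypothetical helper, for illustration only: create one pod service per replica.
func createReplicaServices(cluster *CouchdbCluster, replicas int) error {
	for i := 0; i < replicas; i++ {
		selector := map[string]string{
			"app":     cluster.Tag,     // assumed pod label
			"replica": strconv.Itoa(i), // label expected by CreatePodService
		}
		svc, err := cluster.CreatePodService(selector)
		if err != nil {
			return err
		}
		fmt.Printf("created pod service %s for replica %d\n", svc.Name, i)
	}
	return nil
}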
Example #2
// allocate assigns an unallocated ip to a service and updates the
// service's persisted state.
func (ic *IngressIPController) allocate(service *kapi.Service, key string) error {
	// Make a copy to avoid mutating cache state
	t, err := kapi.Scheme.DeepCopy(service)
	if err != nil {
		return err
	}
	service = t.(*kapi.Service)

	ip, err := ic.allocateIP(service.Spec.LoadBalancerIP)
	if err != nil {
		return err
	}
	ipString := ip.String()

	glog.V(5).Infof("Allocating ip %v to service %v", ipString, key)
	service.Status = kapi.ServiceStatus{
		LoadBalancer: kapi.LoadBalancerStatus{
			Ingress: []kapi.LoadBalancerIngress{
				{
					IP: ipString,
				},
			},
		},
	}
	if err = ic.persistServiceStatus(service); err != nil {
		if releaseErr := ic.ipAllocator.Release(ip); releaseErr != nil {
			// Release from contiguous allocator should never return an error, but just in case...
			utilruntime.HandleError(fmt.Errorf("Error releasing ip %v for service %v: %v", ipString, key, releaseErr))
		}
		return err
	}
	ic.allocationMap[ipString] = key

	return ic.ensureExternalIP(service, key, ipString)
}
func TestEtcdUpdateService(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	fakeClient.TestIndex = true
	registry, rest := NewTestEtcdRegistry(fakeClient)
	key, _ := rest.KeyFunc(ctx, "uniquefoo")
	key = etcdtest.AddPrefix(key)
	resp, _ := fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, makeTestService("uniquefoo")), 0)
	testService := api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:            "uniquefoo",
			ResourceVersion: strconv.FormatUint(resp.Node.ModifiedIndex, 10),
			Labels: map[string]string{
				"baz": "bar",
			},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{
				{Name: "port", Protocol: api.ProtocolTCP, Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345)},
			},
			Selector: map[string]string{
				"baz": "bar",
			},
			SessionAffinity: "None",
			Type:            api.ServiceTypeClusterIP,
		},
	}
	_, err := registry.UpdateService(ctx, &testService)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	svc, err := registry.GetService(ctx, "uniquefoo")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// Clear modified indices before the equality test.
	svc.ResourceVersion = ""
	testService.ResourceVersion = ""
	if !api.Semantic.DeepEqual(*svc, testService) {
		t.Errorf("Unexpected service: got\n %#v\n, wanted\n %#v", svc, testService)
	}
}
Example #4
// Kubernetes Service to protobuf struct
func ServiceToPbStruct(service *api.Service) *types.Service {
	todata := &types.Service{
		Name:     service.GetName(),
		Labels:   service.ObjectMeta.Labels,
		Selector: service.Spec.Selector,
	}
	servicePorts := service.Spec.Ports
	// parse the service's port information
	port := make([]*types.ServicePort, len(servicePorts))
	for k, v := range servicePorts {
		port[k] = &types.ServicePort{
			Name:       v.Name,
			Protocol:   string(v.Protocol),
			Port:       int32(v.Port),
			TargetPort: v.TargetPort.IntVal,
			NodePort:   int32(v.NodePort),
		}
	}
	todata.Port = port
	return todata
}
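
A hedged usage sketch (not from the original code): fetch a Service with the legacy client type used elsewhere on this page and convert it to its protobuf form. The client setup and the Get call are assumptions; only ServiceToPbStruct comes from the example above.

// Illustrative only: look up a Service and return its protobuf representation.
func describeService(c *client.Client, namespace, name string) (*types.Service, error) {
	svc, err := c.Services(namespace).Get(name)
	if err != nil {
		return nil, err
	}
	return ServiceToPbStruct(svc), nil
}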
Example #5
func getTestService(identifier string, requestedPorts ...int32) api.Service {
	ports := []api.ServicePort{}
	for _, port := range requestedPorts {
		ports = append(ports, api.ServicePort{
			Name:     fmt.Sprintf("port-%d", port),
			Protocol: api.ProtocolTCP,
			Port:     port,
			NodePort: getBackendPort(port),
		})
	}

	svc := api.Service{
		Spec: api.ServiceSpec{
			Type:  api.ServiceTypeLoadBalancer,
			Ports: ports,
		},
	}
	svc.Name = identifier
	svc.Namespace = "default"
	svc.UID = types.UID(identifier)

	return svc
}
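
A possible companion test (illustrative, not in the original) that exercises getTestService and relies on the getBackendPort helper assumed above:

func TestGetTestServicePorts(t *testing.T) {
	svc := getTestService("servicea", 80, 443)
	if len(svc.Spec.Ports) != 2 {
		t.Fatalf("expected 2 ports, got %d", len(svc.Spec.Ports))
	}
	for _, p := range svc.Spec.Ports {
		if p.NodePort != getBackendPort(p.Port) {
			t.Errorf("unexpected NodePort %d for port %d", p.NodePort, p.Port)
		}
	}
}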
func (su *ServiceUpdater) UpdateServiceRouting(service *api.Service) error {
	err := su.UpdateRouteIfNeeded(service)
	if err != nil {
		log.Println("Unable to update service route", err)

		return err
	}

	if ServiceHasLoadBalancerAddress(service) {
		log.Println("The route was found and the service has an address, not updating the status")

		return nil
	}

	log.Println("Found route for the service", service.ObjectMeta.Name, "updating the service load-balancer status")
	service.Status = api.ServiceStatus{
		LoadBalancer: api.LoadBalancerStatus{
			Ingress: []api.LoadBalancerIngress{
				api.LoadBalancerIngress{
					Hostname: su.GetDomainNamesFromService(service)[0],
				},
			},
		},
	}

	_, err = su.ServiceRepository.Update(service)
	if err != nil {
		log.Println("Error while updating the service:", err)

		return err
	}

	log.Println("Successfully updated the service status")

	return nil
}
Example #7
func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *api.Service, client *clientset.Clientset) error {
	var err error
	var needUpdate bool
	for i := 0; i < clientRetryCount; i++ {
		// assign to the outer err (instead of shadowing it) so the final return reports the last failure
		var svc *api.Service
		svc, err = client.Core().Services(service.Namespace).Get(service.Name)
		if err == nil {
			// service exists
			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
			// preserve immutable fields
			service.Spec.ClusterIP = svc.Spec.ClusterIP

			// preserve auto-assigned fields
			for i, oldPort := range svc.Spec.Ports {
				for _, port := range service.Spec.Ports {
					if port.NodePort == 0 {
						if !portEqualExcludeNodePort(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					} else {
						if !portEqualForLB(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					}
				}
			}

			if needUpdate {
				// we only apply spec update
				svc.Spec = service.Spec
				_, err = client.Core().Services(svc.Namespace).Update(svc)
				if err == nil {
					glog.V(5).Infof("Service %s/%s successfully updated to cluster %s", svc.Namespace, svc.Name, clusterName)
					return nil
				} else {
					glog.V(4).Infof("Failed to update %+v", err)
				}
			} else {
				glog.V(5).Infof("Service %s/%s is not updated to cluster %s as the spec are identical", svc.Namespace, svc.Name, clusterName)
				return nil
			}
		} else if errors.IsNotFound(err) {
			// Create service if it is not found
			glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create new",
				service.Namespace, service.Name, clusterName)
			service.ResourceVersion = ""
			_, err = client.Core().Services(service.Namespace).Create(service)
			if err == nil {
				glog.V(5).Infof("Service %s/%s successfully created to cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
			glog.V(4).Infof("Failed to create %+v", err)
			if errors.IsAlreadyExists(err) {
				glog.V(5).Infof("service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
		}
		// should we reuse same retry delay for all clusters?
		time.Sleep(cachedService.nextRetryDelay())
	}
	return err
}
Example #8
func (self *realKubeFramework) CreateService(ns string, service *api.Service) (*api.Service, error) {
	service.Namespace = ns
	newSvc, err := self.kubeClient.Services(ns).Create(service)
	return newSvc, err
}
Example #9
func TestGetLoadBalancerSourceRanges(t *testing.T) {
	checkError := func(v string) {
		annotations := make(map[string]string)
		annotations[AnnotationLoadBalancerSourceRangesKey] = v
		svc := api.Service{}
		svc.Annotations = annotations
		_, err := GetLoadBalancerSourceRanges(&svc)
		if err == nil {
			t.Errorf("Expected error parsing: %q", v)
		}
		svc = api.Service{}
		svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",")
		_, err = GetLoadBalancerSourceRanges(&svc)
		if err == nil {
			t.Errorf("Expected error parsing: %q", v)
		}
	}
	checkError("10.0.0.1/33")
	checkError("foo.bar")
	checkError("10.0.0.1/32,*")
	checkError("10.0.0.1/32,")
	checkError("10.0.0.1/32, ")
	checkError("10.0.0.1")

	checkOK := func(v string) netsets.IPNet {
		annotations := make(map[string]string)
		annotations[AnnotationLoadBalancerSourceRangesKey] = v
		svc := api.Service{}
		svc.Annotations = annotations
		cidrs, err := GetLoadBalancerSourceRanges(&svc)
		if err != nil {
			t.Errorf("Unexpected error parsing: %q", v)
		}
		svc = api.Service{}
		svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",")
		cidrs, err = GetLoadBalancerSourceRanges(&svc)
		if err != nil {
			t.Errorf("Unexpected error parsing: %q", v)
		}
		return cidrs
	}
	cidrs := checkOK("192.168.0.1/32")
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	cidrs = checkOK("192.168.0.1/32,192.168.0.1/32")
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR (after de-dup): %v", cidrs.StringSlice())
	}
	cidrs = checkOK("192.168.0.1/32,192.168.0.2/32")
	if len(cidrs) != 2 {
		t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice())
	}
	cidrs = checkOK("  192.168.0.1/32 , 192.168.0.2/32   ")
	if len(cidrs) != 2 {
		t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice())
	}
	// check LoadBalancerSourceRanges not specified
	svc := api.Service{}
	cidrs, err := GetLoadBalancerSourceRanges(&svc)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	if !IsAllowAll(cidrs) {
		t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice())
	}
	// check SourceRanges annotation is empty
	annotations := make(map[string]string)
	annotations[AnnotationLoadBalancerSourceRangesKey] = ""
	svc = api.Service{}
	svc.Annotations = annotations
	cidrs, err = GetLoadBalancerSourceRanges(&svc)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	if !IsAllowAll(cidrs) {
		t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice())
	}
}
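
A sketch of how a caller might consume these ranges (hypothetical: only GetLoadBalancerSourceRanges and IsAllowAll come from the code above, netsets.IPNet is assumed to iterate as *net.IPNet values as the de-dup behaviour in the test suggests, and "net" must be imported):

// Illustrative only: report whether traffic from src is admitted by the service's source ranges.
func sourceAllowed(svc *api.Service, src net.IP) (bool, error) {
	cidrs, err := GetLoadBalancerSourceRanges(svc)
	if err != nil {
		return false, err
	}
	if IsAllowAll(cidrs) {
		return true, nil
	}
	for _, cidr := range cidrs {
		if cidr.Contains(src) {
			return true, nil
		}
	}
	return false, nil
}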
func ReviewService(client *client.Client, service *api.Service, rootDns string) error {
	if service.Spec.Type != api.ServiceTypeLoadBalancer {
		log.Println("Skipping service", service.ObjectMeta.Name, "as it is not a LoadBalancer")

		return nil
	}

	// If there's an IP and/or DNS address in the load balancer status, skip it
	if ServiceHasLoadBalancerAddress(service) {
		log.Println("Skipping service", service.ObjectMeta.Name, "as it already have a LoadBalancer address")

		return nil
	}

	log.Println("Service", service.ObjectMeta.Name, "needs to be reviewed")

	// Get existing proxy configuration
	var proxyConfiguration reverseproxy.Configuration
	if jsonConfiguration, found := service.ObjectMeta.Annotations["kubernetesReverseproxy"]; found {
		proxyConfiguration = reverseproxy.Configuration{}

		if err := json.Unmarshal([]byte(jsonConfiguration), &proxyConfiguration); err != nil {
			log.Println("Unable to unmarshal the configuration, keep the empty one")
		}

	} else {
		log.Println("No `kubernetesReverseproxy` annotation found")
		proxyConfiguration = reverseproxy.Configuration{}
	}

	// If no proxy configuration was found, create one for the expected hostname
	if len(proxyConfiguration.Hosts) == 0 {
		// Create the expected hostname of the service
		host := strings.Join([]string{
			service.ObjectMeta.Name,
			service.ObjectMeta.Namespace,
			rootDns,
		}, ".")

		// Append the new host to the configuration
		proxyConfiguration.Hosts = append(proxyConfiguration.Hosts, reverseproxy.Host{
			Host: host,
			Port: 80,
		})

		jsonConfiguration, err := json.Marshal(proxyConfiguration)
		if err != nil {
			log.Println("Unable to JSON-encode the proxy configuration: ", err)

			return err
		}

		if service.ObjectMeta.Annotations == nil {
			service.ObjectMeta.Annotations = map[string]string{}
		}

		service.ObjectMeta.Annotations["kubernetesReverseproxy"] = string(jsonConfiguration)

		// Update the service
		log.Println("Adding the `kubernetesReverseproxy` annotation to service")
		updated, err := client.Services(service.ObjectMeta.Namespace).Update(service)
		if err != nil {
			log.Println("Error while updated the service:", err)

			return err
		}

		log.Println("Successfully added the reverse proxy configuration", updated)
	} else {
		// Updating service load-balancer status
		log.Println("Updating the service load-balancer status")
		service.Status = api.ServiceStatus{
			LoadBalancer: api.LoadBalancerStatus{
				Ingress: []api.LoadBalancerIngress{
					api.LoadBalancerIngress{
						Hostname: proxyConfiguration.Hosts[0].Host,
					},
				},
			},
		}

		updated, err := client.Services(service.ObjectMeta.Namespace).Update(service)
		if err != nil {
			log.Println("Error while updated the service:", err)

			return err
		}

		log.Println("Successfully updated the service status", updated)
	}

	return nil
}