Example #1
// ensureOpenShiftInfraNamespace is called as part of global policy initialization to ensure infra namespace exists
func (c *MasterConfig) ensureOpenShiftInfraNamespace() {
	ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace

	// Ensure namespace exists
	namespace, err := c.KubeClient().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: ns}})
	if kapierror.IsAlreadyExists(err) {
		// Get the persisted namespace
		namespace, err = c.KubeClient().Namespaces().Get(ns)
		if err != nil {
			glog.Errorf("Error getting namespace %s: %v", ns, err)
			return
		}
	} else if err != nil {
		glog.Errorf("Error creating namespace %s: %v", ns, err)
		return
	}

	// Ensure service accounts exist
	serviceAccounts := []string{
		c.BuildControllerServiceAccount, c.DeploymentControllerServiceAccount, c.ReplicationControllerServiceAccount,
		c.JobControllerServiceAccount, c.HPAControllerServiceAccount, c.PersistentVolumeControllerServiceAccount,
	}
	for _, serviceAccountName := range serviceAccounts {
		_, err := c.KubeClient().ServiceAccounts(ns).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: serviceAccountName}})
		if err != nil && !kapierror.IsAlreadyExists(err) {
			glog.Errorf("Error creating service account %s/%s: %v", ns, serviceAccountName, err)
		}
	}

	// Ensure service account cluster role bindings exist
	clusterRolesToSubjects := map[string][]kapi.ObjectReference{
		bootstrappolicy.BuildControllerRoleName:            {{Namespace: ns, Name: c.BuildControllerServiceAccount, Kind: "ServiceAccount"}},
		bootstrappolicy.DeploymentControllerRoleName:       {{Namespace: ns, Name: c.DeploymentControllerServiceAccount, Kind: "ServiceAccount"}},
		bootstrappolicy.ReplicationControllerRoleName:      {{Namespace: ns, Name: c.ReplicationControllerServiceAccount, Kind: "ServiceAccount"}},
		bootstrappolicy.JobControllerRoleName:              {{Namespace: ns, Name: c.JobControllerServiceAccount, Kind: "ServiceAccount"}},
		bootstrappolicy.HPAControllerRoleName:              {{Namespace: ns, Name: c.HPAControllerServiceAccount, Kind: "ServiceAccount"}},
		bootstrappolicy.PersistentVolumeControllerRoleName: {{Namespace: ns, Name: c.PersistentVolumeControllerServiceAccount, Kind: "ServiceAccount"}},
	}
	roleAccessor := policy.NewClusterRoleBindingAccessor(c.ServiceAccountRoleBindingClient())
	for clusterRole, subjects := range clusterRolesToSubjects {
		addRole := &policy.RoleModificationOptions{
			RoleName:            clusterRole,
			RoleBindingAccessor: roleAccessor,
			Subjects:            subjects,
		}
		if err := addRole.AddRole(); err != nil {
			glog.Errorf("Could not add %v subjects to the %v cluster role: %v\n", subjects, clusterRole, err)
		} else {
			glog.V(2).Infof("Added %v subjects to the %v cluster role: %v\n", subjects, clusterRole, err)
		}
	}

	c.ensureNamespaceServiceAccountRoleBindings(namespace)
}
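This create-then-read shape recurs throughout these examples. Distilled, it looks like the sketch below; namespaceClient is a hypothetical interface standing in for whichever client the surrounding code already holds (here, c.KubeClient().Namespaces()).

// A minimal sketch of the create-or-get idiom, under the same kapi/kapierror
// imports as the example above. namespaceClient is hypothetical.
type namespaceClient interface {
	Create(ns *kapi.Namespace) (*kapi.Namespace, error)
	Get(name string) (*kapi.Namespace, error)
}

func ensureNamespaceSketch(c namespaceClient, name string) (*kapi.Namespace, error) {
	ns, err := c.Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: name}})
	if kapierror.IsAlreadyExists(err) {
		// Losing the create race still counts as success for "ensure";
		// read back whichever object actually got persisted.
		return c.Get(name)
	}
	return ns, err
}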
Example #2
// ensureOpenShiftInfraNamespace is called as part of global policy initialization to ensure infra namespace exists
func (c *MasterConfig) ensureOpenShiftInfraNamespace() {
	ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace

	// Ensure namespace exists
	namespace, err := c.KubeClient().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: ns}})
	if kapierror.IsAlreadyExists(err) {
		// Get the persisted namespace
		namespace, err = c.KubeClient().Namespaces().Get(ns)
		if err != nil {
			glog.Errorf("Error getting namespace %s: %v", ns, err)
			return
		}
	} else if err != nil {
		glog.Errorf("Error creating namespace %s: %v", ns, err)
		return
	}

	roleAccessor := policy.NewClusterRoleBindingAccessor(c.ServiceAccountRoleBindingClient())
	for _, saName := range bootstrappolicy.InfraSAs.GetServiceAccounts() {
		_, err := c.KubeClient().ServiceAccounts(ns).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: saName}})
		if err != nil && !kapierror.IsAlreadyExists(err) {
			glog.Errorf("Error creating service account %s/%s: %v", ns, saName, err)
		}

		role, _ := bootstrappolicy.InfraSAs.RoleFor(saName)

		reconcileRole := &policy.ReconcileClusterRolesOptions{
			RolesToReconcile: []string{role.Name},
			Confirmed:        true,
			Union:            true,
			Out:              ioutil.Discard,
			RoleClient:       c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(),
		}
		if err := reconcileRole.RunReconcileClusterRoles(nil, nil); err != nil {
			glog.Errorf("Could not reconcile %v: %v\n", role.Name, err)
		}

		addRole := &policy.RoleModificationOptions{
			RoleName:            role.Name,
			RoleBindingAccessor: roleAccessor,
			Subjects:            []kapi.ObjectReference{{Namespace: ns, Name: saName, Kind: "ServiceAccount"}},
		}
		if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error { return addRole.AddRole() }); err != nil {
			glog.Errorf("Could not add %v service accounts to the %v cluster role: %v\n", saName, role.Name, err)
		} else {
			glog.V(2).Infof("Added %v service accounts to the %v cluster role: %v\n", saName, role.Name, err)
		}
	}

	c.ensureNamespaceServiceAccountRoleBindings(namespace)
}
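The kclient.RetryOnConflict wrapper is the piece worth isolating: AddRole performs a read-modify-write on a shared role binding, so a 409 Conflict only means another writer got there first and the whole closure should run again against the fresh object. A minimal sketch of the same idiom using the client shown above (the annotation key is made up for illustration):

err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
	// Re-read on every attempt so a retry works against the latest resourceVersion.
	sa, err := c.KubeClient().ServiceAccounts(ns).Get(saName)
	if err != nil {
		return err
	}
	if sa.Annotations == nil {
		sa.Annotations = map[string]string{}
	}
	sa.Annotations["example.io/touched"] = "true" // hypothetical annotation
	_, err = c.KubeClient().ServiceAccounts(ns).Update(sa)
	return err // a Conflict error here triggers another attempt
})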
Example #3
func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {

	addrlist := make([]v1.EndpointAddress, len(hostips))
	for i, v := range hostips {
		addrlist[i].IP = v
	}
	endpoint = &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Namespace: namespace,
			Name:      epServiceName,
			Labels: map[string]string{
				"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
			},
		},
		Subsets: []v1.EndpointSubset{{
			Addresses: addrlist,
			Ports:     []v1.EndpointPort{{Port: 1, Protocol: "TCP"}},
		}},
	}
	_, err = p.plugin.host.GetKubeClient().Core().Endpoints(namespace).Create(endpoint)
	if err != nil && errors.IsAlreadyExists(err) {
		glog.V(1).Infof("glusterfs: endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
		err = nil
	}
	if err != nil {
		glog.Errorf("glusterfs: failed to create endpoint: %v", err)
		return nil, nil, fmt.Errorf("error creating endpoint: %v", err)
	}
	service = &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name:      epServiceName,
			Namespace: namespace,
			Labels: map[string]string{
				"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
			},
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{Protocol: "TCP", Port: 1}}}}
	_, err = p.plugin.host.GetKubeClient().Core().Services(namespace).Create(service)
	if err != nil && errors.IsAlreadyExists(err) {
		glog.V(1).Infof("glusterfs: service [%s] already exist in namespace [%s]", service, namespace)
		err = nil
	}
	if err != nil {
		glog.Errorf("glusterfs: failed to create service: %v", err)
		return nil, nil, fmt.Errorf("error creating service: %v", err)
	}
	return endpoint, service, nil
}
Example #4
func GetClientForServiceAccount(adminClient *kclientset.Clientset, clientConfig restclient.Config, namespace, name string) (*client.Client, *kclientset.Clientset, *restclient.Config, error) {
	_, err := adminClient.Core().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: namespace}})
	if err != nil && !kerrs.IsAlreadyExists(err) {
		return nil, nil, nil, err
	}

	sa, err := adminClient.Core().ServiceAccounts(namespace).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: name}})
	if kerrs.IsAlreadyExists(err) {
		sa, err = adminClient.Core().ServiceAccounts(namespace).Get(name)
	}
	if err != nil {
		return nil, nil, nil, err
	}

	token := ""
	err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		selector := fields.OneTermEqualSelector(kapi.SecretTypeField, string(kapi.SecretTypeServiceAccountToken))
		secrets, err := adminClient.Core().Secrets(namespace).List(kapi.ListOptions{FieldSelector: selector})
		if err != nil {
			return false, err
		}
		for _, secret := range secrets.Items {
			if serviceaccounts.IsValidServiceAccountToken(sa, &secret) {
				token = string(secret.Data[kapi.ServiceAccountTokenKey])
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	saClientConfig := clientcmd.AnonymousClientConfig(&clientConfig)
	saClientConfig.BearerToken = token

	kubeClient, err := kclient.New(&saClientConfig)
	if err != nil {
		return nil, nil, nil, err
	}
	kubeClientset := adapter.FromUnversionedClient(kubeClient)

	osClient, err := client.New(&saClientConfig)
	if err != nil {
		return nil, nil, nil, err
	}

	return osClient, kubeClientset, &saClientConfig, nil
}
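The wait.Poll block is the reusable part: the condition function returns (done, err), where (false, nil) means keep polling and a non-nil error aborts immediately. A sketch of the same idiom, assembled from calls already used above, that instead waits for the service account to reference a secret:

err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
	sa, err := adminClient.Core().ServiceAccounts(namespace).Get(name)
	if kerrs.IsNotFound(err) {
		return false, nil // not created yet; poll again
	}
	if err != nil {
		return false, err // hard failure; stop polling early
	}
	return len(sa.Secrets) > 0, nil // done once a secret is referenced
})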
Example #5
// EnsurePolicy returns the policy object for the specified namespace.  If one does not exist, it is created for you.  Permission to
// create, update, or delete roles in a namespace implies the ability to create a Policy object itself.
func (m *VirtualStorage) EnsurePolicy(ctx kapi.Context) (*authorizationapi.Policy, error) {
	policy, err := m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}

		// if we have no policy, go ahead and make one.  creating one here collapses code paths below.  We only take this hit once
		policy = NewEmptyPolicy(kapi.NamespaceValue(ctx))
		if err := m.PolicyStorage.CreatePolicy(ctx, policy); err != nil {
			// Tolerate the policy having been created in the meantime
			if !kapierrors.IsAlreadyExists(err) {
				return nil, err
			}
		}

		policy, err = m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
		if err != nil {
			return nil, err
		}

	}

	if policy.Roles == nil {
		policy.Roles = make(map[string]*authorizationapi.Role)
	}

	return policy, nil
}
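Note why the function re-reads after a tolerated AlreadyExists: the object that won the race may differ from the one it tried to create, so only a fresh Get yields the persisted state. Compressed, the shape is (same storage calls as above):

policy, err := m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
if kapierrors.IsNotFound(err) {
	// Optimistically create, tolerating a lost race...
	if err := m.PolicyStorage.CreatePolicy(ctx, NewEmptyPolicy(kapi.NamespaceValue(ctx))); err != nil && !kapierrors.IsAlreadyExists(err) {
		return nil, err
	}
	// ...then read back whichever object actually exists now.
	policy, err = m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
}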
Example #6
func (p *provision) Admit(a admission.Attributes) (err error) {
	gvk, err := api.RESTMapper.KindFor(a.GetResource())
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	mapping, err := api.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := p.store.Get(namespace)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if exists {
		return nil
	}
	_, err = p.client.Namespaces().Create(namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return admission.NewForbidden(a, err)
	}
	return nil
}
Example #7
func (p *provision) Admit(a admission.Attributes) (err error) {
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind() == api.Kind("Namespace") {
		return nil
	}

	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := p.store.Get(namespace)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if exists {
		return nil
	}
	_, err = p.client.Legacy().Namespaces().Create(namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return admission.NewForbidden(a, err)
	}
	return nil
}
Example #8
// CreateOrUpdateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist, and reconcile its ports and type when reconcile is set.
func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
	if s, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName); err == nil {
		// The service already exists.
		if reconcile {
			if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
				glog.Warningf("Resetting master service %q to %#v", serviceName, svc)
				_, err := c.ServiceClient.Services(api.NamespaceDefault).Update(svc)
				return err
			}
		}
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
		},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
			// maintained by this code, not by the pod selector
			Selector:        nil,
			ClusterIP:       serviceIP.String(),
			SessionAffinity: api.ServiceAffinityClientIP,
			Type:            serviceType,
		},
	}

	_, err := c.ServiceClient.Services(api.NamespaceDefault).Create(svc)
	if errors.IsAlreadyExists(err) {
		return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile)
	}
	return err
}
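The tail call handles the create/create race: if another writer created the service between the Get and the Create, rerunning the function takes the reconcile path against the now-existing object. The same control flow expressed as a loop, which makes the retry explicit (a sketch over the names used above):

for {
	if _, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName); err == nil {
		// Service exists: run the reconcile branch above, then stop.
		return nil
	}
	if _, err := c.ServiceClient.Services(api.NamespaceDefault).Create(svc); !errors.IsAlreadyExists(err) {
		return err // nil on success, or a genuine failure
	}
	// Lost the create race: loop back to the Get/reconcile path.
}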
Example #9
// ensurePolicyBindingToMaster returns a PolicyBinding object that has a PolicyRef pointing to the Policy in the passed namespace.
func (m *VirtualStorage) ensurePolicyBindingToMaster(ctx kapi.Context, policyNamespace, policyBindingName string) (*authorizationapi.PolicyBinding, error) {
	policyBinding, err := m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}

		// if we have no policyBinding, go ahead and make one.  creating one here collapses code paths below.  We only take this hit once
		policyBinding = policybindingregistry.NewEmptyPolicyBinding(kapi.NamespaceValue(ctx), policyNamespace, policyBindingName)
		if err := m.BindingRegistry.CreatePolicyBinding(ctx, policyBinding); err != nil {
			// Tolerate the policybinding having been created in the meantime
			if !kapierrors.IsAlreadyExists(err) {
				return nil, err
			}
		}

		policyBinding, err = m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
		if err != nil {
			return nil, err
		}
	}

	if policyBinding.RoleBindings == nil {
		policyBinding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)
	}

	return policyBinding, nil
}
Example #10
func ensureNamespace(client internalclientset.Interface, namespace string) error {
	err := createNamespace(client, namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
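A hypothetical call site, to show the contract: the helper reports success whether it created the namespace or found it already present (the namespace name here is made up):

if err := ensureNamespace(client, "tiller-system"); err != nil {
	return fmt.Errorf("could not ensure namespace exists: %s", err)
}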
Example #11
File: init.go Project: slack/helm
// runInit initializes local config and installs tiller to Kubernetes Cluster
func (i *initCmd) run() error {
	if err := ensureHome(i.home, i.out); err != nil {
		return err
	}

	if !i.clientOnly {
		if i.kubeClient == nil {
			_, c, err := getKubeClient(kubeContext)
			if err != nil {
				return fmt.Errorf("could not get kubernetes client: %s", err)
			}
			i.kubeClient = c
		}
		if err := installer.Install(i.kubeClient, tillerNamespace, i.image, i.canary, flagDebug); err != nil {
			if !kerrors.IsAlreadyExists(err) {
				return fmt.Errorf("error installing: %s", err)
			}
			fmt.Fprintln(i.out, "Warning: Tiller is already installed in the cluster. (Use --client-only to suppress this message.)")
		} else {
			fmt.Fprintln(i.out, "\nTiller (the helm server side component) has been installed into your Kubernetes Cluster.")
		}
	} else {
		fmt.Fprintln(i.out, "Not installing tiller due to 'client-only' flag having been set")
	}
	fmt.Fprintln(i.out, "Happy Helming!")
	return nil
}
Example #12
// CreateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePort, nodePort int) error {
	ctx := api.NewDefaultContext()
	if _, err := c.ServiceRegistry.GetService(ctx, serviceName); err == nil {
		// The service already exists.
		return nil
	}

	ports, serviceType := createPortAndServiceSpec(servicePort, nodePort)
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
		},
		Spec: api.ServiceSpec{
			Ports: ports,
			// maintained by this code, not by the pod selector
			Selector:        nil,
			ClusterIP:       serviceIP.String(),
			SessionAffinity: api.ServiceAffinityNone,
			Type:            serviceType,
		},
	}
	if err := rest.BeforeCreate(rest.Services, ctx, svc); err != nil {
		return err
	}

	_, err := c.ServiceRegistry.CreateService(ctx, svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #13
// CreateTenantIfNeeded will create the tenant that contains the master services if it doesn't already exist
func (c *Controller) CreateTenantIfNeeded(te string) error {
	ctx := api.NewContext()
	if _, err := c.TenantRegistry.GetTenant(ctx, api.TenantDefault); err == nil {
		// the tenant already exists
		return nil
	}
	ns, err := c.NamespaceRegistry.GetNamespace(ctx, api.NamespaceDefault)
	if err != nil {
		// the default namespace could not be retrieved
		return err
	}
	newTenant := &api.Tenant{
		ObjectMeta: api.ObjectMeta{
			Name:      te,
			Namespace: api.NamespaceDefault,
			Tenant:    api.TenantDefault,
		},
		Spec: api.TenantSpec{
			Namespaces: []api.Namespace{*ns},
		},
	}
	err = c.TenantRegistry.CreateTenant(ctx, newTenant)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #14
// CreateProject creates a project
func CreateProject(name, display, desc, basecmd string, out io.Writer) error {
	f, err := loggedInUserFactory()
	if err != nil {
		return err
	}
	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	pathOptions := config.NewPathOptionsWithConfig("")
	opt := &cmd.NewProjectOptions{
		ProjectName: name,
		DisplayName: display,
		Description: desc,

		Name: basecmd,

		Client: client,

		ProjectOptions: &cmd.ProjectOptions{PathOptions: pathOptions},
		Out:            ioutil.Discard,
	}
	err = opt.ProjectOptions.Complete(f, []string{}, ioutil.Discard)
	if err != nil {
		return err
	}
	err = opt.Run()
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return setCurrentProject(f, name, out)
		}
		return err
	}
	return nil
}
Example #15
// Install uses kubernetes client to install tiller
//
// Returns an error if the command failed.
//
// If verbose is true, this will print the manifest to stdout.
func Install(namespace, image string, verbose bool) error {
	kc := kube.New(nil)

	if namespace == "" {
		ns, _, err := kc.DefaultNamespace()
		if err != nil {
			return err
		}
		namespace = ns
	}

	c, err := kc.Client()
	if err != nil {
		return err
	}

	ns := generateNamespace(namespace)
	if _, err := c.Namespaces().Create(ns); err != nil {
		if !errors.IsAlreadyExists(err) {
			return err
		}
	}

	if image == "" {
		// strip git sha off version
		tag := strings.Split(version.Version, "+")[0]
		image = fmt.Sprintf("%s:%s", defaultImage, tag)
	}

	rc := generateDeployment(image)

	_, err = c.Deployments(namespace).Create(rc)
	return err
}
Example #16
// createSchedulerServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error {
	ctx := api.NewDefaultContext()
	if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
		// The service already exists.
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "k8sm", "component": "scheduler"},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
			// maintained by this code, not by the pod selector
			Selector:        nil,
			SessionAffinity: api.ServiceAffinityNone,
		},
	}
	if m.ServiceAddress != nil {
		svc.Spec.ClusterIP = m.ServiceAddress.String()
	}
	_, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #17
// executeExecNewPod executes a ExecNewPod hook by creating a new pod based on
// the hook parameters and deployment. The pod is then synchronously watched
// until the pod completes, and if the pod failed, an error is returned.
//
// The hook pod inherits the following from the container the hook refers to:
//
//   * Environment (hook keys take precedence)
//   * Working directory
//   * Resources
func (e *HookExecutor) executeExecNewPod(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
	// Build a pod spec from the hook config and deployment
	podSpec, err := makeHookPod(hook, deployment, label)
	if err != nil {
		return err
	}

	// Try to create the pod.
	pod, err := e.PodClient.CreatePod(deployment.Namespace, podSpec)
	if err != nil {
		if !kerrors.IsAlreadyExists(err) {
			return fmt.Errorf("couldn't create lifecycle pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// The pod already exists: fall back to the spec we built, so the watch
		// below has a valid name and namespace instead of dereferencing nil.
		pod = podSpec
	} else {
		glog.V(0).Infof("Created lifecycle pod %s for deployment %s", pod.Name, deployutil.LabelForDeployment(deployment))
	}

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	nextPod := e.PodClient.PodWatch(pod.Namespace, pod.Name, pod.ResourceVersion, stopChannel)

	glog.V(0).Infof("Waiting for hook pod %s/%s to complete", pod.Namespace, pod.Name)
	for {
		pod := nextPod()
		switch pod.Status.Phase {
		case kapi.PodSucceeded:
			return nil
		case kapi.PodFailed:
			return fmt.Errorf("%s", pod.Status.Message)
		}
	}
}
Example #18
func TestUnprivilegedNewProject(t *testing.T) {
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valerieClientConfig := *clusterAdminClientConfig
	valerieClientConfig.Username = ""
	valerieClientConfig.Password = ""
	valerieClientConfig.BearerToken = ""
	valerieClientConfig.CertFile = ""
	valerieClientConfig.KeyFile = ""
	valerieClientConfig.CertData = nil
	valerieClientConfig.KeyData = nil

	accessToken, err := tokencmd.RequestToken(&valerieClientConfig, nil, "valerie", "security!")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valerieClientConfig.BearerToken = accessToken
	valerieOpenshiftClient, err := client.New(&valerieClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// confirm that we have access to request the project
	allowed, err := valerieOpenshiftClient.ProjectRequests().List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if allowed.Status != unversioned.StatusSuccess {
		t.Fatalf("expected %v, got %v", unversioned.StatusSuccess, allowed.Status)
	}

	requestProject := oc.NewProjectOptions{
		ProjectName: "new-project",
		DisplayName: "display name here",
		Description: "the special description",

		Client: valerieOpenshiftClient,
		Out:    ioutil.Discard,
	}

	if err := requestProject.Run(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	waitForProject(t, valerieOpenshiftClient, "new-project", 5*time.Second, 10)

	if err := requestProject.Run(); !kapierrors.IsAlreadyExists(err) {
		t.Fatalf("expected an already exists error, but got %v", err)
	}

}
Example #19
func (c *MasterConfig) ensureDefaultSecurityContextConstraints() {
	sccSupported, err := c.securityContextConstraintsSupported()
	if err != nil {
		glog.Errorf("Unable to determine if security context constraints are supported. Got error: %v", err)
		return
	}
	if !sccSupported {
		glog.Infof("Ignoring default security context constraints when running on external Kubernetes.")
		return
	}

	ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace
	bootstrapSCCGroups, bootstrapSCCUsers := bootstrappolicy.GetBoostrapSCCAccess(ns)

	for _, scc := range bootstrappolicy.GetBootstrapSecurityContextConstraints(bootstrapSCCGroups, bootstrapSCCUsers) {
		_, err := c.KubeClient().SecurityContextConstraints().Create(&scc)
		if kapierror.IsAlreadyExists(err) {
			continue
		}
		if err != nil {
			glog.Errorf("Unable to create default security context constraint %s.  Got error: %v", scc.Name, err)
			continue
		}
		glog.Infof("Created default security context constraint %s", scc.Name)
	}
}
Example #20
func (p *provision) Admit(a admission.Attributes) (err error) {
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		return nil
	}
	// we need to wait for our caches to warm
	if !p.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := p.namespaceInformer.GetStore().Get(namespace)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if exists {
		return nil
	}
	_, err = p.client.Core().Namespaces().Create(namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return admission.NewForbidden(a, err)
	}
	return nil
}
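The informer lookup is only an optimization: the cache can lag, and concurrent API servers can admit requests simultaneously, so the Create still races and AlreadyExists must stay non-fatal. Factored out, the idiom looks like this sketch, where nsCreator is a hypothetical stand-in for p.client.Core().Namespaces() and cache.Store is the type behind GetStore():

type nsCreator interface {
	Create(ns *api.Namespace) (*api.Namespace, error)
}

func ensureViaCache(store cache.Store, client nsCreator, ns *api.Namespace) error {
	if _, exists, err := store.Get(ns); err != nil || exists {
		return err // already cached (err is nil), or the lookup failed
	}
	if _, err := client.Create(ns); err != nil && !errors.IsAlreadyExists(err) {
		return err
	}
	return nil
}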
Example #21
func (p *provisioningIdentityMapper) userForWithRetries(info authapi.UserIdentityInfo, allowedRetries int) (kuser.Info, error) {
	ctx := kapi.NewContext()

	identity, err := p.identity.GetIdentity(ctx, info.GetIdentityName())

	if kerrs.IsNotFound(err) {
		user, err := p.createIdentityAndMapping(ctx, info)
		// Only retry for the following types of errors:
		// AlreadyExists errors:
		// * The same user was created by another identity provider with the same preferred username
		// * The same user was created by another instance of this identity provider (e.g. double-clicked login button)
		// * The same identity was created by another instance of this identity provider (e.g. double-clicked login button)
		// Conflict errors:
		// * The same user was updated by another identity provider to add identity info
		if (kerrs.IsAlreadyExists(err) || kerrs.IsConflict(err)) && allowedRetries > 0 {
			return p.userForWithRetries(info, allowedRetries-1)
		}
		return user, err
	}

	if err != nil {
		return nil, err
	}

	return p.getMapping(ctx, identity)
}
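The allowedRetries parameter is a simple bounded retry with no extra machinery. Generalized, it looks like this sketch (withRetries and the retryable predicate are hypothetical names):

func withRetries(attempts int, retryable func(error) bool, fn func() error) error {
	err := fn()
	for attempts > 0 && err != nil && retryable(err) {
		attempts--
		err = fn()
	}
	return err
}

It would be called with a predicate like func(err error) bool { return kerrs.IsAlreadyExists(err) || kerrs.IsConflict(err) } to match the retry condition above.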
Example #22
// nextBuildPhase updates build with any appropriate changes, or returns an error if
// the change cannot occur. When returning nil, be sure to set build.Status and optionally
// build.Message.
func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error {
	// If a cancelling event was triggered for the build, update build status.
	if build.Status.Cancelled {
		glog.V(4).Infof("Cancelling Build %s/%s.", build.Namespace, build.Name)
		build.Status.Phase = buildapi.BuildPhaseCancelled
		return nil
	}

	// Set the output Docker image reference.
	ref, err := bc.resolveOutputDockerImageReference(build)
	if err != nil {
		return err
	}
	build.Status.OutputDockerImageReference = ref

	// Set the build phase, which will be persisted if no error occurs.
	build.Status.Phase = buildapi.BuildPhasePending

	// Make a copy to avoid mutating the build from this point on.
	copy, err := kapi.Scheme.Copy(build)
	if err != nil {
		return fmt.Errorf("unable to copy Build: %v", err)
	}
	buildCopy := copy.(*buildapi.Build)

	// TODO(rhcarvalho)
	// The S2I and Docker builders expect build.Spec.Output.To to contain a
	// resolved reference to a Docker image. Since build.Spec is immutable, we
	// change a copy (that is never persisted) and pass it to
	// bc.BuildStrategy.CreateBuildPod. We should make the builders use
	// build.Status.OutputDockerImageReference, which will make copying the build
	// unnecessary.
	if build.Spec.Output.To != nil && len(build.Spec.Output.To.Name) != 0 {
		buildCopy.Spec.Output.To = &kapi.ObjectReference{
			Kind: "DockerImage",
			Name: ref,
		}
	}

	// Invoke the strategy to get a build pod.
	podSpec, err := bc.BuildStrategy.CreateBuildPod(buildCopy)
	if err != nil {
		return fmt.Errorf("failed to create a build pod spec with strategy %q: %v", build.Spec.Strategy.Type, err)
	}
	glog.V(4).Infof("Pod %s for Build %s/%s is about to be created", podSpec.Name, build.Namespace, build.Name)

	if _, err := bc.PodManager.CreatePod(build.Namespace, podSpec); err != nil {
		if errors.IsAlreadyExists(err) {
			glog.V(4).Infof("Build pod already existed: %#v", podSpec)
			return nil
		}
		// Log an event if the pod is not created (most likely due to quota denial).
		bc.Recorder.Eventf(build, "failedCreate", "Error creating: %v", err)
		return fmt.Errorf("failed to create build pod: %v", err)
	}

	glog.V(4).Infof("Created pod for Build: %#v", podSpec)
	return nil
}
Example #23
// createServiceAccount creates a ServiceAccount with the specified name and namespace
func (e *ServiceAccountsController) createServiceAccount(name, namespace string) {
	serviceAccount := &api.ServiceAccount{}
	serviceAccount.Name = name
	serviceAccount.Namespace = namespace
	_, err := e.client.ServiceAccounts(namespace).Create(serviceAccount)
	if err != nil && !apierrs.IsAlreadyExists(err) {
		glog.Error(err)
	}
}
Example #24
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		if errGet := e.Storage.Get(ctx, key, out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
			// TODO: remove this block after 1.6
			userAgent, _ := api.UserAgentFrom(ctx)
			if !isOldKubectl(userAgent) {
				return nil, err
			}
			if e.QualifiedResource.Resource != "replicationcontrollers" {
				return nil, err
			}
			*msg = fmt.Sprintf("%s: if you're using \"kubectl rolling-update\" with kubectl version older than v1.4.0, your rolling update has failed, though the pods are correctly updated. Please see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#kubectl-rolling-update for a workaround", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #25
// createTokenSecret creates a token secret for a given service account.  Returns the secret and whether its token data is already populated
func (e *DockercfgController) createTokenSecret(serviceAccount *api.ServiceAccount) (*api.Secret, bool, error) {
	pendingTokenName := serviceAccount.Annotations[PendingTokenAnnotation]

	// If this service account has no record of a pending token name, record one
	if len(pendingTokenName) == 0 {
		pendingTokenName = secret.Strategy.GenerateName(osautil.GetTokenSecretNamePrefix(serviceAccount))
		if serviceAccount.Annotations == nil {
			serviceAccount.Annotations = map[string]string{}
		}
		serviceAccount.Annotations[PendingTokenAnnotation] = pendingTokenName
		updatedServiceAccount, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
		// Conflicts mean we'll get called to sync this service account again
		if kapierrors.IsConflict(err) {
			return nil, false, nil
		}
		if err != nil {
			return nil, false, err
		}
		serviceAccount = updatedServiceAccount
	}

	// Return the token from cache
	existingTokenSecretObj, exists, err := e.secretCache.GetByKey(serviceAccount.Namespace + "/" + pendingTokenName)
	if err != nil {
		return nil, false, err
	}
	if exists {
		existingTokenSecret := existingTokenSecretObj.(*api.Secret)
		return existingTokenSecret, len(existingTokenSecret.Data[api.ServiceAccountTokenKey]) > 0, nil
	}

	// Try to create the named pending token
	tokenSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      pendingTokenName,
			Namespace: serviceAccount.Namespace,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccount.Name,
				api.ServiceAccountUIDKey:  string(serviceAccount.UID),
				api.CreatedByAnnotation:   CreateDockercfgSecretsController,
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{},
	}

	glog.V(4).Infof("Creating token secret %q for service account %s/%s", tokenSecret.Name, serviceAccount.Namespace, serviceAccount.Name)
	token, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(tokenSecret)
	// Already exists but not in cache means we'll get an add watch event and resync
	if kapierrors.IsAlreadyExists(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return token, len(token.Data[api.ServiceAccountTokenKey]) > 0, nil
}
Example #26
func (o *RoleModificationOptions) AddRole() error {
	roleBindings, err := o.RoleBindingAccessor.GetExistingRoleBindingsForRole(o.RoleNamespace, o.RoleName)
	if err != nil {
		return err
	}
	roleBindingNames, err := o.RoleBindingAccessor.GetExistingRoleBindingNames()
	if err != nil {
		return err
	}

	var roleBinding *authorizationapi.RoleBinding
	isUpdate := true
	if len(roleBindings) == 0 {
		roleBinding = &authorizationapi.RoleBinding{}
		isUpdate = false
	} else {
		// only need to add the user or group to a single roleBinding on the role.  Just choose the first one
		roleBinding = roleBindings[0]
	}

	roleBinding.RoleRef.Namespace = o.RoleNamespace
	roleBinding.RoleRef.Name = o.RoleName

	newSubjects := authorizationapi.BuildSubjects(o.Users, o.Groups, uservalidation.ValidateUserName, uservalidation.ValidateGroupName)
	newSubjects = append(newSubjects, o.Subjects...)

subjectCheck:
	for _, newSubject := range newSubjects {
		for _, existingSubject := range roleBinding.Subjects {
			if existingSubject.Kind == newSubject.Kind &&
				existingSubject.Name == newSubject.Name &&
				existingSubject.Namespace == newSubject.Namespace {
				continue subjectCheck
			}
		}

		roleBinding.Subjects = append(roleBinding.Subjects, newSubject)
	}

	if isUpdate {
		err = o.RoleBindingAccessor.UpdateRoleBinding(roleBinding)
	} else {
		roleBinding.Name = getUniqueName(o.RoleName, roleBindingNames)
		err = o.RoleBindingAccessor.CreateRoleBinding(roleBinding)
		// If the rolebinding was created in the meantime, rerun
		if kapierrors.IsAlreadyExists(err) {
			return o.AddRole()
		}
	}
	if err != nil {
		return err
	}

	return nil
}
Example #27
// registerWithApiserver registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithApiserver() {
	if kl.registrationCompleted {
		return
	}
	step := 100 * time.Millisecond
	for {
		time.Sleep(step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}

		node, err := kl.initialNodeStatus()
		if err != nil {
			glog.Errorf("Unable to construct api.Node object for kubelet: %v", err)
			continue
		}

		glog.V(2).Infof("Attempting to register node %s", node.Name)
		if _, err := kl.kubeClient.Core().Nodes().Create(node); err != nil {
			if !apierrors.IsAlreadyExists(err) {
				glog.V(2).Infof("Unable to register %s with the apiserver: %v", node.Name, err)
				continue
			}
			currentNode, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName)
			if err != nil {
				glog.Errorf("error getting node %q: %v", kl.nodeName, err)
				continue
			}
			if currentNode == nil {
				glog.Errorf("no node instance returned for %q", kl.nodeName)
				continue
			}
			if currentNode.Spec.ExternalID == node.Spec.ExternalID {
				glog.Infof("Node %s was previously registered", node.Name)
				kl.registrationCompleted = true
				return
			}
			glog.Errorf(
				"Previously %q had externalID %q; now it is %q; will delete and recreate.",
				kl.nodeName, node.Spec.ExternalID, currentNode.Spec.ExternalID,
			)
			if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
				glog.Errorf("Unable to delete old node: %v", err)
			} else {
				glog.Errorf("Deleted old node object %q", kl.nodeName)
			}
			continue
		}
		glog.Infof("Successfully registered node %s", node.Name)
		kl.registrationCompleted = true
		return
	}
}
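The sleep-then-double loop is a hand-rolled capped exponential backoff (100ms, 200ms, ..., capped at 7s). Where giving up after a fixed number of attempts is acceptable, the wait package expresses a similar schedule more compactly. A sketch, with two caveats: unlike the loop above it stops after Steps attempts, and this vintage of wait.Backoff has no cap field; the AlreadyExists branch would also still need the externalID reconciliation shown above:

backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2, Steps: 7}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
	node, err := kl.initialNodeStatus()
	if err != nil {
		return false, nil // could not build the Node object; retry
	}
	_, err = kl.kubeClient.Core().Nodes().Create(node)
	return err == nil || apierrors.IsAlreadyExists(err), nil
})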
Example #28
func createAndRefresh(info *resource.Info) error {
	obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			glog.V(5).Infof("Object %s/%s already exists", info.Namespace, info.Name)
			return nil
		}
		return err
	}
	info.Refresh(obj, true)
	return nil
}
Example #29
func TestEtcdCreateControllerAlreadyExisting(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	key, _ := makeControllerKey(ctx, validController.Name)
	key = etcdtest.AddPrefix(key)
	fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &validController), 0)

	_, err := storage.Create(ctx, &validController)
	if !errors.IsAlreadyExists(err) {
		t.Errorf("expected already exists err, got %#v", err)
	}
}
Example #30
func TestEtcdCreateServiceAlreadyExisting(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry, rest := NewTestEtcdRegistry(fakeClient)
	key, _ := rest.KeyFunc(ctx, "foo")
	key = etcdtest.AddPrefix(key)
	fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, makeTestService("foo")), 0)
	_, err := registry.CreateService(ctx, makeTestService("foo"))
	if !errors.IsAlreadyExists(err) {
		t.Errorf("expected already exists err, got %#v", err)
	}
}