Example 1
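// printResult prints the check name padded with dots to a fixed width, then the
// result in colour via util.Successf or util.Failuref; a non-nil err forces the
// result to Failure and the error itself is printed as well.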
func printResult(check string, r Result, err error) {
	if err != nil {
		r = Failure
	}
	padLen := 78 - len(check)
	pad := ""
	if padLen > 0 {
		pad = strings.Repeat(".", padLen)
	}
	util.Infof("%s%s", check, pad)
	if r == Failure {
		util.Failuref("%-2s", r)
	} else {
		util.Successf("%-2s", r)
	}
	if err != nil {
		util.Failuref("%v", err)
	}
	util.Blank()
}
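A minimal usage sketch, assuming it lives in the same package as printResult above and that Success and Failure are the Result constants used there; the first check name comes from Example 4, the second label and the error are illustrative, and the standard errors package is assumed to be imported.

// runSampleChecks drives printResult the way the validation code does: one call
// per named check, with a nil error for a pass and a non-nil error forcing the
// reported result to Failure.
func runSampleChecks() {
	printResult("SecurityContextConstraints restricted", Success, nil)
	printResult("PersistentVolumeClaims Bound", Failure, errors.New("claim not Bound"))
}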
Example 2
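// validatePersistenceVolumeClaims lists the PersistentVolumeClaims in the default
// namespace and returns Failure if any claim is not yet Bound, printing guidance
// on how to create the missing PersistentVolumes.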
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}
	if rc != nil {
		items := rc.Items
		pendingClaimNames := make([]string, 0, len(items))
		for _, item := range items {
			status := item.Status.Phase
			if status != "Bound" {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
			util.Info(`
You can enable dynamic PersistentVolume creation with Kubernetes 1.4 or later.

Or, to have gofabric8 create HostPath based PersistentVolume resources for you on minikube or minishift, type:

  gofabric8 volumes

For other clusters you could do something like this, though ideally with a persistent volume implementation other than hostPath:

cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1000Mi
  hostPath:
    path: /opt/fabric8-data
EOF


`)
			return Failure, err
		}
		return Success, err
	}
	return Failure, err
}
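A sketch of how the two functions above could be wired together; the enclosing helper is hypothetical, while its parameters mirror the signature of validatePersistenceVolumeClaims and the check label is illustrative.

// reportPersistenceVolumeClaims runs the PVC validation and reports the outcome
// through printResult, following the check/report pattern used in these examples.
func reportPersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) {
	r, err := validatePersistenceVolumeClaims(c, f)
	printResult("PersistentVolumeClaims Bound", r, err)
}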
Example 3
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}
	if rc != nil {
		items := rc.Items
		pendingClaimNames := make([]string, 0, len(items))
		for _, item := range items {
			status := item.Status.Phase
			if status != "Bound" {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
			util.Info(`
To generate a single-node PersistentVolume, type something like this:


cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1000Mi
  hostPath:
    path: /opt/fabric8-data
EOF


`)
			return Failure, err
		}
		return Success, err
	}
	return Failure, err
}
Example 4
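// deploy installs a fabric8 package into the cluster behind the current client
// configuration: it resolves the --package value to a YAML/JSON URI, confirms the
// target domain and namespace, applies OpenShift security context constraints and
// role bindings when the master is OpenShift, processes the downloaded template,
// and optionally sets up ingress, creates missing PersistentVolumes and opens the
// console.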
func deploy(f *cmdutil.Factory, d DefaultFabric8Deployment) {
	c, cfg := client.NewClient(f)
	ns, _, _ := f.DefaultNamespace()

	domain := d.domain
	dockerRegistry := d.dockerRegistry

	mini, err := util.IsMini()
	if err != nil {
		util.Failuref("error checking if minikube or minishift %v", err)
	}

	packageName := d.packageName
	if len(packageName) == 0 {
		util.Fatalf("Missing value for --%s", packageFlag)
	}

	typeOfMaster := util.TypeOfMaster(c)

	// extract the ip address from the URL
	u, err := url.Parse(cfg.Host)
	if err != nil {
		util.Fatalf("%s", err)
	}

	ip, _, err := net.SplitHostPort(u.Host)
	if err != nil && !strings.Contains(err.Error(), "missing port in address") {
		util.Fatalf("%s", err)
	}

	// default to an xip.io domain for local deployments in case users deploy an ingress controller or router
	if mini && typeOfMaster == util.OpenShift {
		domain = ip + ".xip.io"
	}

	// default to the server from the current context
	apiserver := u.Host
	if d.apiserver != "" {
		apiserver = d.apiserver
	}

	util.Info("Deploying fabric8 to your ")
	util.Success(string(typeOfMaster))
	util.Info(" installation at ")
	util.Success(cfg.Host)
	util.Info(" for domain ")
	util.Success(domain)
	util.Info(" in namespace ")
	util.Successf("%s\n\n", ns)

	mavenRepo := d.mavenRepo
	if !strings.HasSuffix(mavenRepo, "/") {
		mavenRepo = mavenRepo + "/"
	}
	util.Info("Loading fabric8 releases from maven repository:")
	util.Successf("%s\n", mavenRepo)

	if len(dockerRegistry) > 0 {
		util.Infof("Loading fabric8 docker images from docker registry: %s\n", dockerRegistry)
	}

	if len(apiserver) == 0 {
		apiserver = domain
	}

	if len(d.appToRun) > 0 {
		util.Warn("Please note that the --app parameter is now deprecated.\n")
		util.Warn("Please use the --package argument to specify a package like `platform`, `console`, `ipaas` or to refer to a URL or file of the YAML package to install\n")
	}

	if strings.Contains(domain, "=") {
		util.Warnf("\nInvalid domain: %s\n\n", domain)
	} else if confirmAction(d.yes) {

		oc, _ := client.NewOpenShiftClient(cfg)

		initSchema()

		ensureNamespaceExists(c, oc, ns)

		versionPlatform := ""
		baseUri := ""
		switch packageName {
		case "":
		case platformPackage:
			baseUri = platformPackageUrlPrefix
			versionPlatform = versionForUrl(d.versionPlatform, urlJoin(mavenRepo, platformMetadataUrl))
			logPackageVersion(packageName, versionPlatform)
		case consolePackage:
			baseUri = consolePackageUrlPrefix
			versionPlatform = versionForUrl(d.versionPlatform, urlJoin(mavenRepo, consolePackageMetadataUrl))
			logPackageVersion(packageName, versionPlatform)
		case iPaaSPackage:
			baseUri = ipaasPackageUrlPrefix
			versionPlatform = versionForUrl(d.versioniPaaS, urlJoin(mavenRepo, ipaasMetadataUrl))
			logPackageVersion(packageName, versionPlatform)
		default:
			baseUri = ""
		}
		uri := ""
		if len(baseUri) > 0 {
			uri = fmt.Sprintf(urlJoin(mavenRepo, baseUri), versionPlatform)

		} else {
			// let's assume the package is a file or a URI already
			if strings.Contains(packageName, "://") {
				uri = packageName
			} else {
				fi, err := os.Stat(packageName)
				if err != nil {
					util.Fatalf("package %s not recognised and is not a local file %s\n", packageName, err)
				}
				if fi.Mode().IsDir() {
					util.Fatalf("package %s not recognised and is not a local file: it is a directory\n", packageName)
				}
				absFile, err := filepath.Abs(packageName)
				if err != nil {
					util.Fatalf("package %s not recognised and is not a local file %s\n", packageName, err)
				}
				uri = "file://" + absFile
			}
		}

		if typeOfMaster == util.Kubernetes {
			if !strings.HasPrefix(uri, "file://") {
				uri += "kubernetes.yml"
			}
		} else {
			if !strings.HasPrefix(uri, "file://") {
				uri += "openshift.yml"
			}

			r, err := verifyRestrictedSecurityContextConstraints(c, f)
			printResult("SecurityContextConstraints restricted", r, err)
			r, err = deployFabric8SecurityContextConstraints(c, f, ns)
			printResult("SecurityContextConstraints fabric8", r, err)
			r, err = deployFabric8SASSecurityContextConstraints(c, f, ns)
			printResult("SecurityContextConstraints "+Fabric8SASSCC, r, err)

			printAddClusterRoleToUser(oc, f, "cluster-admin", "system:serviceaccount:"+ns+":fabric8")

			// TODO replace all of this with the necessary RoleBindings inside the OpenShift YAML...
			printAddClusterRoleToUser(oc, f, "cluster-admin", "system:serviceaccount:"+ns+":jenkins")

			printAddClusterRoleToUser(oc, f, "cluster-admin", "system:serviceaccount:"+ns+":configmapcontroller")
			printAddClusterRoleToUser(oc, f, "cluster-admin", "system:serviceaccount:"+ns+":exposecontroller")

			printAddClusterRoleToUser(oc, f, "cluster-reader", "system:serviceaccount:"+ns+":metrics")
			printAddClusterRoleToUser(oc, f, "cluster-reader", "system:serviceaccount:"+ns+":fluentd")

			printAddClusterRoleToGroup(oc, f, "cluster-reader", "system:serviceaccounts")

			printAddServiceAccount(c, f, "fluentd")
			printAddServiceAccount(c, f, "registry")
			printAddServiceAccount(c, f, "router")
		}

		// now let's apply this template
		util.Infof("Now about to install package %s\n", uri)

		yamlData := []byte{}
		format := "yaml"

		if strings.HasPrefix(uri, "file://") {
			fileName := strings.TrimPrefix(uri, "file://")
			if strings.HasSuffix(fileName, ".json") {
				format = "json"
			}
			yamlData, err = ioutil.ReadFile(fileName)
			if err != nil {
				util.Fatalf("Cannot load file %s got: %v", fileName, err)
			}
		} else {
			resp, err := http.Get(uri)
			if err != nil {
				util.Fatalf("Cannot load YAML package at %s got: %v", uri, err)
			}
			defer resp.Body.Close()
			yamlData, err = ioutil.ReadAll(resp.Body)
			if err != nil {
				util.Fatalf("Cannot load YAML from %s got: %v", uri, err)
			}
		}
		createTemplate(yamlData, format, packageName, ns, domain, apiserver, c, oc, d.pv)

		externalNodeName := ""
		if typeOfMaster == util.Kubernetes {
			if !mini && d.useIngress {
				ensureNamespaceExists(c, oc, fabric8SystemNamespace)
				util.Infof("ns is %s\n", ns)
				runTemplate(c, oc, "ingress-nginx", ns, domain, apiserver, d.pv)
				externalNodeName = addIngressInfraLabel(c, ns)
			}
		}

		updateExposeControllerConfig(c, ns, apiserver, domain, mini, d.useLoadbalancer)

		if mini {
			createMissingPVs(c, ns)
		}

		printSummary(typeOfMaster, externalNodeName, ns, domain, c)
		if d.openConsole {
			openService(ns, "fabric8", c, false, true)
		}
	}
}
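The branch in deploy that turns a package name into a URI can be read in isolation. Below is a standalone sketch of just that fallback logic; resolvePackageURI is a hypothetical helper, resolution of the known package names (platform, console, ipaas) against the maven repository is left out, and the fmt, os, path/filepath and strings imports are assumed.

// resolvePackageURI mirrors the fallback branch of deploy: a value containing
// "://" is treated as a URI and used as-is, while anything else must name an
// existing regular file and is converted into a file:// URI.
func resolvePackageURI(packageName string) (string, error) {
	if strings.Contains(packageName, "://") {
		return packageName, nil
	}
	fi, err := os.Stat(packageName)
	if err != nil {
		return "", fmt.Errorf("package %s not recognised and is not a local file: %v", packageName, err)
	}
	if fi.Mode().IsDir() {
		return "", fmt.Errorf("package %s is a directory, not a package file", packageName)
	}
	absFile, err := filepath.Abs(packageName)
	if err != nil {
		return "", err
	}
	return "file://" + absFile, nil
}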