// RunBuildController starts the build sync loop for builds and buildConfig processing.
func (c *MasterConfig) RunBuildController(informers shared.InformerFactory) error {
    // initialize build controller
    dockerImage := c.ImageFor("docker-builder")
    stiImage := c.ImageFor("sti-builder")

    storageVersion := c.Options.EtcdStorageConfig.OpenShiftStorageVersion
    groupVersion := unversioned.GroupVersion{Group: "", Version: storageVersion}
    codec := kapi.Codecs.LegacyCodec(groupVersion)

    admissionControl := admission.InitPlugin("SecurityContextConstraint", clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient), "")
    if wantsInformers, ok := admissionControl.(cmdadmission.WantsInformers); ok {
        wantsInformers.SetInformers(informers)
    }

    buildDefaults, err := builddefaults.NewBuildDefaults(c.Options.AdmissionConfig.PluginConfig)
    if err != nil {
        return err
    }
    buildOverrides, err := buildoverrides.NewBuildOverrides(c.Options.AdmissionConfig.PluginConfig)
    if err != nil {
        return err
    }

    osclient, kclient := c.BuildControllerClients()
    factory := buildcontrollerfactory.BuildControllerFactory{
        KubeClient:   kclient,
        OSClient:     osclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
        BuildLister:  buildclient.NewOSClientBuildClient(osclient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: dockerImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image: stiImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec:            codec,
            AdmissionControl: admissionControl,
        },
        CustomBuildStrategy: &buildstrategy.CustomBuildStrategy{
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
        BuildDefaults:  buildDefaults,
        BuildOverrides: buildOverrides,
    }

    controller := factory.Create()
    controller.Run()
    deleteController := factory.CreateDeleteController()
    deleteController.Run()
    return nil
}
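// The wantsInformers type assertion above is the admission-plugin injection
// pattern: a plugin opts in to a dependency by implementing a setter
// interface, and the wiring code injects the dependency only when the
// assertion holds. Below is a minimal, self-contained sketch of that
// pattern; the names (WantsGreeting, greeterPlugin, inject) are
// hypothetical illustrations, not OpenShift or Kubernetes APIs.
package main

import "fmt"

// WantsGreeting is the opt-in interface a plugin implements to receive
// the dependency.
type WantsGreeting interface {
    SetGreeting(g string)
}

type greeterPlugin struct{ greeting string }

func (p *greeterPlugin) SetGreeting(g string) { p.greeting = g }

// inject hands the value only to plugins that implement the interface;
// all other plugins pass through untouched.
func inject(plugin interface{}, greeting string) {
    if wants, ok := plugin.(WantsGreeting); ok {
        wants.SetGreeting(greeting)
    }
}

func main() {
    p := &greeterPlugin{}
    inject(p, "hello")
    fmt.Println(p.greeting) // prints: hello
}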
// RunBuildPodController starts the build/pod status sync loop for build status
func (c *MasterConfig) RunBuildPodController() {
    osclient, kclient := c.BuildPodControllerClients()
    factory := buildcontrollerfactory.BuildPodControllerFactory{
        OSClient:     osclient,
        KubeClient:   kclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
    }
    controller := factory.Create()
    controller.Run()
    deletecontroller := factory.CreateDeleteController()
    deletecontroller.Run()
}
// RunBuildController starts the build sync loop for builds and buildConfig processing.
func (c *MasterConfig) RunBuildController() {
    // initialize build controller
    dockerImage := c.ImageFor("docker-builder")
    stiImage := c.ImageFor("sti-builder")

    storageVersion := c.Options.EtcdStorageConfig.OpenShiftStorageVersion
    groupVersion := unversioned.GroupVersion{Group: "", Version: storageVersion}
    codec := kapi.Codecs.LegacyCodec(groupVersion)

    admissionControl := admission.NewFromPlugins(clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient), []string{"SecurityContextConstraint"}, "")

    osclient, kclient := c.BuildControllerClients()
    factory := buildcontrollerfactory.BuildControllerFactory{
        KubeClient:   kclient,
        OSClient:     osclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
        BuildLister:  buildclient.NewOSClientBuildClient(osclient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: dockerImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image: stiImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec:            codec,
            AdmissionControl: admissionControl,
        },
        CustomBuildStrategy: &buildstrategy.CustomBuildStrategy{
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
    }

    controller := factory.Create()
    controller.Run()
    deleteController := factory.CreateDeleteController()
    deleteController.Run()
}
// RunBuildController starts the build sync loop for builds and buildConfig processing.
func (c *MasterConfig) RunBuildController() {
    // initialize build controller
    dockerImage := c.ImageFor("docker-builder")
    stiImage := c.ImageFor("sti-builder")

    storageVersion := c.Options.EtcdStorageConfig.OpenShiftStorageVersion
    groupVersion := unversioned.GroupVersion{Group: "", Version: storageVersion}
    interfaces, err := latest.InterfacesFor(groupVersion)
    if err != nil {
        glog.Fatalf("Unable to load storage version %s: %v", storageVersion, err)
    }

    admissionControl := admission.NewFromPlugins(c.PrivilegedLoopbackKubernetesClient, []string{"SecurityContextConstraint"}, "")

    osclient, kclient := c.BuildControllerClients()
    factory := buildcontrollerfactory.BuildControllerFactory{
        OSClient:     osclient,
        KubeClient:   kclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: dockerImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: interfaces.Codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image:                stiImage,
            TempDirectoryCreator: buildstrategy.STITempDirectoryCreator,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec:            interfaces.Codec,
            AdmissionControl: admissionControl,
        },
        CustomBuildStrategy: &buildstrategy.CustomBuildStrategy{
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: interfaces.Codec,
        },
    }

    controller := factory.Create()
    controller.Run()
    deleteController := factory.CreateDeleteController()
    deleteController.Run()
}
// RunBuildController starts the build sync loop for builds and buildConfig processing.
func (c *MasterConfig) RunBuildController() {
    // initialize build controller
    dockerImage := c.ImageFor("docker-builder")
    stiImage := c.ImageFor("sti-builder")

    storageVersion := c.Options.EtcdStorageConfig.OpenShiftStorageVersion
    interfaces, err := latest.InterfacesFor(storageVersion)
    if err != nil {
        glog.Fatalf("Unable to load storage version %s: %v", storageVersion, err)
    }

    osclient, kclient := c.BuildControllerClients()
    factory := buildcontrollerfactory.BuildControllerFactory{
        OSClient:     osclient,
        KubeClient:   kclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: dockerImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: interfaces.Codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image:                stiImage,
            TempDirectoryCreator: buildstrategy.STITempDirectoryCreator,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: interfaces.Codec,
        },
        CustomBuildStrategy: &buildstrategy.CustomBuildStrategy{
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: interfaces.Codec,
        },
    }

    controller := factory.Create()
    controller.Run()
    deleteController := factory.CreateDeleteController()
    deleteController.Run()
}
event := <-buildWatch.ResultChan()
build := event.Object.(*buildapi.Build)
if build.Name == startedBuilds[0] {
    if buildutil.IsBuildComplete(build) {
        break
    }
    continue
}
// When the other two builds we started after waiting for the first
// build to become running are Pending, verify the first build is still
// running (so the other two builds are started in parallel with the
// first build).
// TODO: This might introduce flakes in case the first build completes
// sooner or fails.
if build.Status.Phase == buildapi.BuildPhasePending {
    c := buildclient.NewOSClientBuildClient(oc.Client())
    firstBuildRunning := false
    _, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
        if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning {
            firstBuildRunning = true
        }
        return false
    })
    o.Expect(err).NotTo(o.HaveOccurred())
    o.Expect(firstBuildRunning).Should(o.BeTrue())
    counter++
}
// When the build failed or completed prematurely, fail the test
o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
if counter == 2 {
    break
event := <-buildWatch.ResultChan()
build := event.Object.(*buildapi.Build)
if build.Name == startedBuilds[0] {
    if buildutil.IsBuildComplete(build) {
        break
    }
    continue
}
// When the other two builds we started after waiting for the first
// build to become running are Pending, verify the first build is still
// running (so the other two builds are started in parallel with the
// first build).
// TODO: This might introduce flakes in case the first build completes
// sooner or fails.
if build.Status.Phase == buildapi.BuildPhasePending {
    c := buildclient.NewOSClientBuildClient(oc.REST())
    firstBuildRunning := false
    _, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
        if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning {
            firstBuildRunning = true
        }
        return false
    })
    o.Expect(err).NotTo(o.HaveOccurred())
    o.Expect(firstBuildRunning).Should(o.BeTrue())
    counter++
}
// When the build failed or completed prematurely, fail the test
o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
if counter == 2 {
    break
func NewTestBuildOpenshift(t *testing.T) *testBuildOpenshift {
    openshift := &testBuildOpenshift{
        stop: make(chan struct{}),
    }

    openshift.lock.Lock()
    defer openshift.lock.Unlock()

    etcdClient := testutil.NewEtcdClient()
    etcdHelper, _ := master.NewEtcdHelper(etcdClient, latest.Version, etcdtest.PathPrefix())

    osMux := http.NewServeMux()
    openshift.server = httptest.NewServer(osMux)

    kubeClient := client.NewOrDie(&client.Config{Host: openshift.server.URL, Version: klatest.Version})
    osClient := osclient.NewOrDie(&client.Config{Host: openshift.server.URL, Version: latest.Version})

    openshift.Client = osClient
    openshift.KubeClient = kubeClient

    kubeletClient, err := kclient.NewKubeletClient(&kclient.KubeletConfig{Port: 10250})
    if err != nil {
        t.Fatalf("Unable to configure Kubelet client: %v", err)
    }

    handlerContainer := master.NewHandlerContainer(osMux)

    _ = master.New(&master.Config{
        EtcdHelper:       etcdHelper,
        KubeletClient:    kubeletClient,
        APIPrefix:        "/api",
        AdmissionControl: admit.NewAlwaysAdmit(),
        RestfulContainer: handlerContainer,
        DisableV1:        false,
    })

    interfaces, _ := latest.InterfacesFor(latest.Version)

    buildStorage := buildetcd.NewStorage(etcdHelper)
    buildRegistry := buildregistry.NewRegistry(buildStorage)
    buildConfigStorage := buildconfigetcd.NewStorage(etcdHelper)
    buildConfigRegistry := buildconfigregistry.NewRegistry(buildConfigStorage)

    imageStorage := imageetcd.NewREST(etcdHelper)
    imageRegistry := image.NewRegistry(imageStorage)

    imageStreamStorage, imageStreamStatus := imagestreametcd.NewREST(
        etcdHelper,
        imagestream.DefaultRegistryFunc(func() (string, bool) {
            return "registry:3000", true
        }),
        &fakeSubjectAccessReviewRegistry{},
    )
    imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus)

    imageStreamImageStorage := imagestreamimage.NewREST(imageRegistry, imageStreamRegistry)
    imageStreamImageRegistry := imagestreamimage.NewRegistry(imageStreamImageStorage)

    imageStreamTagStorage := imagestreamtag.NewREST(imageRegistry, imageStreamRegistry)
    imageStreamTagRegistry := imagestreamtag.NewRegistry(imageStreamTagStorage)

    buildGenerator := &buildgenerator.BuildGenerator{
        Client: buildgenerator.Client{
            GetBuildConfigFunc:      buildConfigRegistry.GetBuildConfig,
            UpdateBuildConfigFunc:   buildConfigRegistry.UpdateBuildConfig,
            GetBuildFunc:            buildRegistry.GetBuild,
            CreateBuildFunc:         buildRegistry.CreateBuild,
            GetImageStreamFunc:      imageStreamRegistry.GetImageStream,
            GetImageStreamImageFunc: imageStreamImageRegistry.GetImageStreamImage,
            GetImageStreamTagFunc:   imageStreamTagRegistry.GetImageStreamTag,
        },
    }

    buildConfigWebHooks := buildconfigregistry.NewWebHookREST(
        buildConfigRegistry,
        buildclient.NewOSClientBuildConfigInstantiatorClient(osClient),
        map[string]webhook.Plugin{
            "generic": generic.New(),
            "github":  github.New(),
        },
    )

    storage := map[string]rest.Storage{
        "builds":                   buildStorage,
        "buildConfigs":             buildConfigStorage,
        "buildConfigs/webhooks":    buildConfigWebHooks,
        "builds/clone":             buildclonestorage.NewStorage(buildGenerator),
        "buildConfigs/instantiate": buildinstantiatestorage.NewStorage(buildGenerator),
        "imageStreams":             imageStreamStorage,
        "imageStreams/status":      imageStreamStatus,
        "imageStreamTags":          imageStreamTagStorage,
        "imageStreamImages":        imageStreamImageStorage,
    }
    for k, v := range storage {
        storage[strings.ToLower(k)] = v
    }

    version := &apiserver.APIGroupVersion{
        Root:    "/oapi",
        Version: "v1",

        Storage: storage,
        Codec:   latest.Codec,

        Mapper: latest.RESTMapper,

        Creater:   kapi.Scheme,
        Typer:     kapi.Scheme,
        Convertor: kapi.Scheme,
        Linker:    interfaces.MetadataAccessor,

        Admit:   admit.NewAlwaysAdmit(),
        Context: kapi.NewRequestContextMapper(),
    }
    if err := version.InstallREST(handlerContainer); err != nil {
        t.Fatalf("unable to install REST: %v", err)
    }

    bcFactory := buildcontrollerfactory.BuildControllerFactory{
        OSClient:     osClient,
        KubeClient:   kubeClient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osClient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: "test-docker-builder",
            Codec: latest.Codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image:                "test-sti-builder",
            TempDirectoryCreator: buildstrategy.STITempDirectoryCreator,
            Codec:                latest.Codec,
        },
        Stop: openshift.stop,
    }

    bcFactory.Create().Run()

    bpcFactory := buildcontrollerfactory.BuildPodControllerFactory{
        OSClient:     osClient,
        KubeClient:   kubeClient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osClient),
        Stop:         openshift.stop,
    }

    bpcFactory.Create().Run()

    return openshift
}
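// A hedged usage sketch for the harness above: a test constructs the
// in-process server, uses the returned clients, and closes the stop
// channel (passed to both controller factories as Stop) to shut the
// controllers down. TestBuildFlow is a hypothetical name; only
// NewTestBuildOpenshift and the stop/Client fields come from the code above.
func TestBuildFlow(t *testing.T) {
    openshift := NewTestBuildOpenshift(t)
    defer close(openshift.stop) // signals the build and build-pod controllers to stop

    _ = openshift.Client // issue Build/BuildConfig API calls against the test server here
}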
// Complete completes all the required options.
func (o *CancelBuildOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string, in io.Reader, out io.Writer) error {
    o.In = in
    o.Out = out
    o.ErrOut = cmd.OutOrStderr()
    o.ReportError = func(err error) {
        o.HasError = true
        fmt.Fprintf(o.ErrOut, "error: %s\n", err.Error())
    }

    if len(args) == 0 {
        return kcmdutil.UsageError(cmd, "Must pass a name of a build or a buildconfig to cancel")
    }

    namespace, _, err := f.DefaultNamespace()
    if err != nil {
        return err
    }

    if len(o.States) == 0 {
        // If --state is not specified, default to "new", "pending", and
        // "running".
        o.States = []string{"new", "pending", "running"}
    } else {
        for _, state := range o.States {
            if len(state) > 0 && !isStateCancellable(state) {
                return kcmdutil.UsageError(cmd, "The '--state' flag has an invalid value. Must be one of 'new', 'pending', or 'running'")
            }
        }
    }

    client, _, err := f.Clients()
    if err != nil {
        return err
    }

    o.Namespace = namespace
    o.Client = client
    o.BuildLister = buildclient.NewOSClientBuildClient(client)
    o.BuildClient = client.Builds(namespace)
    o.Mapper, _ = f.Object(false)

    for _, item := range args {
        resource, name, err := cmdutil.ResolveResource(buildapi.Resource("builds"), item, o.Mapper)
        if err != nil {
            return err
        }

        switch resource {
        case buildapi.Resource("buildconfigs"):
            // Expand a buildconfig argument into all of its builds.
            list, err := buildutil.BuildConfigBuilds(o.BuildLister, o.Namespace, name, nil)
            if err != nil {
                return err
            }
            for _, b := range list.Items {
                o.BuildNames = append(o.BuildNames, b.Name)
            }
        case buildapi.Resource("builds"):
            o.BuildNames = append(o.BuildNames, strings.TrimSpace(name))
        default:
            return fmt.Errorf("invalid resource provided: %v", resource)
        }
    }

    return nil
}
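// isStateCancellable is called above but not shown in this extract. A
// minimal sketch of what such a helper could look like, assuming it only
// checks membership in the cancellable set named in the usage error
// ("new", "pending", "running"); an illustration, not the actual
// OpenShift implementation.
func isStateCancellable(state string) bool {
    cancellablePhases := []string{
        string(buildapi.BuildPhaseNew),
        string(buildapi.BuildPhasePending),
        string(buildapi.BuildPhaseRunning),
    }
    for _, p := range cancellablePhases {
        // Compare case-insensitively so "new" matches BuildPhaseNew ("New").
        if strings.EqualFold(state, p) {
            return true
        }
    }
    return false
}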