func NewCmdRolloutUndo(f cmdutil.Factory, out io.Writer) *cobra.Command {
	options := &UndoOptions{}

	validArgs := []string{"deployment"}
	argAliases := kubectl.ResourceAliases(validArgs)

	cmd := &cobra.Command{
		Use:     "undo (TYPE NAME | TYPE/NAME) [flags]",
		Short:   "Undo a previous rollout",
		Long:    undo_long,
		Example: undo_example,
		Run: func(cmd *cobra.Command, args []string) {
			allErrs := []error{}
			err := options.CompleteUndo(f, cmd, out, args)
			if err != nil {
				allErrs = append(allErrs, err)
			}
			err = options.RunUndo()
			if err != nil {
				allErrs = append(allErrs, err)
			}
			cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs)))
		},
		ValidArgs:  validArgs,
		ArgAliases: argAliases,
	}

	cmd.Flags().Int64("to-revision", 0, "The revision to rollback to. Default to 0 (last revision).")
	usage := "identifying the resource to get from a server."
	cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
	cmdutil.AddDryRunFlag(cmd)
	return cmd
}
func NewCmdRolloutResume(f cmdutil.Factory, out io.Writer) *cobra.Command {
	options := &ResumeConfig{}

	validArgs := []string{"deployment"}
	argAliases := kubectl.ResourceAliases(validArgs)

	cmd := &cobra.Command{
		Use:     "resume RESOURCE",
		Short:   "Resume a paused resource",
		Long:    resume_long,
		Example: resume_example,
		Run: func(cmd *cobra.Command, args []string) {
			allErrs := []error{}
			err := options.CompleteResume(f, cmd, out, args)
			if err != nil {
				allErrs = append(allErrs, err)
			}
			err = options.RunResume()
			if err != nil {
				allErrs = append(allErrs, err)
			}
			cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs)))
		},
		ValidArgs:  validArgs,
		ArgAliases: argAliases,
	}

	usage := "identifying the resource to get from a server."
	cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
	return cmd
}
// MultilineError returns a string representing an error that splits sub errors into their own
// lines. The returned string will end with a newline.
func MultilineError(prefix string, err error) string {
	if agg, ok := err.(utilerrors.Aggregate); ok {
		errs := utilerrors.Flatten(agg).Errors()
		buf := &bytes.Buffer{}
		switch len(errs) {
		case 0:
			return fmt.Sprintf("%s%v\n", prefix, err)
		case 1:
			return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0]))
		default:
			fmt.Fprintln(buf, prefix)
			for _, err := range errs {
				fmt.Fprintf(buf, "* %v\n", messageForError(err))
			}
			return buf.String()
		}
	}
	return fmt.Sprintf("%s%s\n", prefix, err)
}
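// The sketch below is not part of the original source; it is a minimal
// illustration of how an aggregate built with utilerrors.NewAggregate might be
// rendered through MultilineError: a single sub-error is printed inline after
// the prefix, while multiple sub-errors each get their own "* " line. The
// function name and error texts are made up for illustration only.
func exampleMultilineError() {
	sample := utilerrors.NewAggregate([]error{
		fmt.Errorf("pods %q not found", "foo"),
		fmt.Errorf("services %q not found", "bar"),
	})
	// With two sub-errors this prints the prefix on its own line, then one
	// "* ..." line per flattened error, ending with a newline.
	fmt.Print(MultilineError("error validating data: ", sample))
}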
func (c *ServiceAccountsController) syncNamespace(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
	}()

	ns, err := c.nsLister.Get(key)
	if apierrs.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	if ns.Status.Phase != v1.NamespaceActive {
		// If namespace is not active, we shouldn't try to create anything
		return nil
	}

	createFailures := []error{}
	for i := range c.serviceAccountsToEnsure {
		sa := c.serviceAccountsToEnsure[i]
		switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); {
		case err == nil:
			continue
		case apierrs.IsNotFound(err):
		case err != nil:
			return err
		}
		// this is only safe because we never read it and we always write it
		// TODO eliminate this once the fake client can handle creation without NS
		sa.Namespace = ns.Name

		if _, err := c.client.Core().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
			createFailures = append(createFailures, err)
		}
	}

	return utilerrors.Flatten(utilerrors.NewAggregate(createFailures))
}
// RunGet implements the generic Get command
// TODO: convert all direct flag accessors to a struct and pass that instead of cmd
func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error {
	if len(options.Raw) > 0 {
		restClient, err := f.RESTClient()
		if err != nil {
			return err
		}
		stream, err := restClient.Get().RequestURI(options.Raw).Stream()
		if err != nil {
			return err
		}
		defer stream.Close()

		_, err = io.Copy(out, stream)
		if err != nil && err != io.EOF {
			return err
		}
		return nil
	}

	selector := cmdutil.GetFlagString(cmd, "selector")
	allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces")
	showKind := cmdutil.GetFlagBool(cmd, "show-kind")
	mapper, typer, err := f.UnstructuredObject()
	if err != nil {
		return err
	}
	filterFuncs := f.DefaultResourceFilterFunc()
	filterOpts := f.DefaultResourceFilterOptions(cmd, allNamespaces)

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	if allNamespaces {
		enforceNamespace = false
	}

	if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) {
		fmt.Fprint(errOut, "You must specify the type of resource to get. ", valid_resources)

		fullCmdName := cmd.Parent().CommandPath()
		usageString := "Required resource not specified."
		if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
			usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
		}

		return cmdutil.UsageError(cmd, usageString)
	}

	argsHasNames, err := resource.HasNames(args)
	if err != nil {
		return err
	}
	// always show resources when getting by name or filename, or if the output
	// is machine-consumable, or if multiple resource kinds were requested.
	if len(options.Filenames) > 0 || argsHasNames || cmdutil.OutputsRawFormat(cmd) {
		if !cmd.Flag("show-all").Changed {
			cmd.Flag("show-all").Value.Set("true")
		}
	}

	export := cmdutil.GetFlagBool(cmd, "export")

	// handle watch separately since we cannot watch multiple resource types
	isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only")
	if isWatch || isWatchOnly {
		r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), unstructured.UnstructuredJSONScheme).
			NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
			FilenameParam(enforceNamespace, &options.FilenameOptions).
			SelectorParam(selector).
			ExportParam(export).
			ResourceTypeOrNameArgs(true, args...).
			SingleResourceType().
			Latest().
			Do()
		err := r.Err()
		if err != nil {
			return err
		}
		infos, err := r.Infos()
		if err != nil {
			return err
		}
		if len(infos) != 1 {
			return i18n.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos))
		}
		info := infos[0]
		mapping := info.ResourceMapping()
		printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces)
		if err != nil {
			return err
		}
		obj, err := r.Object()
		if err != nil {
			return err
		}

		// watching from resourceVersion 0 starts the watch at ~now and
		// will return an initial watch event. Starting from ~now, rather
		// than from the rv of the object, ensures that we start the watch
		// from inside the watch window, which the rv of the object might not be.
		rv := "0"
		isList := meta.IsListType(obj)
		if isList {
			// the resourceVersion of list objects is ~now but won't return
			// an initial watch event
			rv, err = mapping.MetadataAccessor.ResourceVersion(obj)
			if err != nil {
				return err
			}
		}

		// print the current object
		filteredResourceCount := 0
		if !isWatchOnly {
			if err := printer.PrintObj(obj, out); err != nil {
				return fmt.Errorf("unable to output the provided object: %v", err)
			}
			filteredResourceCount++
			cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts)
		}

		// print watched changes
		w, err := r.Watch(rv)
		if err != nil {
			return err
		}

		first := true
		filteredResourceCount = 0
		intr := interrupt.New(nil, w.Stop)
		intr.Run(func() error {
			_, err := watch.Until(0, w, func(e watch.Event) (bool, error) {
				if !isList && first {
					// drop the initial watch event in the single resource case
					first = false
					return false, nil
				}
				err := printer.PrintObj(e.Object, out)
				if err != nil {
					return false, err
				}
				filteredResourceCount++
				cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts)
				return false, nil
			})
			return err
		})
		return nil
	}

	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), unstructured.UnstructuredJSONScheme).
		NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
		FilenameParam(enforceNamespace, &options.FilenameOptions).
		SelectorParam(selector).
		ExportParam(export).
		ResourceTypeOrNameArgs(true, args...).
		ContinueOnError().
		Latest().
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	printer, generic, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return err
	}

	if generic {
		// we flattened the data from the builder, so we have individual items, but now we'd like to either:
		// 1. if there is more than one item, combine them all into a single list
		// 2. if there is a single item and that item is a list, leave it as its specific list
		// 3. if there is a single item and it is not a list, leave it as a single item
		var errs []error
		singleItemImplied := false
		infos, err := r.IntoSingleItemImplied(&singleItemImplied).Infos()
		if err != nil {
			if singleItemImplied {
				return err
			}
			errs = append(errs, err)
		}

		if len(infos) == 0 && len(errs) == 0 {
			outputEmptyListWarning(errOut)
		}

		res := ""
		if len(infos) > 0 {
			res = infos[0].ResourceMapping().Resource
		}

		var obj runtime.Object
		if !singleItemImplied || len(infos) > 1 {
			// we have more than one item, so coerce all items into a list
			list := &unstructured.UnstructuredList{
				Object: map[string]interface{}{
					"kind":       "List",
					"apiVersion": "v1",
					"metadata":   map[string]interface{}{},
				},
			}
			for _, info := range infos {
				list.Items = append(list.Items, info.Object.(*unstructured.Unstructured))
			}
			obj = list
		} else {
			obj = infos[0].Object
		}

		isList := meta.IsListType(obj)
		if isList {
			filteredResourceCount, items, err := cmdutil.FilterResourceList(obj, filterFuncs, filterOpts)
			if err != nil {
				return err
			}

			// take the filtered items and create a new list for display
			list := &unstructured.UnstructuredList{
				Object: map[string]interface{}{
					"kind":       "List",
					"apiVersion": "v1",
					"metadata":   map[string]interface{}{},
				},
			}
			if listMeta, err := meta.ListAccessor(obj); err == nil {
				list.Object["selfLink"] = listMeta.GetSelfLink()
				list.Object["resourceVersion"] = listMeta.GetResourceVersion()
			}

			for _, item := range items {
				list.Items = append(list.Items, item.(*unstructured.Unstructured))
			}
			if err := printer.PrintObj(list, out); err != nil {
				errs = append(errs, err)
			}

			cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts)
			return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
		}

		filteredResourceCount := 0
		if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered {
			if err != nil {
				glog.V(2).Infof("Unable to filter resource: %v", err)
			} else if err := printer.PrintObj(obj, out); err != nil {
				errs = append(errs, err)
			}
		} else if isFiltered {
			filteredResourceCount++
		}

		cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts)
		return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
	}

	allErrs := []error{}
	errs := sets.NewString()
	infos, err := r.Infos()
	if err != nil {
		allErrs = append(allErrs, err)
	}
	if len(infos) == 0 && len(allErrs) == 0 {
		outputEmptyListWarning(errOut)
	}

	objs := make([]runtime.Object, len(infos))
	for ix := range infos {
		objs[ix] = infos[ix].Object
	}

	sorting, err := cmd.Flags().GetString("sort-by")
	if err != nil {
		return err
	}
	var sorter *kubectl.RuntimeSort
	if len(sorting) > 0 && len(objs) > 1 {
		// TODO: questionable
		if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil {
			return err
		}
	}

	// use the default printer for each object
	printer = nil
	var lastMapping *meta.RESTMapping
	w := kubectl.GetNewTabWriter(out)
	filteredResourceCount := 0

	if resource.MultipleTypesRequested(args) || cmdutil.MustPrintWithKinds(objs, infos, sorter) {
		showKind = true
	}

	for ix := range objs {
		var mapping *meta.RESTMapping
		var original runtime.Object
		if sorter != nil {
			mapping = infos[sorter.OriginalPosition(ix)].Mapping
			original = infos[sorter.OriginalPosition(ix)].Object
		} else {
			mapping = infos[ix].Mapping
			original = infos[ix].Object
		}
		if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource {
			if printer != nil {
				w.Flush()
				cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts)
			}
			printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces)
			if err != nil {
				if !errs.Has(err.Error()) {
					errs.Insert(err.Error())
					allErrs = append(allErrs, err)
				}
				continue
			}

			// add linebreak between resource groups (if there is more than one)
			// skip linebreak above first resource group
			noHeaders := cmdutil.GetFlagBool(cmd, "no-headers")
			if lastMapping != nil && !noHeaders {
				fmt.Fprintf(errOut, "%s\n", "")
			}

			lastMapping = mapping
		}

		// try to convert before apply filter func
		decodedObj, _ := kubectl.DecodeUnknownObject(original)

		// filter objects if filter has been defined for current object
		if isFiltered, err := filterFuncs.Filter(decodedObj, filterOpts); isFiltered {
			if err == nil {
				filteredResourceCount++
				continue
			}
			if !errs.Has(err.Error()) {
				errs.Insert(err.Error())
				allErrs = append(allErrs, err)
			}
		}

		if resourcePrinter, found := printer.(*kubectl.HumanReadablePrinter); found {
			resourceName := resourcePrinter.GetResourceKind()
			if mapping != nil {
				if resourceName == "" {
					resourceName = mapping.Resource
				}
				if alias, ok := kubectl.ResourceShortFormFor(mapping.Resource); ok {
					resourceName = alias
				} else if resourceName == "" {
					resourceName = "none"
				}
			} else {
				resourceName = "none"
			}

			if showKind {
				resourcePrinter.EnsurePrintWithKind(resourceName)
			}

			if err := printer.PrintObj(decodedObj, w); err != nil {
				if !errs.Has(err.Error()) {
					errs.Insert(err.Error())
					allErrs = append(allErrs, err)
				}
			}
			continue
		}
		if err := printer.PrintObj(decodedObj, w); err != nil {
			if !errs.Has(err.Error()) {
				errs.Insert(err.Error())
				allErrs = append(allErrs, err)
			}
			continue
		}
	}
	w.Flush()
	if printer != nil && lastMapping != nil {
		cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts)
	}
	return utilerrors.NewAggregate(allErrs)
}
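// The helper below is not part of the original source; it is a minimal sketch
// of the de-duplication pattern RunGet uses above: error messages that have
// already been seen are tracked in a string set so the final aggregate reports
// each distinct failure only once. The function name is hypothetical.
func dedupeErrors(candidates []error) error {
	seen := sets.NewString()
	allErrs := []error{}
	for _, err := range candidates {
		if err == nil || seen.Has(err.Error()) {
			continue
		}
		seen.Insert(err.Error())
		allErrs = append(allErrs, err)
	}
	// NewAggregate returns nil when the slice is empty.
	return utilerrors.NewAggregate(allErrs)
}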
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	lbName := getLoadBalancerName(clusterName)
	pipName := getPublicIPName(clusterName, service)
	serviceName := getServiceName(service)
	glog.V(2).Infof("ensure(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)

	pip, err := az.ensurePublicIPExists(serviceName, pipName)
	if err != nil {
		return nil, err
	}

	sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
	if err != nil {
		return nil, err
	}
	sg, sgNeedsUpdate, err := az.reconcileSecurityGroup(sg, clusterName, service)
	if err != nil {
		return nil, err
	}
	if sgNeedsUpdate {
		glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
		_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
		if err != nil {
			return nil, err
		}
	}

	lb, existsLb, err := az.getAzureLoadBalancer(lbName)
	if err != nil {
		return nil, err
	}
	if !existsLb {
		lb = network.LoadBalancer{
			Name:                         &lbName,
			Location:                     &az.Location,
			LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
		}
	}

	lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodes)
	if err != nil {
		return nil, err
	}
	if !existsLb || lbNeedsUpdate {
		glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
		_, err = az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
		if err != nil {
			return nil, err
		}
	}

	// Add the machines to the backend pool if they're not already
	lbBackendName := getBackendPoolName(clusterName)
	lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName)
	hostUpdates := make([]func() error, len(nodes))
	for i, node := range nodes {
		localNodeName := node.Name
		f := func() error {
			err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID)
			if err != nil {
				return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
			}
			return nil
		}
		hostUpdates[i] = f
	}

	errs := utilerrors.AggregateGoroutines(hostUpdates...)
	if errs != nil {
		return nil, utilerrors.Flatten(errs)
	}

	glog.V(2).Infof("ensure(%s): FINISH - %s", serviceName, *pip.IPAddress)
	return &v1.LoadBalancerStatus{
		Ingress: []v1.LoadBalancerIngress{{IP: *pip.IPAddress}},
	}, nil
}
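// The sketch below is not part of the original source; it shows the
// concurrency pattern EnsureLoadBalancer uses for backend-pool membership:
// each per-node update is wrapped in a closure, the closures run in parallel
// via utilerrors.AggregateGoroutines, and any failures are flattened into a
// single error. The names runHostUpdates, nodeNames, and update are
// hypothetical and used only for illustration.
func runHostUpdates(nodeNames []string, update func(string) error) error {
	tasks := make([]func() error, len(nodeNames))
	for i, name := range nodeNames {
		name := name // capture the loop variable for the closure
		tasks[i] = func() error { return update(name) }
	}
	// AggregateGoroutines runs every task in its own goroutine and returns
	// nil when all of them succeed.
	if agg := utilerrors.AggregateGoroutines(tasks...); agg != nil {
		return utilerrors.Flatten(agg)
	}
	return nil
}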