// update performs a rolling update of the replication controller named by
// k.ControllerName, recording progress and the outcome on the webhook h.
func (k *Krud) update(h *Webhook) error {
	h.UpdateAttempt = true
	h.UpdateStart = time.Now()
	defer func() {
		h.UpdateEnd = time.Now()
	}()
	conf := &client.Config{
		Host: k.Endpoint,
	}
	client, err := client.New(conf)
	if err != nil {
		return err
	}
	rcs := client.ReplicationControllers(k.Namespace)
	// Fetch two copies of the controller: oldRc stays as the current spec,
	// newRc is mutated into the replacement spec below.
	oldRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	newRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	hash, err := api.HashObject(oldRc, client.Codec)
	if err != nil {
		return err
	}
	h.UpdateID = hash
	newRc.Name = fmt.Sprintf("%s-%s", k.ControllerName, hash)
	newRc.ResourceVersion = ""
	apply := func(key, value string, ms ...map[string]string) {
		for _, m := range ms {
			m[key] = value
		}
	}
	apply(k.DeploymentKey, hash, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	apply("run", k.ControllerName, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	ruconf := kubectl.RollingUpdaterConfig{
		Out: &lockBuffer{
			k: k,
			h: h,
		},
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   time.Second * 3, // todo: change to time.Minute
		Timeout:        time.Minute * 5,
		Interval:       time.Second * 3,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
		CleanupPolicy:  kubectl.RenameRollingUpdateCleanupPolicy,
	}
	ruc := kubectl.NewRollingUpdaterClient(client)
	println("doing rolling update")
	err = kubectl.NewRollingUpdater(k.Namespace, ruc).Update(&ruconf)
	println("done")
	k.Lock()
	h.UpdateSuccess = err == nil
	k.Unlock()
	return err
}
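// The lockBuffer passed as RollingUpdaterConfig.Out above is not shown in
// this excerpt. A minimal sketch, assuming it is an io.Writer that serializes
// the updater's progress output into the webhook record under the Krud mutex;
// the UpdateLog field is hypothetical:
type lockBuffer struct {
	k *Krud
	h *Webhook
}

func (l *lockBuffer) Write(p []byte) (int, error) {
	l.k.Lock()
	defer l.k.Unlock()
	l.h.UpdateLog += string(p) // hypothetical field collecting updater output
	return len(p), nil
}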
func TestUpdateWithRetries(t *testing.T) {
	codec := testapi.Codec()
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: "rc",
			Labels: map[string]string{
				"foo": "bar",
			},
		},
		Spec: api.ReplicationControllerSpec{
			Selector: map[string]string{
				"foo": "bar",
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: api.PodSpec{
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSClusterFirst,
				},
			},
		},
	}

	// Test end to end updating of the rc with retries. Essentially make sure
	// the update handler sees the right updates, failures in update/get are
	// handled properly, and that the updated rc with new resource version is
	// returned to the caller. Without any of these rollingupdate will fail
	// cryptically.
	newRc := *rc
	newRc.ResourceVersion = "2"
	newRc.Spec.Selector["baz"] = "foobar"
	updates := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, &newRc)},
	}
	gets := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, rc)},
	}
	fakeClient := &client.FakeRESTClient{
		Codec: codec,
		Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT":
				update := updates[0]
				updates = updates[1:]
				// We should always get an update with a valid rc even when the
				// get fails. The rc should always contain the update.
				if c, ok := readOrDie(t, req, codec).(*api.ReplicationController); !ok || !reflect.DeepEqual(rc, c) {
					t.Errorf("Unexpected update body, got %+v expected %+v", c, rc)
				} else if sel, ok := c.Spec.Selector["baz"]; !ok || sel != "foobar" {
					t.Errorf("Expected selector label update, got %+v", c.Spec.Selector)
				} else {
					delete(c.Spec.Selector, "baz")
				}
				return update, nil
			case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET":
				get := gets[0]
				gets = gets[1:]
				return get, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	clientConfig := &client.Config{Version: testapi.Version()}
	client := client.NewOrDie(clientConfig)
	client.Client = fakeClient.Client

	if rc, err := updateWithRetries(
		client.ReplicationControllers("default"), rc, func(c *api.ReplicationController) {
			c.Spec.Selector["baz"] = "foobar"
		}); err != nil {
		t.Errorf("unexpected error: %v", err)
	} else if sel, ok := rc.Spec.Selector["baz"]; !ok || sel != "foobar" || rc.ResourceVersion != "2" {
		t.Errorf("Expected updated rc, got %+v", rc)
	}
	if len(updates) != 0 || len(gets) != 0 {
		t.Errorf("Remaining updates %+v gets %+v", updates, gets)
	}
}
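// objBody and readOrDie are test helpers not shown in this excerpt. A minimal
// sketch, assuming objBody wraps a codec-encoded object as a response body and
// readOrDie decodes a request body back into an API object (Codec.Decode here
// reflects the era of this client API and may differ in other versions):
func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser {
	return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
}

func readOrDie(t *testing.T, req *http.Request, codec runtime.Codec) runtime.Object {
	data, err := ioutil.ReadAll(req.Body)
	if err != nil {
		t.Errorf("error reading request body: %v", err)
		t.FailNow()
	}
	obj, err := codec.Decode(data)
	if err != nil {
		t.Errorf("error decoding request body: %v", err)
		t.FailNow()
	}
	return obj
}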
func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobra.Command, args []string) error {
	if len(os.Args) > 1 && os.Args[1] == "run-container" {
		printDeprecationWarning("run", "run-container")
	}
	if len(args) != 1 {
		return cmdutil.UsageError(cmd, "NAME is required for run")
	}
	interactive := cmdutil.GetFlagBool(cmd, "stdin")
	tty := cmdutil.GetFlagBool(cmd, "tty")
	if tty && !interactive {
		return cmdutil.UsageError(cmd, "-i/--stdin is required for containers with --tty=true")
	}
	replicas := cmdutil.GetFlagInt(cmd, "replicas")
	if interactive && replicas != 1 {
		return cmdutil.UsageError(cmd, fmt.Sprintf("-i/--stdin requires that replicas is 1, found %d", replicas))
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}
	restartPolicy, err := getRestartPolicy(cmd, interactive)
	if err != nil {
		return err
	}
	if restartPolicy != api.RestartPolicyAlways && replicas != 1 {
		return cmdutil.UsageError(cmd, fmt.Sprintf("--restart=%s requires that --replicas=1, found %d", restartPolicy, replicas))
	}
	generatorName := cmdutil.GetFlagString(cmd, "generator")
	if len(generatorName) == 0 {
		if restartPolicy == api.RestartPolicyAlways {
			generatorName = "run/v1"
		} else {
			generatorName = "run-pod/v1"
		}
	}
	generator, found := f.Generator(generatorName)
	if !found {
		return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not found.", generatorName))
	}
	names := generator.ParamNames()
	params := kubectl.MakeParams(cmd, names)
	params["name"] = args[0]
	err = kubectl.ValidateParams(names, params)
	if err != nil {
		return err
	}
	obj, err := generator.Generate(params)
	if err != nil {
		return err
	}
	inline := cmdutil.GetFlagString(cmd, "overrides")
	if len(inline) > 0 {
		var objType string
		if restartPolicy == api.RestartPolicyAlways {
			objType = "ReplicationController"
		} else {
			objType = "Pod"
		}
		obj, err = cmdutil.Merge(obj, inline, objType)
		if err != nil {
			return err
		}
	}

	// TODO: extract this flag to a central location, when such a location exists.
	if !cmdutil.GetFlagBool(cmd, "dry-run") {
		if restartPolicy == api.RestartPolicyAlways {
			obj, err = client.ReplicationControllers(namespace).Create(obj.(*api.ReplicationController))
		} else {
			obj, err = client.Pods(namespace).Create(obj.(*api.Pod))
		}
		if err != nil {
			return err
		}
	}

	// Attach by default when running interactively, unless --attach was set
	// explicitly.
	attachFlag := cmd.Flags().Lookup("attach")
	attach := cmdutil.GetFlagBool(cmd, "attach")
	if !attachFlag.Changed && interactive {
		attach = true
	}
	if attach {
		opts := &AttachOptions{
			In:     cmdIn,
			Out:    cmdOut,
			Err:    cmdErr,
			Stdin:  interactive,
			TTY:    tty,
			Attach: &DefaultRemoteAttach{},
		}
		config, err := f.ClientConfig()
		if err != nil {
			return err
		}
		opts.Config = config
		client, err := f.Client()
		if err != nil {
			return err
		}
		opts.Client = client
		if restartPolicy == api.RestartPolicyAlways {
			return handleAttachReplicationController(client, obj.(*api.ReplicationController), opts)
		}
		return handleAttachPod(client, obj.(*api.Pod), opts)
	}
	return f.PrintObject(cmd, obj, cmdOut)
}
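// getRestartPolicy is not shown in this excerpt. A plausible sketch, assuming
// it reads the --restart flag, defaults based on interactivity (OnFailure for
// interactive runs, Always otherwise), and rejects unknown values:
func getRestartPolicy(cmd *cobra.Command, interactive bool) (api.RestartPolicy, error) {
	restart := cmdutil.GetFlagString(cmd, "restart")
	if len(restart) == 0 {
		if interactive {
			return api.RestartPolicyOnFailure, nil
		}
		return api.RestartPolicyAlways, nil
	}
	switch policy := api.RestartPolicy(restart); policy {
	case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever:
		return policy, nil
	default:
		return "", cmdutil.UsageError(cmd, fmt.Sprintf("invalid restart policy: %s", restart))
	}
}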