// Test fleet running through an etcd2 proxy.
func Proxy(c platform.TestCluster) error {
	masterconf.CoreOS.Etcd2.Discovery, _ = c.GetDiscoveryURL(1)
	master, err := c.NewMachine(masterconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer master.Destroy()

	proxyconf.CoreOS.Etcd2.Discovery = masterconf.CoreOS.Etcd2.Discovery
	proxy, err := c.NewMachine(proxyconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer proxy.Destroy()

	err = platform.InstallFile(strings.NewReader(fleetunit), proxy, "/home/core/hello.service")
	if err != nil {
		return fmt.Errorf("InstallFile: %s", err)
	}

	// fleet needs a moment to settle; retry until the unit starts
	fleetStart := func() error {
		_, err = proxy.SSH("fleetctl start /home/core/hello.service")
		if err != nil {
			return fmt.Errorf("fleetctl start: %s", err)
		}
		return nil
	}
	if err := util.Retry(5, 5*time.Second, fleetStart); err != nil {
		return fmt.Errorf("fleetctl start failed: %v", err)
	}

	var status []byte
	fleetList := func() error {
		status, err = proxy.SSH("fleetctl list-units -l -fields active -no-legend")
		if err != nil {
			return fmt.Errorf("fleetctl list-units: %s", err)
		}
		if !bytes.Equal(status, []byte("active")) {
			return fmt.Errorf("unit not active")
		}
		return nil
	}
	if err := util.Retry(5, 1*time.Second, fleetList); err != nil {
		return fmt.Errorf("fleetctl list-units failed: %v", err)
	}

	return nil
}

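// Every snippet below leans on util.Retry(attempts, delay, f). A minimal
// sketch of the behavior assumed throughout -- call f up to attempts times,
// sleeping delay between failed tries -- not necessarily the verbatim
// library implementation:
package util

import "time"

// Retry calls f until it succeeds or attempts is exhausted, sleeping delay
// between tries, and returns the last error seen (nil on success).
func Retry(attempts int, delay time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}
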
func checkEtcdVersion(cluster platform.Cluster, m platform.Machine, expected string) error {
	var b []byte
	var err error

	checker := func() error {
		cmd := cluster.NewCommand("curl", "-L", fmt.Sprintf("http://%v:2379/version", m.IP()))
		b, err = cmd.Output()
		if err != nil {
			return fmt.Errorf("curl failed: %v", err)
		}
		return nil
	}

	if err := util.Retry(15, 10*time.Second, checker); err != nil {
		return err
	}

	plog.Infof("got version: %s", b)

	if string(b) != expected {
		return fmt.Errorf("expected %v, got %s", expected, b)
	}

	return nil
}

func checkEtcdVersion(cluster platform.Cluster, m platform.Machine, expected string) error {
	var b []byte

	checker := func() error {
		out, err := m.SSH(fmt.Sprintf("curl -s -L http://%s:2379/version", m.IP()))
		if err != nil {
			return fmt.Errorf("curl failed: %s: %v", out, err)
		}
		b = out
		return nil
	}

	if err := util.Retry(15, 10*time.Second, checker); err != nil {
		return err
	}

	plog.Infof("got version: %s", b)

	if string(b) != expected {
		return fmt.Errorf("expected %v, got %s", expected, b)
	}

	return nil
}

// poll cluster-health until the cluster reports healthy
func getClusterHealth(m platform.Machine, csize int) error {
	var err error
	var b []byte

	checker := func() error {
		b, err = m.SSH("etcdctl cluster-health")
		if err != nil {
			return err
		}
		// response should include "healthy" twice per member line and
		// once in the cluster summary
		if strings.Count(string(b), "healthy") != (csize*2)+1 {
			return fmt.Errorf("unexpected etcdctl output")
		}
		plog.Infof("cluster healthy")
		return nil
	}

	err = util.Retry(15, 10*time.Second, checker)
	if err != nil {
		return fmt.Errorf("health polling failed: %v: %s", err, b)
	}

	return nil
}

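// For reference, etcdctl cluster-health output has roughly the shape below
// (illustrative sample for a two-member cluster, addresses made up), which is
// why "healthy" is counted (csize*2)+1 times -- twice per member line plus
// once in the summary:
//
//	member 6e3bd23ae5f1eae0 is healthy: got healthy result from http://10.0.0.2:2379
//	member 924e2e83e93f2560 is healthy: got healthy result from http://10.0.0.3:2379
//	cluster is healthy
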
func sshCheck(gm *gceMachine) error {
	var err error

	// Allow a few authentication failures in case setup is slow.
	sshchecker := func() error {
		gm.sshClient, err = gm.gc.sshAgent.NewClient(gm.IP())
		if err != nil {
			return err
		}
		return nil
	}

	if err := util.Retry(sshRetries, sshTimeout, sshchecker); err != nil {
		return err
	}

	// sanity check
	out, err := gm.SSH("grep ^ID= /etc/os-release")
	if err != nil {
		return err
	}
	if !bytes.Equal(out, []byte("ID=coreos")) {
		return fmt.Errorf("Unexpected SSH output: %s", out)
	}

	return nil
}

func nginxCheck(master platform.Machine, nodes []platform.Machine) error {
	pod := strings.NewReader(nginxPodYAML)

	if err := platform.InstallFile(pod, master, "./nginx-pod.yaml"); err != nil {
		return err
	}

	if _, err := master.SSH("./kubectl create -f nginx-pod.yaml"); err != nil {
		return err
	}

	// wait for pod status to be 'Running'
	podIsRunning := func() error {
		b, err := master.SSH("./kubectl get pod nginx -o=template -t={{.status.phase}}")
		if err != nil {
			return err
		}
		if !bytes.Equal(b, []byte("Running")) {
			return fmt.Errorf("nginx pod not running: %s", b)
		}
		return nil
	}

	if err := util.Retry(10, 5*time.Second, podIsRunning); err != nil {
		return err
	}

	// delete pod
	_, err := master.SSH("./kubectl delete pods nginx")
	if err != nil {
		return err
	}

	return nil
}

func discovery(cluster platform.Cluster, version int) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// start etcd on each machine asynchronously.
	for _, m := range cluster.Machines() {
		if err := doStart(m, version, false); err != nil {
			return err
		}
	}

	// block until each instance is reported as started.
	for i, m := range cluster.Machines() {
		if err := doStart(m, version, true); err != nil {
			return err
		}
		plog.Infof("etcd instance%d started", i)
	}

	var keyMap map[string]string
	var retryFuncs []func() error

	retryFuncs = append(retryFuncs, func() error {
		var err error
		keyMap, err = SetKeys(cluster, 5)
		if err != nil {
			return err
		}
		return nil
	})

	retryFuncs = append(retryFuncs, func() error {
		var quorumRead bool
		if version == 2 {
			quorumRead = true
		}
		if err := CheckKeys(cluster, keyMap, quorumRead); err != nil {
			return err
		}
		return nil
	})

	for _, retry := range retryFuncs {
		if err := util.Retry(5, 5*time.Second, retry); err != nil {
			return fmt.Errorf("discovery failed health check: %v", err)
		}
		// NOTE(pb): etcd1 seems to fail in an odd way when I try quorum
		// read, instead just sleep between setting and getting.
		time.Sleep(2 * time.Second)
	}

	return nil
}

// Start a multi-node cluster from the official Kubernetes 1.0 guides. Once
// up, do a couple of basic smoke checks. See:
// http://kubernetes.io/v1.0/docs/getting-started-guides/coreos/coreos_multinode_cluster.html
func MultiNodeSmoke(c platform.TestCluster) error {
	const clusterSize = 3

	// spawn master
	master, err := c.NewMachine(masterConfig)
	if err != nil {
		return err
	}

	// get master private IP and place into nodeConfig
	nodeConfig = strings.Replace(nodeConfig, "<master-private-ip>", master.PrivateIP(), -1)
	var nodeConfigs []string
	for i := 0; i < clusterSize-1; i++ {
		nodeConfigs = append(nodeConfigs, nodeConfig)
	}

	// spawn nodes
	nodes, err := platform.NewMachines(c, nodeConfigs)
	if err != nil {
		return err
	}

	// get kubectl in master
	_, err = master.SSH("wget -q https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl")
	if err != nil {
		return err
	}
	_, err = master.SSH("chmod +x kubectl")
	if err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		if err = nodeCheck(master, nodes); err != nil {
			return err
		}
		return nil
	}
	if err := util.Retry(10, 5*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, nodes); err != nil {
		return err
	}

	// http://kubernetes.io/v1.0/docs/user-guide/secrets/ Also, ensures
	// https://github.com/coreos/bugs/issues/447 does not re-occur.
	if err = secretCheck(master, nodes); err != nil {
		return err
	}

	return nil
}

// JournalRemote tests that systemd-journal-remote can read log entries from
// a systemd-journal-gatewayd server.
func JournalRemote(c platform.TestCluster) error {
	// start gatewayd and log a message
	gateway, err := c.NewMachine(gatewayconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer gateway.Destroy()

	// log a unique message on gatewayd machine
	msg := "supercalifragilisticexpialidocious"
	out, err := gateway.SSH("logger " + msg)
	if err != nil {
		return fmt.Errorf("logger: %v: %v", out, err)
	}

	// spawn a machine to read from gatewayd
	collector, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer collector.Destroy()

	// collect logs from gatewayd machine
	cmd := fmt.Sprintf("sudo systemd-run --unit systemd-journal-remote-client /usr/lib/systemd/systemd-journal-remote --url http://%s:19531", gateway.PrivateIP())
	out, err = collector.SSH(cmd)
	if err != nil {
		return fmt.Errorf("failed to start systemd-journal-remote: %v: %v", out, err)
	}

	// find the message on the collector
	journalReader := func() error {
		cmd = fmt.Sprintf("sudo journalctl _HOSTNAME=%s -t core --file /var/log/journal/remote/remote-%s:19531.journal", gatewayconf.Hostname, gateway.PrivateIP())
		out, err = collector.SSH(cmd)
		if err != nil {
			return fmt.Errorf("journalctl: %v: %v", out, err)
		}
		if !strings.Contains(string(out), msg) {
			return fmt.Errorf("journal missing entry: expected %q got %q", msg, out)
		}
		return nil
	}

	if err := util.Retry(5, 2*time.Second, journalReader); err != nil {
		return err
	}

	return nil
}

func DownloadFile(file, url string) error {
	plog.Infof("Downloading %s to %s", url, file)

	if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil {
		return err
	}

	download := func() error {
		return downloadFile(file, url)
	}
	if err := util.Retry(5, 1*time.Second, download); err != nil {
		return err
	}

	return nil
}

// run clustering-based tests
func ClusterTests(c platform.TestCluster) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range c.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// make sure etcd is up and running
	var keyMap map[string]string
	var retryFuncs []func() error

	retryFuncs = append(retryFuncs, func() error {
		var err error
		keyMap, err = etcd.SetKeys(c, 3)
		if err != nil {
			return err
		}
		return nil
	})

	retryFuncs = append(retryFuncs, func() error {
		if err := etcd.CheckKeys(c, keyMap, true); err != nil {
			return err
		}
		return nil
	})

	for _, retry := range retryFuncs {
		if err := util.Retry(5, 5*time.Second, retry); err != nil {
			return fmt.Errorf("etcd failed health check: %v", err)
		}
	}

	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}

	return nil
}

// Test that timesyncd starts using the local NTP server
func NTP(c platform.TestCluster) error {
	m, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m.Destroy()

	out, err := m.SSH("networkctl status eth0")
	if err != nil {
		return fmt.Errorf("networkctl: %v", err)
	}
	if !bytes.Contains(out, []byte("NTP: 10.0.0.1")) {
		return fmt.Errorf("Bad network config:\n%s", out)
	}

	plog.Info("Waiting for systemd-timesyncd.service")

	checker := func() error {
		out, err = m.SSH("systemctl status systemd-timesyncd.service")
		if err != nil {
			return fmt.Errorf("systemctl: %v", err)
		}
		if !bytes.Contains(out, []byte(`Status: "Synchronized to time server 10.0.0.1:123 (10.0.0.1)."`)) {
			return fmt.Errorf("unexpected systemd-timesyncd status: %v", out)
		}
		plog.Info("systemd-timesyncd.service is working!")
		return nil
	}

	if err = util.Retry(60, 1*time.Second, checker); err != nil {
		return err
	}

	return nil
}

func DownloadFile(file, fileURL string, client *http.Client) error {
	plog.Infof("Downloading %s to %s", fileURL, file)

	// handle gs:// bucket urls by using the api to get the media link
	parseURL, err := url.Parse(fileURL)
	if err != nil {
		return err
	}
	if parseURL.Scheme == "gs" {
		if client == nil {
			client = http.DefaultClient
		}
		api, err := storage.New(client)
		if err != nil {
			plog.Fatal(err)
		}
		path := strings.TrimLeft(parseURL.Path, "/")
		obj, err := api.Objects.Get(parseURL.Host, path).Do()
		if err != nil {
			plog.Fatal(err)
		}
		fileURL = obj.MediaLink
	}

	if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil {
		return err
	}

	download := func() error {
		return downloadFile(file, fileURL, client)
	}
	if err := util.Retry(5, 1*time.Second, download); err != nil {
		return err
	}

	return nil
}

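// Both DownloadFile variants defer to an unexported downloadFile helper that
// is not shown here. A plausible sketch, assuming it simply fetches the URL
// with the given client and streams the body into file (hypothetical, not the
// repository's actual implementation; uses fmt, io, net/http, os):
func downloadFile(file, url string, client *http.Client) error {
	if client == nil {
		client = http.DefaultClient
	}

	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}

	f, err := os.Create(file)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, resp.Body)
	return err
}
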
// commonMachineChecks tests a machine for various error conditions such as ssh
// being available and no systemd units failing at the time ssh is reachable.
// It also ensures the remote system is running CoreOS.
//
// TODO(mischief): better error messages.
func commonMachineChecks(m Machine) error {
	// ensure ssh works
	sshChecker := func() error {
		_, err := m.SSH("true")
		if err != nil {
			return err
		}
		return nil
	}
	if err := util.Retry(sshRetries, sshTimeout, sshChecker); err != nil {
		return fmt.Errorf("ssh unreachable: %v", err)
	}

	// ensure we're talking to a CoreOS system
	out, err := m.SSH("grep ^ID= /etc/os-release")
	if err != nil {
		return fmt.Errorf("no /etc/os-release file")
	}
	if !bytes.Equal(out, []byte("ID=coreos")) {
		return fmt.Errorf("not a CoreOS instance")
	}

	// ensure no systemd units failed during boot
	out, err = m.SSH("systemctl --no-legend --state failed list-units")
	if err != nil {
		return fmt.Errorf("systemctl: %v: %v", out, err)
	}
	if len(out) > 0 {
		return fmt.Errorf("some systemd units failed:\n%s", out)
	}

	return nil
}

func runUpdatePayload(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		plog.Fatal("No args accepted")
	}

	plog.Info("Generating update payload")

	// check for update file, generate if it doesn't exist
	version := "latest"
	dir := sdk.BuildImageDir(version)
	payload := "coreos_production_update.gz"

	_, err := os.Stat(filepath.Join(dir, payload))
	if err != nil {
		err = sdkomaha.GenerateFullUpdate("latest", true)
		if err != nil {
			plog.Fatalf("Building full update failed: %v", err)
		}
	}

	plog.Info("Bringing up test harness cluster")

	cluster, err := platform.NewQemuCluster(kola.QEMUOptions)
	if err != nil {
		plog.Fatalf("Cluster failed: %v", err)
	}
	defer cluster.Destroy()
	qc := cluster.(*platform.QEMUCluster)

	svc := &updateServer{
		updatePath: dir,
		payload:    payload,
	}
	qc.OmahaServer.Updater = svc

	// tell omaha server to handle file requests for the images dir
	qc.OmahaServer.Mux.Handle(dir+"/", http.StripPrefix(dir+"/", http.FileServer(http.Dir(dir))))

	_, port, err := net.SplitHostPort(qc.OmahaServer.Addr().String())
	if err != nil {
		plog.Errorf("SplitHostPort failed: %v", err)
		return
	}

	tmplVals := map[string]string{
		"Server": fmt.Sprintf("10.0.0.1:%s", port),
	}

	tmpl := template.Must(template.New("userdata").Parse(userdata))
	buf := new(bytes.Buffer)
	err = tmpl.Execute(buf, tmplVals)
	if err != nil {
		plog.Errorf("Template execution failed: %v", err)
		return
	}

	plog.Infof("Spawning test machine")

	m, err := cluster.NewMachine(buf.String())
	if err != nil {
		plog.Errorf("Machine failed: %v", err)
		return
	}

	plog.Info("Checking for boot from USR-A partition")

	/* check that we are on USR-A. */
	if err := checkUsrPartition(m, []string{"PARTUUID=" + sdk.USRAUUID.String(), "PARTLABEL=USR-A"}); err != nil {
		plog.Errorf("Did not find USR-A partition: %v", err)
		return
	}

	plog.Infof("Triggering update_engine")

	/* trigger update, monitor the progress. */
	out, err := m.SSH("update_engine_client -check_for_update")
	if err != nil {
		plog.Errorf("Executing update_engine_client failed: %v: %v", out, err)
		return
	}

	checker := func() error {
		envs, err := m.SSH("update_engine_client -status 2>/dev/null")
		if err != nil {
			return err
		}

		em := splitNewlineEnv(string(envs))
		if em["CURRENT_OP"] != "UPDATE_STATUS_UPDATED_NEED_REBOOT" {
			return fmt.Errorf("have not arrived in reboot state: currently at %s", em["CURRENT_OP"])
		}

		return nil
	}

	if err := util.Retry(12, 10*time.Second, checker); err != nil {
		plog.Errorf("Applying update payload failed: %v", err)
		return
	}

	plog.Info("Rebooting test machine")

	/* reboot it */
	if err := platform.Reboot(m); err != nil {
		plog.Errorf("Rebooting machine failed: %v", err)
		return
	}

	plog.Info("Checking for boot from USR-B partition")

	/* check that we are on USR-B now. */
	if err := checkUsrPartition(m, []string{"PARTUUID=" + sdk.USRBUUID.String(), "PARTLABEL=USR-B"}); err != nil {
		plog.Errorf("Did not find USR-B partition: %v", err)
		return
	}

	plog.Info("Update complete!")
}

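// runUpdatePayload parses update_engine_client -status output with a
// splitNewlineEnv helper that is not shown above. A minimal sketch, assuming
// the output is newline-separated KEY=VALUE pairs (hypothetical, not
// necessarily the repository's actual implementation):
func splitNewlineEnv(envs string) map[string]string {
	m := make(map[string]string)
	for _, line := range strings.Split(strings.TrimSpace(envs), "\n") {
		// split on the first '=' only; values may themselves contain '='
		kv := strings.SplitN(line, "=", 2)
		if len(kv) == 2 {
			m[kv[0]] = kv[1]
		}
	}
	return m
}
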
// vxlan tests that flannel can send packets using the vxlan backend.
func vxlan(c platform.TestCluster) error {
	machs := c.Machines()
	return util.Retry(12, 10*time.Second, func() error {
		return ping(machs[0], machs[2], "flannel.1")
	})
}

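// vxlan assumes a ping helper that exercises the flannel overlay between two
// machines. A hedged sketch of what such a helper could look like -- the
// repository's actual helper and its address lookup may differ:
func ping(from, to platform.Machine, iface string) error {
	// find the peer's address on the flannel interface
	out, err := to.SSH(fmt.Sprintf("ip -4 -o addr show %s | awk '{print $4}' | cut -d/ -f1", iface))
	if err != nil {
		return fmt.Errorf("resolving %s address: %s: %v", iface, out, err)
	}
	addr := strings.TrimSpace(string(out))

	// ping the peer's overlay address from the source machine
	out, err = from.SSH(fmt.Sprintf("ping -c 3 %s", addr))
	if err != nil {
		return fmt.Errorf("ping %s failed: %s: %v", addr, out, err)
	}
	return nil
}
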
func testNFS(c platform.TestCluster, nfsversion int) error {
	m1, err := c.NewMachine(nfsserverconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m1.Destroy()

	plog.Info("NFS server booted.")

	/* poke a file in /tmp */
	tmp, err := m1.SSH("mktemp")
	if err != nil {
		return fmt.Errorf("Machine.SSH: %s", err)
	}

	plog.Infof("Test file %q created on server.", tmp)

	c2 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "mnt.mount",
					Command: "start",
					Content: fmt.Sprintf(mounttmpl, m1.PrivateIP(), nfsversion),
				},
			},
		},
		Hostname: "nfs2",
	}

	m2, err := c.NewMachine(c2.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m2.Destroy()

	plog.Info("NFS client booted.")
	plog.Info("Waiting for NFS mount on client...")

	checkmount := func() error {
		status, err := m2.SSH("systemctl is-active mnt.mount")
		if err != nil || string(status) != "active" {
			return fmt.Errorf("mnt.mount status is %q: %v", status, err)
		}
		plog.Info("Got NFS mount.")
		return nil
	}

	if err = util.Retry(10, 3*time.Second, checkmount); err != nil {
		return err
	}

	_, err = m2.SSH(fmt.Sprintf("stat /mnt/%s", path.Base(string(tmp))))
	if err != nil {
		return fmt.Errorf("file %q does not exist", tmp)
	}

	return nil
}

func (ac *awsCluster) NewMachine(userdata string) (Machine, error) {
	cloudConfig, err := config.NewCloudConfig(userdata)
	if err != nil {
		return nil, err
	}

	if err = ac.agent.UpdateConfig(cloudConfig); err != nil {
		return nil, err
	}

	if cloudConfig.Hostname == "" {
		id := make([]byte, 4)
		_, _ = rand.Read(id)
		cloudConfig.Hostname = fmt.Sprintf("%x", id)
	}

	ud := base64.StdEncoding.EncodeToString([]byte(cloudConfig.String()))
	cnt := int64(1)

	inst := ec2.RunInstancesInput{
		ImageId:        &ac.conf.AMI,
		MinCount:       &cnt,
		MaxCount:       &cnt,
		KeyName:        &ac.conf.KeyName, // this is only useful if you wish to ssh in for debugging
		InstanceType:   &ac.conf.InstanceType,
		SecurityGroups: []*string{&ac.conf.SecurityGroup},
		UserData:       &ud,
	}

	resp, err := ac.api.RunInstances(&inst)
	if err != nil {
		return nil, err
	}

	ids := []*string{resp.Instances[0].InstanceId}

	if err := waitForAWSInstances(ac.api, ids, 5*time.Minute); err != nil {
		return nil, err
	}

	getinst := &ec2.DescribeInstancesInput{
		InstanceIds: ids,
	}

	insts, err := ac.api.DescribeInstances(getinst)
	if err != nil {
		return nil, err
	}

	mach := &awsMachine{
		cluster: ac,
		mach:    insts.Reservations[0].Instances[0],
	}

	// Allow a few authentication failures in case setup is slow.
	sshchecker := func() error {
		mach.sshClient, err = mach.cluster.agent.NewClient(mach.IP())
		if err != nil {
			return err
		}
		return nil
	}

	if err := util.Retry(sshRetries, sshTimeout, sshchecker); err != nil {
		mach.Destroy()
		return nil, err
	}

	ac.addMach(mach)

	return mach, nil
}

func (qc *qemuCluster) NewMachine(cfg string) (Machine, error) {
	id := uuid.NewV4()

	// hacky solution for cloud config ip substitution
	// NOTE: escaping is not supported
	qc.mu.Lock()
	netif := qc.Dnsmasq.GetInterface("br0")
	ip := strings.Split(netif.DHCPv4[0].String(), "/")[0]

	cfg = strings.Replace(cfg, "$public_ipv4", ip, -1)
	cfg = strings.Replace(cfg, "$private_ipv4", ip, -1)

	cloudConfig, err := config.NewCloudConfig(cfg)
	if err != nil {
		qc.mu.Unlock()
		return nil, err
	}

	if err = qc.SSHAgent.UpdateConfig(cloudConfig); err != nil {
		qc.mu.Unlock()
		return nil, err
	}

	if cloudConfig.Hostname == "" {
		cloudConfig.Hostname = id.String()[:8]
	}
	qc.mu.Unlock()

	configDrive, err := local.NewConfigDrive(cloudConfig)
	if err != nil {
		return nil, err
	}

	qm := &qemuMachine{
		qc:          qc,
		id:          id.String(),
		configDrive: configDrive,
		netif:       netif,
	}

	disk, err := setupDisk(qc.conf.DiskImage)
	if err != nil {
		return nil, err
	}
	defer disk.Close()

	qc.mu.Lock()

	tap, err := qc.NewTap("br0")
	if err != nil {
		qc.mu.Unlock()
		return nil, err
	}
	defer tap.Close()

	qmMac := qm.netif.HardwareAddr.String()
	qmCfg := qm.configDrive.Directory
	qm.qemu = qm.qc.NewCommand(
		"qemu-system-x86_64",
		"-machine", "accel=kvm",
		"-cpu", "host",
		"-smp", "2",
		"-m", "1024",
		"-uuid", qm.id,
		"-display", "none",
		"-add-fd", "fd=3,set=1",
		"-drive", "file=/dev/fdset/1,media=disk,if=virtio,format=raw",
		"-netdev", "tap,id=tap,fd=4",
		"-device", "virtio-net,netdev=tap,mac="+qmMac,
		"-fsdev", "local,id=cfg,security_model=none,readonly,path="+qmCfg,
		"-device", "virtio-9p-pci,fsdev=cfg,mount_tag=config-2")

	qc.mu.Unlock()

	cmd := qm.qemu.(*local.NsCmd)
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = append(cmd.ExtraFiles, disk)     // fd=3
	cmd.ExtraFiles = append(cmd.ExtraFiles, tap.File) // fd=4

	if err = qm.qemu.Start(); err != nil {
		return nil, err
	}

	// Allow a few authentication failures in case setup is slow.
	sshchecker := func() error {
		qm.qc.mu.Lock()
		defer qm.qc.mu.Unlock()
		qm.sshClient, err = qm.qc.SSHAgent.NewClient(qm.IP())
		if err != nil {
			return err
		}
		return nil
	}

	if err := util.Retry(sshRetries, sshTimeout, sshchecker); err != nil {
		qm.Destroy()
		return nil, err
	}

	out, err := qm.SSH("grep ^ID= /etc/os-release")
	if err != nil {
		qm.Destroy()
		return nil, err
	}

	if !bytes.Equal(out, []byte("ID=coreos")) {
		qm.Destroy()
		return nil, fmt.Errorf("Unexpected SSH output: %s", out)
	}

	qc.mu.Lock()
	qc.machines[qm.ID()] = qm
	qc.mu.Unlock()

	return Machine(qm), nil
}

// Start a multi-node cluster from the official CoreOS guides on manual
// installation. Once up, do a couple of basic smoke checks. See:
// https://coreos.com/kubernetes/docs/latest/getting-started.html
func CoreOSBasic(c platform.TestCluster, version string) error {
	// start single-node etcd
	etcdNode, err := c.NewMachine(etcdConfig)
	if err != nil {
		return err
	}
	if err := etcd.GetClusterHealth(etcdNode, 1); err != nil {
		return err
	}

	master, err := c.NewMachine("")
	if err != nil {
		return err
	}

	options := map[string]string{
		"HYPERKUBE_ACI":       "quay.io/coreos/hyperkube",
		"MASTER_HOST":         master.PrivateIP(),
		"ETCD_ENDPOINTS":      fmt.Sprintf("http://%v:2379", etcdNode.PrivateIP()),
		"CONTROLLER_ENDPOINT": fmt.Sprintf("https://%v:443", master.PrivateIP()),
		"K8S_SERVICE_IP":      "10.3.0.1",
		"K8S_VER":             version,
		"KUBELET_PATH":        "/usr/lib/coreos/kubelet-wrapper",
	}

	// generate TLS assets on master
	if err := generateMasterTLSAssets(master, options); err != nil {
		return err
	}

	// create 3 worker nodes
	workerConfigs := []string{"", "", ""}
	workers, err := platform.NewMachines(c, workerConfigs)
	if err != nil {
		return err
	}

	// generate TLS assets on workers by transferring the CA from the master
	if err := generateWorkerTLSAssets(master, workers); err != nil {
		return err
	}

	// configure nodes via generic install scripts
	if err := runInstallScript(master, controllerInstallScript, options); err != nil {
		return fmt.Errorf("Installing controller: %v", err)
	}
	for _, worker := range workers {
		if err := runInstallScript(worker, workerInstallScript, options); err != nil {
			return fmt.Errorf("Installing worker: %v", err)
		}
	}

	// configure kubectl
	if err := configureKubectl(master, master.PrivateIP(), version); err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		return nodeCheck(master, workers)
	}
	if err := util.Retry(15, 10*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, workers); err != nil {
		return err
	}

	// http://kubernetes.io/v1.0/docs/user-guide/secrets/ Also, ensures
	// https://github.com/coreos/bugs/issues/447 does not re-occur.
	if err = secretCheck(master, workers); err != nil {
		return err
	}

	return nil
}