func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   1.25,
		Steps:    6,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		itx := ipcmd.NewTransaction(kexec.New(), device)
		routes, err := itx.GetRoutes()
		if err != nil {
			return false, fmt.Errorf("could not get routes: %v", err)
		}
		for _, route := range routes {
			if strings.Contains(route, localSubnetCIDR) {
				itx.DeleteRoute(localSubnetCIDR)
				err = itx.EndTransaction()
				if err != nil {
					return false, fmt.Errorf("could not delete route: %v", err)
				}
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		glog.Errorf("Error removing %s route from dev %s: %v; if the route appears later it will not be deleted.", localSubnetCIDR, device, err)
	}
}
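// The function above retries route deletion with utilwait.ExponentialBackoff.
// Below is a minimal, standard-library-only sketch of the same retry-with-backoff
// pattern; the retryWithBackoff helper and its parameters are illustrative
// assumptions, not part of the original code, and use only time and fmt.
func retryWithBackoff(attempts int, initial time.Duration, factor float64, fn func() (bool, error)) error {
	delay := initial
	for i := 0; i < attempts; i++ {
		done, err := fn()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		// Not done yet: wait, then grow the delay for the next attempt.
		time.Sleep(delay)
		delay = time.Duration(float64(delay) * factor)
	}
	return fmt.Errorf("timed out after %d attempts", attempts)
}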
func (p *authV1JsonParser) parse(config *Config, raw []byte) error {
	var auth authV1
	if err := json.Unmarshal(raw, &auth); err != nil {
		return err
	}
	if len(auth.Domains) == 0 {
		return fmt.Errorf("no domains specified")
	}
	if len(auth.Type) == 0 {
		return fmt.Errorf("no auth type specified")
	}
	var (
		err      error
		headerer Headerer
	)
	switch auth.Type {
	case "basic":
		headerer, err = p.getBasicV1Headerer(auth.Credentials)
	case "oauth":
		headerer, err = p.getOAuthV1Headerer(auth.Credentials)
	default:
		err = fmt.Errorf("unknown auth type: %q", auth.Type)
	}
	if err != nil {
		return err
	}
	for _, domain := range auth.Domains {
		if _, ok := config.AuthPerHost[domain]; ok {
			return fmt.Errorf("auth for domain %q is already specified", domain)
		}
		config.AuthPerHost[domain] = headerer
	}
	return nil
}
func ExampleV_Validate() {
	type X struct {
		A string `validate:"long"`
		B string `validate:"short"`
		C string `validate:"long,short"`
		D string
	}
	vd := make(V)
	vd["long"] = func(i interface{}) error {
		s := i.(string)
		if len(s) < 5 {
			return fmt.Errorf("%q is too short", s)
		}
		return nil
	}
	vd["short"] = func(i interface{}) error {
		s := i.(string)
		if len(s) >= 5 {
			return fmt.Errorf("%q is too long", s)
		}
		return nil
	}
	fmt.Println(vd.Validate(X{
		A: "hello there",
		B: "hi",
		C: "help me",
		D: "I am not validated",
	}))
	// Output: [field C is invalid: "help me" is too long]
}
func getAppID(client *http.Client, url string) (string, error) {
	// Generate a pseudo-random token for handshaking.
	token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
	resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
	}
	if err != nil {
		return "", fmt.Errorf("failed reading response: %v", err)
	}
	// Check the token is present in response.
	if !bytes.Contains(body, []byte(token)) {
		return "", fmt.Errorf("token not found: want %q; body %q", token, body)
	}
	match := appIDRE.FindSubmatch(body)
	if match == nil {
		return "", fmt.Errorf("app ID not found: body %q", body)
	}
	return string(match[1]), nil
}
func (daemon *Daemon) reserveName(id, name string) (string, error) {
	if !validContainerNamePattern.MatchString(name) {
		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
	}
	if name[0] != '/' {
		name = "/" + name
	}
	if _, err := daemon.containerGraph.Set(name, id); err != nil {
		if !graphdb.IsNonUniqueNameError(err) {
			return "", err
		}
		conflictingContainer, err := daemon.GetByName(name)
		if err != nil {
			if strings.Contains(err.Error(), "Could not find entity") {
				return "", err
			}
			// Remove name and continue starting the container
			if err := daemon.containerGraph.Delete(name); err != nil {
				return "", err
			}
		} else {
			nameAsKnownByUser := strings.TrimPrefix(name, "/")
			return "", fmt.Errorf(
				"Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.",
				nameAsKnownByUser, stringid.TruncateID(conflictingContainer.ID))
		}
	}
	return name, nil
}
// GetGame gets the game information from the DB.
func GetGame(req GGReq) (*GGResp, error) {
	u, err := url.Parse(GDBURL)
	if err != nil {
		return nil, fmt.Errorf("parsing GDBURL %q: %s", GDBURL, err)
	}
	u.Path = GGPath
	q := url.Values{}
	switch {
	case req.ID != "":
		q.Set("id", req.ID)
	case req.Name != "":
		q.Set("name", req.Name)
		if req.Platform != "" {
			q.Set("platform", req.Platform)
		}
	default:
		return nil, fmt.Errorf("must provide an ID or Name")
	}
	u.RawQuery = q.Encode()
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("getting game url:%s, error:%s", u, err)
	}
	defer resp.Body.Close()
	r := &GGResp{}
	decoder := xml.NewDecoder(resp.Body)
	if err := decoder.Decode(r); err != nil {
		return nil, err
	}
	if r.XMLName.Local == "Error" {
		return nil, fmt.Errorf("GetGame error: %s", r.err)
	}
	r.err = ""
	return r, nil
}
func validateEndpoint(endpoint *Endpoint) error {
	log.Debugf("pinging registry endpoint %s", endpoint)

	// Try HTTPS ping to registry
	endpoint.URL.Scheme = "https"
	if _, err := endpoint.Ping(); err != nil {
		if endpoint.IsSecure {
			// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP.
			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
		}

		// If registry is insecure and HTTPS failed, fall back to HTTP.
		log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
		endpoint.URL.Scheme = "http"

		var err2 error
		if _, err2 = endpoint.Ping(); err2 == nil {
			return nil
		}

		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
	}

	return nil
}
func LoadImage(root string) (*Image, error) {
	// Load the json data
	jsonData, err := ioutil.ReadFile(jsonPath(root))
	if err != nil {
		return nil, err
	}
	var img Image
	if err := json.Unmarshal(jsonData, &img); err != nil {
		return nil, err
	}
	if err := ValidateId(img.Id); err != nil {
		return nil, err
	}
	// Check that the filesystem layer exists
	if stat, err := os.Stat(layerPath(root)); err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("Couldn't load image %s: no filesystem layer", img.Id)
		}
		return nil, err
	} else if !stat.IsDir() {
		return nil, fmt.Errorf("Couldn't load image %s: %s is not a directory", img.Id, layerPath(root))
	}
	return &img, nil
}
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
	// First perform verification of settings common across all platforms.
	if config != nil {
		if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) {
			return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
		}
	}

	if hostConfig == nil {
		return nil, nil
	}

	for port := range hostConfig.PortBindings {
		_, portStr := nat.SplitProtoPort(string(port))
		if _, err := nat.ParsePort(portStr); err != nil {
			return nil, fmt.Errorf("Invalid port specification: %q", portStr)
		}
		for _, pb := range hostConfig.PortBindings[port] {
			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
			if err != nil {
				return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
			}
		}
	}

	// Now do platform-specific verification
	return verifyPlatformContainerSettings(daemon, hostConfig, config)
}
func (s *Session) handleUnbind(stream conn.Conn, unbind *proto.Unbind) (err error) {
	// s.Debug("Unbinding tunnel")
	log.Println("[DEBUG] Unbinding tunnel")

	// remove it from the list of tunnels
	t, ok := s.delTunnel(unbind.Url)
	if !ok {
		err := fmt.Errorf("Failed to unbind tunnel %s: no tunnel found.", unbind.Url)
		log.Println("[ERROR]", err)
		// return s.Error("Failed to unbind tunnel %s: no tunnel found.", unbind.Url)
		return err
	}

	if err = t.shutdown(); err != nil {
		err := fmt.Errorf("Failed to unbind tunnel %s: %v", unbind.Url, err)
		log.Println("[ERROR]", err)
		// return s.Error("Failed to unbind tunnel %s: %v", unbind.Url, err)
		return err
	}

	// acknowledge success
	unbindResp := &proto.UnbindResp{}
	if err = proto.WriteMsg(stream, unbindResp); err != nil {
		err := fmt.Errorf("Failed to write unbind resp: %v", err)
		// return s.Error("Failed to write unbind resp: %v", err)
		log.Println("[ERROR]", err)
		return err
	}

	return
}
func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	name := vars["name"]

	copyData := &APICopy{}
	contentType := r.Header.Get("Content-Type")
	if contentType == "application/json" {
		if err := json.NewDecoder(r.Body).Decode(copyData); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("Content-Type not supported: %s", contentType)
	}

	if copyData.Resource == "" {
		return fmt.Errorf("Resource cannot be empty")
	}
	if copyData.Resource[0] == '/' {
		copyData.Resource = copyData.Resource[1:]
	}

	if err := srv.ContainerCopy(name, copyData.Resource, w); err != nil {
		utils.Errorf("%s", err.Error())
		return err
	}
	return nil
}
func (ctrl *Controller) authorizeKey(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
	marshaledKey := key.Marshal()
	for _, authorizedKey := range ctrl.authorizedKeys {
		if bytes.Compare(authorizedKey.Marshal(), marshaledKey) == 0 {
			return &ssh.Permissions{}, nil
		}
	}

	nodes, err := ctrl.cluster.GetDir("console/authorized_keys")
	if err != nil {
		if err == cluster.ErrNotFound {
			return nil, fmt.Errorf("unauthorized")
		}
		return nil, err
	}

	for path, value := range nodes {
		key, _, _, _, err := ssh.ParseAuthorizedKey([]byte(value))
		if err != nil {
			fmt.Printf("bad authorized key from etcd: %s: %s\n", path, err)
			// Skip keys that fail to parse; calling key.Marshal() on a nil key would panic.
			continue
		}
		if bytes.Compare(key.Marshal(), marshaledKey) == 0 {
			return &ssh.Permissions{}, nil
		}
	}

	return nil, fmt.Errorf("unauthorized")
}
func (ctrl *Controller) AddHostKeyFromCluster(host string) error {
	generate := func() (string, error) {
		// Generate an ECDSA key.
		key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
		if err != nil {
			return "", err
		}
		derBytes, err := x509.MarshalECPrivateKey(key)
		if err != nil {
			return "", err
		}
		w := &bytes.Buffer{}
		if err := pem.Encode(w, &pem.Block{Type: "EC PRIVATE KEY", Bytes: derBytes}); err != nil {
			return "", err
		}
		return w.String(), nil
	}

	pemString, err := ctrl.cluster.GetValueWithDefault(fmt.Sprintf("console/%s", host), generate)
	if err != nil {
		return fmt.Errorf("failed to get/generate host key: %s", err)
	}

	signer, err := ssh.ParsePrivateKey([]byte(pemString))
	if err != nil {
		return fmt.Errorf("failed to parse host key: %s", err)
	}

	ctrl.config.AddHostKey(signer)
	return nil
}
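// The generate closure above produces a PEM-encoded ECDSA private key. As a
// hedged, standard-library-only sketch (not part of the original code), the
// helper below shows the matching round trip: encode a fresh key to PEM, then
// decode and parse it back, which is essentially what the ssh parsing step
// above depends on.
func ecdsaKeyPEMRoundTrip() error {
	key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		return err
	}
	derBytes, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return err
	}
	// Encode to PEM, then decode and parse back to verify the encoding.
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: derBytes})
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return fmt.Errorf("failed to decode PEM block")
	}
	if _, err := x509.ParseECPrivateKey(block.Bytes); err != nil {
		return fmt.Errorf("failed to parse EC private key: %v", err)
	}
	return nil
}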
// InitIssueWithMetaAndFields returns an Issue with values from fieldsConfig properly set.
// * metaProject should contain meta information about the project where the issue should be created.
// * metaIssuetype is the meta information about the issue type that needs to be created.
// * fieldsConfig is a key->value pair where the key represents the name of the field as seen in the UI
//   and the value is the string value for that particular key.
// Note: This method doesn't verify that the fieldsConfig is complete with mandatory fields. The fieldsConfig is
// supposed to be already verified with MetaIssueType.CheckCompleteAndAvailable. It will however return an
// error if a key is not found.
// All values will be packed into Unknowns. This is much more convenient. If the struct fields need to be
// configured as well, marshalling and unmarshalling will set the proper fields.
func InitIssueWithMetaAndFields(metaProject *MetaProject, metaIssuetype *MetaIssueType, fieldsConfig map[string]string) (*Issue, error) {
	issue := new(Issue)
	issueFields := new(IssueFields)
	issueFields.Unknowns = tcontainer.NewMarshalMap()

	// map the field names the User presented to jira's internal key
	allFields, _ := metaIssuetype.GetAllFields()
	for key, value := range fieldsConfig {
		jiraKey, found := allFields[key]
		if !found {
			return nil, fmt.Errorf("Key %s is not found in the list of fields.", key)
		}

		valueType, err := metaIssuetype.Fields.String(jiraKey + "/schema/type")
		if err != nil {
			return nil, err
		}
		switch valueType {
		case "array":
			elemType, err := metaIssuetype.Fields.String(jiraKey + "/schema/items")
			if err != nil {
				return nil, err
			}
			switch elemType {
			case "component":
				issueFields.Unknowns[jiraKey] = []Component{{Name: value}}
			default:
				issueFields.Unknowns[jiraKey] = []string{value}
			}
		case "string":
			issueFields.Unknowns[jiraKey] = value
		case "date":
			issueFields.Unknowns[jiraKey] = value
		case "any":
			// Treat any as string
			issueFields.Unknowns[jiraKey] = value
		case "project":
			issueFields.Unknowns[jiraKey] = Project{
				Name: metaProject.Name,
				ID:   metaProject.Id,
			}
		case "priority":
			issueFields.Unknowns[jiraKey] = Priority{Name: value}
		case "user":
			issueFields.Unknowns[jiraKey] = User{
				Name: value,
			}
		case "issuetype":
			issueFields.Unknowns[jiraKey] = IssueType{
				Name: value,
			}
		default:
			return nil, fmt.Errorf("Unknown issue type encountered: %s for %s", valueType, key)
		}
	}
	issue.Fields = issueFields
	return issue, nil
}
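// A hedged usage sketch for the constructor above (not part of the original
// code): metaProject and metaIssuetype are assumed to have been fetched
// elsewhere, and "Summary" is assumed to be a field name exposed by that
// issue type in the UI.
func buildIssueFromConfig(metaProject *MetaProject, metaIssuetype *MetaIssueType) (*Issue, error) {
	fieldsConfig := map[string]string{
		"Summary": "Example summary populated from fieldsConfig",
	}
	issue, err := InitIssueWithMetaAndFields(metaProject, metaIssuetype, fieldsConfig)
	if err != nil {
		return nil, fmt.Errorf("building issue: %v", err)
	}
	return issue, nil
}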
// handleConn processes conn. This is run in a separate goroutine.
func (s *Service) handleConn(conn net.Conn) error {
	// Read request from connection.
	req, err := s.readRequest(conn)
	if err != nil {
		return fmt.Errorf("read request: %s", err)
	}

	// Retrieve shard.
	sh := s.TSDBStore.Shard(req.GetShardID())

	// Return error response if the shard doesn't exist.
	if sh == nil {
		if err := s.writeResponse(conn, &internal.Response{
			Error: proto.String(fmt.Sprintf("shard not found: id=%d", req.GetShardID())),
		}); err != nil {
			return fmt.Errorf("write error response: %s", err)
		}
		return nil
	}

	// Write successful response.
	if err := s.writeResponse(conn, &internal.Response{}); err != nil {
		return fmt.Errorf("write response: %s", err)
	}

	// Write shard to response.
	if _, err := sh.WriteTo(conn); err != nil {
		return fmt.Errorf("write shard: %s", err)
	}

	return nil
}
// ShardReader returns a reader for streaming shard data.
// Returned ReadCloser must be closed by the caller.
func (c *Client) ShardReader(id uint64) (io.ReadCloser, error) {
	// Connect to remote server.
	conn, err := tcp.Dial("tcp", c.host, MuxHeader)
	if err != nil {
		return nil, err
	}

	// Send request to server.
	if err := c.writeRequest(conn, &internal.Request{ShardID: proto.Uint64(id)}); err != nil {
		return nil, fmt.Errorf("write request: %s", err)
	}

	// Read response from the server.
	resp, err := c.readResponse(conn)
	if err != nil {
		return nil, fmt.Errorf("read response: %s", err)
	}

	// If there was an error then return it and close connection.
	if resp.GetError() != "" {
		conn.Close()
		return nil, errors.New(resp.GetError())
	}

	// Returning remaining stream for caller to consume.
	return conn, nil
}
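// A hedged usage sketch for ShardReader (not part of the original code): it
// assumes an already-constructed *Client and copies the shard stream to a
// local file, honoring the requirement that the returned ReadCloser is closed
// by the caller.
func copyShardToFile(c *Client, shardID uint64, dst string) error {
	r, err := c.ShardReader(shardID)
	if err != nil {
		return err
	}
	defer r.Close()

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := io.Copy(f, r); err != nil {
		return fmt.Errorf("copy shard stream: %s", err)
	}
	return nil
}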
func (s *Server) startServices() error {
	for _, service := range s.Services {
		s.Logger.Printf("D! opening service: %T", service)
		if err := service.Open(); err != nil {
			return fmt.Errorf("open service %T: %s", service, err)
		}
		s.Logger.Printf("D! opened service: %T", service)

		// Apply config overrides after the config override service has been opened and before any dynamic services.
		if service == s.ConfigOverrideService && !s.config.SkipConfigOverrides {
			// Apply initial config updates
			s.Logger.Println("D! applying configuration overrides")
			configs, err := s.ConfigOverrideService.Config()
			if err != nil {
				return errors.Wrap(err, "failed to apply config overrides")
			}
			for service, config := range configs {
				if srv, ok := s.DynamicServices[service]; !ok {
					return fmt.Errorf("found configuration override for unknown service %q", service)
				} else {
					s.Logger.Println("D! applying configuration overrides for", service)
					if err := srv.Update(config); err != nil {
						return errors.Wrapf(err, "failed to update configuration for service %s", service)
					}
				}
			}
		}
	}
	return nil
}
// HeadImageSize returns the size of the customer-service (kf) head image.
// If the kf account has no head image, it returns ErrNoHeadImage.
func (info *KfInfo) HeadImageSize() (size int, err error) {
	HeadImageURL := info.HeadImageURL
	if HeadImageURL == "" {
		err = ErrNoHeadImage
		return
	}

	lastSlashIndex := strings.LastIndex(HeadImageURL, "/")
	if lastSlashIndex == -1 {
		err = fmt.Errorf("invalid HeadImageURL: %s", HeadImageURL)
		return
	}
	HeadImageIndex := lastSlashIndex + 1
	if HeadImageIndex == len(HeadImageURL) {
		err = fmt.Errorf("invalid HeadImageURL: %s", HeadImageURL)
		return
	}

	sizeStr := HeadImageURL[HeadImageIndex:]
	size, err = strconv.Atoi(sizeStr)
	if err != nil {
		err = fmt.Errorf("invalid HeadImageURL: %s", HeadImageURL)
		return
	}
	if size == 0 {
		size = 640
	}
	return
}
// loadUnionHistoryMessage loads the history messages of a union (guild).
func loadUnionHistoryMessage(unionId string) {
	// Return if this union's history-message file has already been loaded.
	if ifHasLoaded(unionId) {
		return
	}

	// Return if the history directory does not exist.
	if !fileUtil.IsDirExists(con_HistoryPath) {
		return
	}

	// Read the union's history-message file.
	bytes, err := fileUtil.ReadFileBytes(filepath.Join(con_HistoryPath, getUnionHistoryMessageFileName(unionId)))
	if err != nil {
		// If the file does not exist, simply return.
		if os.IsNotExist(err) {
			return
		}
		panic(fmt.Errorf("failed to load union history messages: %s", err))
	}

	// Unmarshal the file contents.
	messageList := make([]*responseDataObject.SocketResponseObject, 0, configBLL.MaxHistoryCount())
	if err = json.Unmarshal(bytes, &messageList); err != nil {
		panic(fmt.Errorf("failed to unmarshal union history messages: %s", err))
	}

	// Lock before updating the shared map.
	unionHistoryMessageMutex.Lock()
	defer unionHistoryMessageMutex.Unlock()

	// Store into unionHistoryMessageList.
	unionHistoryMessageList[unionId] = messageList
}
func GetStringParam(m map[string]interface{}, p string, required bool) (string, bool, error) {
	v, have := m[p]
	if !have {
		if required {
			return "", false, fmt.Errorf("Parameter %s missing", p)
		}
		return "", false, nil
	}
	switch v := v.(type) {
	case string:
		return v, true, nil
	case []interface{}:
		var acc string
		for _, x := range v {
			switch x := x.(type) {
			case string:
				acc += x
			default:
				return "", true, fmt.Errorf("Parameter %s type %T wrong at %v %T", p, v, x, x)
			}
		}
		return acc, true, nil
	default:
		return "", true, fmt.Errorf("Parameter %s type %T wrong", p, v)
	}
}
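// A hedged usage sketch for GetStringParam (not part of the original code):
// it exercises the two accepted value shapes, a plain string and a
// []interface{} of strings; the params map and key names are illustrative only.
func exampleGetStringParam() {
	params := map[string]interface{}{
		"name":  "example",
		"query": []interface{}{"SELECT ", "1"},
	}
	if v, ok, err := GetStringParam(params, "name", true); err == nil && ok {
		fmt.Println("name:", v) // name: example
	}
	if v, ok, err := GetStringParam(params, "query", true); err == nil && ok {
		fmt.Println("query:", v) // query: SELECT 1
	}
	// Missing and not required: ok == false, err == nil.
	if _, ok, err := GetStringParam(params, "missing", false); !ok && err == nil {
		fmt.Println("missing param absent but not required")
	}
}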
func queryNamespace(conn *sqlConn, parentID sqlbase.ID, name string) (sqlbase.ID, error) {
	rows, err := makeQuery(
		`SELECT id FROM system.namespace WHERE parentID = $1 AND name = $2`,
		parentID, sqlbase.NormalizeName(name))(conn)
	if err != nil {
		return 0, err
	}
	defer func() { _ = rows.Close() }()

	if len(rows.Columns()) != 1 {
		return 0, fmt.Errorf("unexpected result columns: %d", len(rows.Columns()))
	}
	vals := make([]driver.Value, 1)
	if err := rows.Next(vals); err == io.EOF {
		// An empty result set means the namespace entry was not found.
		return 0, fmt.Errorf("%s not found", name)
	} else if err != nil {
		return 0, err
	}
	switch t := vals[0].(type) {
	case int64:
		return sqlbase.ID(t), nil
	default:
		return 0, fmt.Errorf("unexpected result type: %T", vals[0])
	}
}
func (plugin *OsdnNode) getLocalSubnet() (string, error) {
	var subnet *osapi.HostSubnet
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    8,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		subnet, err = plugin.osClient.HostSubnets().Get(plugin.hostName)
		if err == nil {
			return true, nil
		} else if kapierrors.IsNotFound(err) {
			glog.Warningf("Could not find an allocated subnet for node: %s, Waiting...", plugin.hostName)
			return false, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return "", fmt.Errorf("Failed to get subnet for this host: %s, error: %v", plugin.hostName, err)
	}

	if err = plugin.networkInfo.validateNodeIP(subnet.HostIP); err != nil {
		return "", fmt.Errorf("Failed to validate own HostSubnet: %v", err)
	}

	return subnet.Subnet, nil
}
func setHairpinMode(link netlink.Link, enable bool) error {
	err := netlink.LinkSetHairpin(link, enable)
	if err != nil && err != syscall.EINVAL {
		// If error is not EINVAL something else went wrong, bail out right away
		return fmt.Errorf("unable to set hairpin mode on %s via netlink: %v", link.Attrs().Name, err)
	}

	// Hairpin mode successfully set up
	if err == nil {
		return nil
	}

	// The netlink method failed with EINVAL which is probably because of an older
	// kernel. Try one more time via the sysfs method.
	path := filepath.Join("/sys/class/net", link.Attrs().Name, "brport/hairpin_mode")

	var val []byte
	if enable {
		val = []byte{'1', '\n'}
	} else {
		val = []byte{'0', '\n'}
	}

	if err := ioutil.WriteFile(path, val, 0644); err != nil {
		return fmt.Errorf("unable to set hairpin mode on %s via sysfs: %v", link.Attrs().Name, err)
	}

	return nil
}
func (p Patch) test(doc *partialDoc, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	val, err := con.get(key)
	if err != nil {
		return err
	}

	if val == nil {
		if op.value().raw == nil {
			return nil
		}
		return fmt.Errorf("Testing value %s failed", path)
	}

	if val.equal(op.value()) {
		return nil
	}

	return fmt.Errorf("Testing value %s failed", path)
}
func getScheduler(st settings, demandUpdate chan struct{}) (scheduler.Scheduler, error) {
	var s scheduler.Scheduler

	switch st.schedulerType {
	case "DOCKER":
		log.Info("Scheduling with Docker remote API")
		s = docker.NewScheduler(st.pullImages, st.dockerHost)
	case "MARATHON":
		log.Info("Scheduling with Mesos / Marathon")
		s = marathon.NewScheduler(st.marathonAPI, demandUpdate)
	case "ECS":
		return nil, fmt.Errorf("Scheduling with ECS not yet supported. Tweet with hashtag #MicroscaleECS if you'd like us to add this next!")
	case "KUBERNETES":
		return nil, fmt.Errorf("Scheduling with Kubernetes not yet supported. Tweet with hashtag #MicroscaleK8S if you'd like us to add this next!")
	case "NOMAD":
		return nil, fmt.Errorf("Scheduling with Nomad not yet supported. Tweet with hashtag #MicroscaleNomad if you'd like us to add this next!")
	case "TOY":
		log.Info("Scheduling with toy scheduler")
		s = toy.NewScheduler()
	default:
		return nil, fmt.Errorf("Bad value for MSS_SCHEDULER: %s", st.schedulerType)
	}

	if s == nil {
		return nil, fmt.Errorf("No scheduler")
	}

	return s, nil
}
func (b *basicAuditor) AuditRequest(req *authorization.Request, pluginRes *authorization.Response) error {
	if req == nil {
		return fmt.Errorf("Authorization request is nil")
	}
	if pluginRes == nil {
		return fmt.Errorf("Authorization response is nil")
	}

	err := b.init()
	if err != nil {
		return err
	}

	// Default - file
	fields := logrus.Fields{
		"method": req.RequestMethod,
		"uri":    req.RequestURI,
		"user":   req.User,
		"allow":  pluginRes.Allow,
		"msg":    pluginRes.Msg,
	}

	// pluginRes is already known to be non-nil here; only add the error field when it is set.
	if pluginRes.Err != "" {
		fields["err"] = pluginRes.Err
	}

	b.logger.WithFields(fields).Info("Request")
	return nil
}
// GetGameList gets a list of games matching the request from the DB.
func GetGameList(req GGLReq) (*GGLResp, error) {
	u, err := url.Parse(GDBURL)
	if err != nil {
		return nil, fmt.Errorf("parsing GDBURL %q: %s", GDBURL, err)
	}
	u.Path = GGLPath
	q := url.Values{}
	if req.Name == "" {
		return nil, fmt.Errorf("must provide Name")
	}
	q.Set("name", req.Name)
	if req.Platform != "" {
		q.Set("platform", req.Platform)
	}
	if req.Genre != "" {
		q.Set("genre", req.Genre)
	}
	u.RawQuery = q.Encode()
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("getting game list url:%s, error:%s", u, err)
	}
	defer resp.Body.Close()
	r := &GGLResp{}
	decoder := xml.NewDecoder(resp.Body)
	if err := decoder.Decode(r); err != nil {
		return nil, err
	}
	if r.XMLName.Local == "Error" {
		return nil, fmt.Errorf("GetGameList error: %s", r.err)
	}
	r.err = ""
	return r, nil
}
func TestV_Validate_multi(t *testing.T) {
	type X struct {
		A int `validate:"nonzero,odd"`
	}
	vd := make(V)
	vd["nonzero"] = func(i interface{}) error {
		n := i.(int)
		if n == 0 {
			return fmt.Errorf("should be nonzero")
		}
		return nil
	}
	vd["odd"] = func(i interface{}) error {
		n := i.(int)
		if n&1 == 0 {
			return fmt.Errorf("%d is not odd", n)
		}
		return nil
	}
	errs := vd.Validate(X{
		A: 0,
	})
	if len(errs) != 2 {
		t.Fatalf("wrong number of errors for two failures: %v", errs)
	}
	if errs[0].Error() != "field A is invalid: should be nonzero" {
		t.Fatal("first error should be nonzero:", errs[0])
	}
	if errs[1].Error() != "field A is invalid: 0 is not odd" {
		t.Fatal("second error should be odd:", errs[1])
	}
}
func (lb *FileBackend) BuildACI(layerNumber int, layerID string, dockerURL *types.ParsedDockerURL, outputDir string, tmpBaseDir string, curPwl []string, compression common.Compression) (string, *schema.ImageManifest, error) {
	tmpDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-")
	if err != nil {
		return "", nil, fmt.Errorf("error creating dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	j, err := getJson(lb.file, layerID)
	if err != nil {
		return "", nil, fmt.Errorf("error getting image json: %v", err)
	}

	layerData := types.DockerImageData{}
	if err := json.Unmarshal(j, &layerData); err != nil {
		return "", nil, fmt.Errorf("error unmarshaling layer data: %v", err)
	}

	tmpLayerPath := path.Join(tmpDir, layerID)
	tmpLayerPath += ".tar"

	layerFile, err := extractEmbeddedLayer(lb.file, layerID, tmpLayerPath)
	if err != nil {
		return "", nil, fmt.Errorf("error getting layer from file: %v", err)
	}
	defer layerFile.Close()

	util.Debug("Generating layer ACI...")
	aciPath, manifest, err := common.GenerateACI(layerNumber, layerData, dockerURL, outputDir, layerFile, curPwl, compression)
	if err != nil {
		return "", nil, fmt.Errorf("error generating ACI: %v", err)
	}

	return aciPath, manifest, nil
}
func validateMounts(mounts []api.Mount) error {
	for _, mount := range mounts {
		// Target must always be absolute
		if !filepath.IsAbs(mount.Target) {
			return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target)
		}

		switch mount.Type {
		// The checks on abs paths are required due to the container API confusing
		// volume mounts as bind mounts when the source is absolute (and vice-versa)
		// See #25253
		// TODO: This is probably not necessary once #22373 is merged
		case api.MountTypeBind:
			if !filepath.IsAbs(mount.Source) {
				return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source)
			}
		case api.MountTypeVolume:
			if filepath.IsAbs(mount.Source) {
				return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source)
			}
		case api.MountTypeTmpfs:
			if mount.Source != "" {
				return fmt.Errorf("invalid tmpfs source, source must be empty")
			}
		default:
			return fmt.Errorf("invalid mount type: %s", mount.Type)
		}
	}
	return nil
}