Example #1
// getDownloadURLs returns the download URLs for the named template, or an
// empty list if the type is primitive.
func getDownloadURLs(tName string) []string {
	qName := path.Join(*template_registry, tName)
	provider := getRegistryProvider()
	result, err := registry.GetDownloadURLs(provider, qName)
	if err != nil {
		panic(fmt.Errorf("cannot get URLs for %s: %v", tName, err))
	}

	return result
}
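
A hypothetical call site, assuming the getDownloadURLs function above; the wrapper name printDownloadURLs and the template name "redis/v1" are illustrative only:

// Hypothetical call site (illustrative only): resolve a short template name
// to its download URLs. A primitive type yields an empty slice, so the loop
// body simply never runs.
func printDownloadURLs() {
	for _, u := range getDownloadURLs("redis/v1") {
		fmt.Printf("would fetch %s\n", u)
	}
}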
Example #2
// ResolveTypes resolves the types in the supplied configuration and returns
// resolved type definitions in t.ImportFiles. Types can be either
// primitive (i.e., built in), resolved (i.e., already in t.ImportFiles), or
// remote (i.e., described by a URL that must be fetched to resolve the type).
func (tr *typeResolver) ResolveTypes(config *common.Configuration, imports []*common.ImportFile) ([]*common.ImportFile, error) {
	existing := map[string]bool{}
	for _, v := range imports {
		existing[v.Name] = true
	}

	fetched := map[string][]*common.ImportFile{}
	// TODO(vaikas): Need to account for multiple URLs being fetched for a given type.
	toFetch := make([]*fetchUnit, 0, tr.maxUrls)
	for _, r := range config.Resources {
		// Map the type to fetchable URLs, if any; skip it if it's a
		// non-fetchable type (a primitive, for example).
		urls, urlRegistry, err := registry.GetDownloadURLs(tr.rp, r.Type)
		if err != nil {
			return nil, resolverError(config, fmt.Errorf("Failed to understand download url for %s: %v", r.Type, err))
		}
		if !existing[r.Type] {
			f := &fetchUnit{}
			for _, u := range urls {
				if len(u) > 0 {
					f.urls = append(f.urls, fetchableURL{urlRegistry, u})
					// Add to existing map so it is not fetched multiple times.
					existing[r.Type] = true
				}
			}
			if len(f.urls) > 0 {
				toFetch = append(toFetch, f)
				fetched[f.urls[0].url] = append(fetched[f.urls[0].url],
					&common.ImportFile{Name: r.Type, Path: f.urls[0].url})
			}
		}
	}

	count := 0
	for len(toFetch) > 0 {
		// 1. If the URL is a short github URL, resolve it to a download URL
		// 2. Fetch the import URL; exit the loop when no URLs are left
		// 3. Check/handle the HTTP status
		// 4. Store the results in all ImportFiles from that URL
		// 5. Check for the optional schema file at import URL + .schema
		// 6. Repeat steps 2 and 3 for the schema file
		// 7. Add each schema import to toFetch if not already fetched
		// 8. Mark the URL done and return to step 1
		if count >= tr.maxUrls {
			return nil, resolverError(config,
				fmt.Errorf("Number of imports exceeds maximum of %d", tr.maxUrls))
		}

		templates := []string{}
		url := toFetch[0].urls[0]
		for _, u := range toFetch[0].urls {
			template, err := tr.performHTTPGet(u.reg, u.url, false)
			if err != nil {
				return nil, resolverError(config, err)
			}
			templates = append(templates, template)
		}

		// Parse the fetched content once, then assign it to every import
		// file that references this URL.
		template, err := parseContent(templates)
		if err != nil {
			return nil, resolverError(config, err)
		}
		for _, i := range fetched[url.url] {
			i.Content = template
		}

		schemaURL := url.url + schemaSuffix
		sch, err := tr.performHTTPGet(url.reg, schemaURL, true)
		if err != nil {
			return nil, resolverError(config, err)
		}

		if sch != "" {
			var s common.Schema
			if err := yaml.Unmarshal([]byte(sch), &s); err != nil {
				return nil, resolverError(config, err)
			}
			// Here we handle any nested imports in the schema we've just fetched.
			for _, v := range s.Imports {
				i := &common.ImportFile{Name: v.Name}
				var existingSchema string
				urls, urlRegistry, conversionErr := registry.GetDownloadURLs(tr.rp, v.Path)
				if conversionErr != nil {
					return nil, resolverError(config, fmt.Errorf("Failed to understand download url for %s: %v", v.Path, conversionErr))
				}
				if len(urls) == 0 {
					// If it's not a fetchable URL, we need to use the type name as is, since it is a short name
					// for a schema.
					urls = []string{v.Path}
				}
				for _, u := range urls {
					if len(fetched[u]) == 0 {
						// If this import URL is new to us, add it to the URLs to fetch.
						toFetch = append(toFetch, &fetchUnit{[]fetchableURL{{urlRegistry, u}}})
					} else {
						// This import URL was already fetched, so reuse its contents.
						// If we also found a schema for that URL, record the schema
						// contents for reuse as well.
						if fetched[u][0].Content != "" {
							i.Content = fetched[u][0].Content
							if len(fetched[u+schemaSuffix]) > 0 {
								existingSchema = fetched[u+schemaSuffix][0].Content
							}
						}
					}
					fetched[u] = append(fetched[u], i)
					if existingSchema != "" {
						fetched[u+schemaSuffix] = append(fetched[u+schemaSuffix],
							&common.ImportFile{Name: v.Name + schemaSuffix, Content: existingSchema})
					}
				}
			}

			// Add the schema we've fetched as the schema for any templates which used this URL.
			for _, i := range fetched[url.url] {
				schemaImportName := i.Name + schemaSuffix
				fetched[schemaURL] = append(fetched[schemaURL],
					&common.ImportFile{Name: schemaImportName, Content: sch})
			}
		}

		count++
		toFetch = toFetch[1:]
	}

	ret := []*common.ImportFile{}
	for _, v := range fetched {
		ret = append(ret, v...)
	}

	return ret, nil
}
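
ResolveTypes relies on two small helper types that this example does not show. Below is a minimal sketch, inferred from how their fields are used above (f.urls, u.reg, u.url); the field names follow that usage, but the concrete type of reg is an assumption:

// fetchableURL pairs a download URL with the registry it came from, so
// performHTTPGet can fetch it through the right registry.
type fetchableURL struct {
	reg registry.Registry // assumed type; the code above only passes it to performHTTPGet
	url string
}

// fetchUnit groups the alternative URLs that resolve a single type.
type fetchUnit struct {
	urls []fetchableURL
}

Grouping the candidate URLs for one type into a fetchUnit lets the resolver treat them as a single unit of work in the toFetch queue.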