func resolveConfiguration() (*configuration.Configuration, error) { var configurationPath string if flag.NArg() > 0 { configurationPath = flag.Arg(0) } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") } if configurationPath == "" { return nil, fmt.Errorf("configuration path unspecified") } fp, err := os.Open(configurationPath) if err != nil { return nil, err } config, err := configuration.Parse(fp) if err != nil { return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) } return config, nil }
func resolveConfiguration(args []string) (*configuration.Configuration, error) { var configurationPath string if len(args) > 0 { configurationPath = args[0] } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") } if configurationPath == "" { return nil, fmt.Errorf("configuration path unspecified") } fp, err := os.Open(configurationPath) if err != nil { return nil, err } defer fp.Close() config, err := configuration.Parse(fp) if err != nil { return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) } return config, nil }
func main() { var configPath, reposPath string flag.StringVar(&configPath, "config", "", "path to a config file") flag.StringVar(&reposPath, "repos", "", "file with a list of repos") flag.Parse() if configPath == "" { fmt.Fprintln(os.Stderr, "must supply a config file with -config") flag.Usage() return } // Parse config file configFile, err := os.Open(configPath) if err != nil { panic(fmt.Sprintf("error opening config file: %v", err)) } defer configFile.Close() config, err := configuration.Parse(configFile) if err != nil { panic(fmt.Sprintf("error parsing config file: %v", err)) } ctx := context.Background() driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) if err != nil { panic(fmt.Sprintf("error creating storage driver: %v", err)) } registry, _ := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) var repos []string if reposPath != "" { reposFile, err := os.Open(reposPath) if err != nil { panic(fmt.Sprintf("could not open repos file: %v", err)) } scanner := bufio.NewScanner(reposFile) for scanner.Scan() { repoName := scanner.Text() if len(repoName) > 0 { if repoName[0] == '+' { repoName = repoName[1:] } repos = append(repos, repoName) } } } else { repos = make([]string, maxRepos) n, err := registry.Repositories(ctx, repos, "") if err != nil && err != io.EOF { panic(fmt.Sprintf("unexpected error getting repo: %v", err)) } if n == maxRepos { panic("too many repositories") } repos = repos[:n] } var wg sync.WaitGroup repoChan := make(chan string) for i := 0; i < 30; i++ { wg.Add(1) go func() { for repoName := range repoChan { if err := checkRepo(registry, repoName); err != nil { fmt.Fprintln(os.Stderr, err) } } wg.Done() }() } for _, repoName := range repos { repoChan <- repoName } close(repoChan) wg.Wait() }
// Execute runs the Docker registry. func Execute(configFile io.Reader) { config, err := configuration.Parse(configFile) if err != nil { log.Fatalf("Error parsing configuration file: %s", err) } logLevel, err := log.ParseLevel(string(config.Log.Level)) if err != nil { log.Errorf("Error parsing log level %q: %s", config.Log.Level, err) logLevel = log.InfoLevel } log.SetLevel(logLevel) log.Infof("version=%s", version.Version) ctx := context.Background() app := handlers.NewApp(ctx, *config) // register OpenShift routes // TODO: change this to an anonymous Access record app.RegisterRoute(app.NewRoute().Path("/healthz"), server.HealthzHandler, handlers.NameNotRequired, handlers.NoCustomAccessRecords) // TODO add https scheme adminRouter := app.NewRoute().PathPrefix("/admin/").Subrouter() pruneAccessRecords := func(*http.Request) []auth.Access { return []auth.Access{ { Resource: auth.Resource{ Type: "admin", }, Action: "prune", }, } } app.RegisterRoute( // DELETE /admin/blobs/<digest> adminRouter.Path("/blobs/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), // handler server.BlobDispatcher, // repo name not required in url handlers.NameNotRequired, // custom access records pruneAccessRecords, ) app.RegisterRoute( // DELETE /admin/<repo>/manifests/<digest> adminRouter.Path("/{name:"+v2.RepositoryNameRegexp.String()+"}/manifests/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), // handler server.ManifestDispatcher, // repo name required in url handlers.NameRequired, // custom access records pruneAccessRecords, ) app.RegisterRoute( // DELETE /admin/<repo>/layers/<digest> adminRouter.Path("/{name:"+v2.RepositoryNameRegexp.String()+"}/layers/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), // handler server.LayerDispatcher, // repo name required in url handlers.NameRequired, // custom access records pruneAccessRecords, ) handler := gorillahandlers.CombinedLoggingHandler(os.Stdout, app) if config.HTTP.TLS.Certificate == "" { 
context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { context.GetLogger(app).Fatalln(err) } } else { tlsConf := &tls.Config{ ClientAuth: tls.NoClientCert, } if len(config.HTTP.TLS.ClientCAs) != 0 { pool := x509.NewCertPool() for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { context.GetLogger(app).Fatalln(err) } if ok := pool.AppendCertsFromPEM(caPem); !ok { context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) } } for _, subj := range pool.Subjects() { context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert tlsConf.ClientCAs = pool } context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) server := &http.Server{ Addr: config.HTTP.Addr, Handler: handler, TLSConfig: tlsConf, } if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { context.GetLogger(app).Fatalln(err) } } }
// Execute runs the Docker registry. func Execute(configFile io.Reader) { config, err := configuration.Parse(configFile) if err != nil { log.Fatalf("Error parsing configuration file: %s", err) } tokenPath := "/openshift/token" // If needed, generate and populate the token realm URL in the config. // Must be done prior to instantiating the app, so our auth provider has the config available. _, usingOpenShiftAuth := config.Auth[server.OpenShiftAuth] _, hasTokenRealm := config.Auth[server.OpenShiftAuth][server.TokenRealmKey].(string) if usingOpenShiftAuth && !hasTokenRealm { registryHost := os.Getenv(server.DockerRegistryURLEnvVar) if len(registryHost) == 0 { log.Fatalf("%s is required", server.DockerRegistryURLEnvVar) } tokenURL := &url.URL{Scheme: "https", Host: registryHost, Path: tokenPath} if len(config.HTTP.TLS.Certificate) == 0 { tokenURL.Scheme = "http" } if config.Auth[server.OpenShiftAuth] == nil { config.Auth[server.OpenShiftAuth] = configuration.Parameters{} } config.Auth[server.OpenShiftAuth][server.TokenRealmKey] = tokenURL.String() } ctx := context.Background() ctx, err = configureLogging(ctx, config) if err != nil { log.Fatalf("error configuring logger: %v", err) } log.Infof("version=%s", version.Version) // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. 
uuid.Loggerf = context.GetLogger(ctx).Warnf app := handlers.NewApp(ctx, config) // Add a token handling endpoint if usingOpenShiftAuth { app.NewRoute().Methods("GET").PathPrefix(tokenPath).Handler(server.NewTokenHandler(ctx, server.DefaultRegistryClient)) } // TODO add https scheme adminRouter := app.NewRoute().PathPrefix("/admin/").Subrouter() pruneAccessRecords := func(*http.Request) []auth.Access { return []auth.Access{ { Resource: auth.Resource{ Type: "admin", }, Action: "prune", }, } } app.RegisterRoute( // DELETE /admin/blobs/<digest> adminRouter.Path("/blobs/{digest:"+reference.DigestRegexp.String()+"}").Methods("DELETE"), // handler server.BlobDispatcher, // repo name not required in url handlers.NameNotRequired, // custom access records pruneAccessRecords, ) app.RegisterHealthChecks() handler := alive("/", app) // TODO: temporarily keep for backwards compatibility; remove in the future handler = alive("/healthz", handler) handler = health.Handler(handler) handler = panicHandler(handler) handler = gorillahandlers.CombinedLoggingHandler(os.Stdout, handler) if config.HTTP.TLS.Certificate == "" { context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { context.GetLogger(app).Fatalln(err) } } else { tlsConf := crypto.SecureTLSConfig(&tls.Config{ClientAuth: tls.NoClientCert}) if len(config.HTTP.TLS.ClientCAs) != 0 { pool := x509.NewCertPool() for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { context.GetLogger(app).Fatalln(err) } if ok := pool.AppendCertsFromPEM(caPem); !ok { context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) } } for _, subj := range pool.Subjects() { context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert tlsConf.ClientCAs = pool } context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) server := &http.Server{ Addr: 
config.HTTP.Addr, Handler: handler, TLSConfig: tlsConf, } if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { context.GetLogger(app).Fatalln(err) } } }
// Execute runs the Docker registry. func Execute(configFile io.Reader) { config, err := configuration.Parse(configFile) if err != nil { log.Fatalf("Error parsing configuration file: %s", err) } ctx := context.Background() ctx, err = configureLogging(ctx, config) if err != nil { log.Fatalf("error configuring logger: %v", err) } log.Infof("version=%s", version.Version) // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. uuid.Loggerf = context.GetLogger(ctx).Warnf app := handlers.NewApp(ctx, config) // TODO add https scheme adminRouter := app.NewRoute().PathPrefix("/admin/").Subrouter() pruneAccessRecords := func(*http.Request) []auth.Access { return []auth.Access{ { Resource: auth.Resource{ Type: "admin", }, Action: "prune", }, } } app.RegisterRoute( // DELETE /admin/blobs/<digest> adminRouter.Path("/blobs/{digest:"+reference.DigestRegexp.String()+"}").Methods("DELETE"), // handler server.BlobDispatcher, // repo name not required in url handlers.NameNotRequired, // custom access records pruneAccessRecords, ) app.RegisterHealthChecks() handler := alive("/", app) // TODO: temporarily keep for backwards compatibility; remove in the future handler = alive("/healthz", handler) handler = health.Handler(handler) handler = panicHandler(handler) handler = gorillahandlers.CombinedLoggingHandler(os.Stdout, handler) if config.HTTP.TLS.Certificate == "" { context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { context.GetLogger(app).Fatalln(err) } } else { tlsConf := crypto.SecureTLSConfig(&tls.Config{ClientAuth: tls.NoClientCert}) if len(config.HTTP.TLS.ClientCAs) != 0 { pool := x509.NewCertPool() for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { context.GetLogger(app).Fatalln(err) } if ok := pool.AppendCertsFromPEM(caPem); !ok { context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA 
to pool")) } } for _, subj := range pool.Subjects() { context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert tlsConf.ClientCAs = pool } context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) server := &http.Server{ Addr: config.HTTP.Addr, Handler: handler, TLSConfig: tlsConf, } if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { context.GetLogger(app).Fatalln(err) } } }
// TestDefaultMiddleware verifies that setDefaultMiddleware fills in any
// missing openshift middleware entries (registry, repository, storage) in a
// parsed configuration while leaving entries that are already present — and
// their options — untouched. The table covers: no middleware at all, a
// partially-populated middleware section, a fully-populated one, and real
// historical configs (v1.0.8, v1.2.1, v1.3.0-alpha.3). Each case parses both
// the input and the expected config and compares with reflect.DeepEqual.
// NOTE(review): the YAML table entries below are raw string literals whose
// original line structure must be preserved exactly — do not reformat them.
func TestDefaultMiddleware(t *testing.T) { checks := []struct { title, input, expect string }{ { title: "miss all middlewares", input: ` version: 0.1 storage: inmemory: {} `, expect: ` version: 0.1 storage: inmemory: {} middleware: registry: - name: openshift repository: - name: openshift storage: - name: openshift `, }, { title: "miss some middlewares", input: ` version: 0.1 storage: inmemory: {} middleware: registry: - name: openshift `, expect: ` version: 0.1 storage: inmemory: {} middleware: registry: - name: openshift repository: - name: openshift storage: - name: openshift `, }, { title: "all middlewares are in place", input: ` version: 0.1 storage: inmemory: {} middleware: registry: - name: openshift repository: - name: openshift storage: - name: openshift `, expect: ` version: 0.1 storage: inmemory: {} middleware: registry: - name: openshift repository: - name: openshift storage: - name: openshift `, }, { title: "check v1.0.8 config", input: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: layerinfo: inmemory filesystem: rootdirectory: /registry auth: openshift: realm: openshift middleware: repository: - name: openshift `, expect: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: layerinfo: inmemory filesystem: rootdirectory: /registry auth: openshift: realm: openshift middleware: registry: - name: openshift repository: - name: openshift storage: - name: openshift `, }, { title: "check v1.2.1 config", input: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: layerinfo: inmemory filesystem: rootdirectory: /registry delete: enabled: true auth: openshift: realm: openshift middleware: repository: - name: openshift options: pullthrough: true `, expect: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: layerinfo: inmemory filesystem: rootdirectory: /registry delete: enabled: true auth: openshift: realm: openshift middleware: registry: - name: openshift repository: - name: openshift options: 
pullthrough: true storage: - name: openshift `, }, { title: "check v1.3.0-alpha.3 config", input: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /registry delete: enabled: true auth: openshift: realm: openshift middleware: registry: - name: openshift repository: - name: openshift options: acceptschema2: false pullthrough: true enforcequota: false projectcachettl: 1m blobrepositorycachettl: 10m storage: - name: openshift `, expect: ` version: 0.1 log: level: debug http: addr: :5000 storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /registry delete: enabled: true auth: openshift: realm: openshift middleware: registry: - name: openshift repository: - name: openshift options: acceptschema2: false pullthrough: true enforcequota: false projectcachettl: 1m blobrepositorycachettl: 10m storage: - name: openshift `, }, } for _, check := range checks { currentConfig, err := configuration.Parse(strings.NewReader(check.input)) if err != nil { t.Fatal(err) } expectConfig, err := configuration.Parse(strings.NewReader(check.expect)) if err != nil { t.Fatal(err) } setDefaultMiddleware(currentConfig) if !reflect.DeepEqual(currentConfig, expectConfig) { t.Errorf("%s: expected\n\t%#v\ngot\n\t%#v", check.title, expectConfig, currentConfig) } } }