func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { client := &s3.Client{ Auth: &s3.Auth{ AccessKey: config.RequiredString("aws_access_key"), SecretAccessKey: config.RequiredString("aws_secret_access_key"), }, HttpClient: http.DefaultClient, } sto := &s3Storage{ SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{}, s3Client: client, bucket: config.RequiredString("bucket"), } skipStartupCheck := config.OptionalBool("skipStartupCheck", false) if err := config.Validate(); err != nil { return nil, err } if !skipStartupCheck { // TODO: skip this check if a file // ~/.camli/.configcheck/sha1-("IS GOOD: s3: sha1(access key + // secret key)") exists and has recent time? if _, err := client.Buckets(); err != nil { return nil, fmt.Errorf("Failed to get bucket list from S3: %v", err) } } return sto, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { url := config.RequiredString("url") auth := config.RequiredString("auth") skipStartupCheck := config.OptionalBool("skipStartupCheck", false) if err := config.Validate(); err != nil { return nil, err } client := client.New(url) if err = client.SetupAuthFromString(auth); err != nil { return nil, err } client.SetLogger(log.New(os.Stderr, "remote", log.LstdFlags)) sto := &remoteStorage{ client: client, } if !skipStartupCheck { // Do a quick dummy operation to check that our credentials are // correct. // TODO(bradfitz,mpl): skip this operation smartly if it turns out this is annoying/slow for whatever reason. c := make(chan blob.SizedRef, 1) err = sto.EnumerateBlobs(context.TODO(), c, "", 1) if err != nil { return nil, err } } return sto, nil }
// newFromConfig builds a swiftStorage backed by an OpenStack Swift container,
// creating the container at startup if it doesn't already exist (unless
// skipStartupCheck is set).
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	client := &swift.Connection{
		UserName: config.RequiredString("user_name"),
		Tenant:   config.RequiredString("tenant"),
		ApiKey:   config.RequiredString("secret"),
		AuthUrl:  config.RequiredString("auth_url"),
	}
	if err := client.Authenticate(); err != nil {
		return nil, fmt.Errorf("Authentication failure on swift: %v", err)
	}
	sto := &swiftStorage{
		client:    client,
		container: config.OptionalString("container", "default"),
	}
	skipStartupCheck := config.OptionalBool("skipStartupCheck", false)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	if !skipStartupCheck {
		containers, err := client.ContainerNames(nil)
		if err != nil {
			return nil, fmt.Errorf("Failed to get container list from swift: %v", err)
		}
		haveContainer := make(map[string]bool)
		for _, c := range containers {
			haveContainer[c] = true
		}
		if !haveContainer[sto.container] {
			if err := client.ContainerCreate(sto.container, nil); err != nil {
				return nil, fmt.Errorf("Swift container %s doesn't exist and it's impossible to create it: %v", sto.container, err)
			}
		}
	}
	return sto, nil
}
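// A hypothetical jsonconfig stanza for the Swift constructor above. The keys
// match its RequiredString/OptionalString lookups; every value is a placeholder.
var exampleSwiftConfig = jsonconfig.Obj{
	"user_name": "demo",
	"tenant":    "demo",
	"secret":    "swift-api-key",
	"auth_url":  "https://auth.example.com/v2.0",
	"container": "camliblobs",
}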
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	client := &s3.Client{
		Auth: &s3.Auth{
			AccessKey:       config.RequiredString("aws_access_key"),
			SecretAccessKey: config.RequiredString("aws_secret_access_key"),
		},
		HttpClient: http.DefaultClient,
	}
	sto := &s3Storage{
		SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{},
		s3Client:                  client,
		bucket:                    config.RequiredString("bucket"),
	}
	skipStartupCheck := config.OptionalBool("skipStartupCheck", false)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	if !skipStartupCheck {
		// TODO: skip this check if a file
		// ~/.camli/.configcheck/sha1-("IS GOOD: s3: sha1(access key +
		// secret key)") exists and has recent time?
		buckets, err := client.Buckets()
		if err != nil {
			return nil, fmt.Errorf("Failed to get bucket list from S3: %v", err)
		}
		haveBucket := make(map[string]bool)
		for _, b := range buckets {
			haveBucket[b.Name] = true
		}
		if !haveBucket[sto.bucket] {
			return nil, fmt.Errorf("S3 bucket %q doesn't exist. Create it first at https://console.aws.amazon.com/s3/home", sto.bucket)
		}
	}
	return sto, nil
}
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	u, err := user.Current()
	if err != nil {
		return
	}
	root := &RootHandler{
		BlobRoot:   conf.OptionalString("blobRoot", ""),
		SearchRoot: conf.OptionalString("searchRoot", ""),
		OwnerName:  conf.OptionalString("ownerName", u.Name),
	}
	root.Stealth = conf.OptionalBool("stealth", false)
	if err = conf.Validate(); err != nil {
		return
	}
	if root.BlobRoot != "" {
		bs, err := ld.GetStorage(root.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err)
		}
		root.Storage = bs
	}
	if root.SearchRoot != "" {
		h, _ := ld.GetHandler(root.SearchRoot)
		root.Search = h.(*search.Handler)
	}
	return root, nil
}
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { from := conf.RequiredString("from") to := conf.RequiredString("to") fullSync := conf.OptionalBool("fullSyncOnStart", false) blockFullSync := conf.OptionalBool("blockingFullSyncOnStart", false) if err = conf.Validate(); err != nil { return } fromBs, err := ld.GetStorage(from) if err != nil { return } toBs, err := ld.GetStorage(to) if err != nil { return } fromQsc, ok := fromBs.(blobserver.StorageQueueCreator) if !ok { return nil, fmt.Errorf("Prefix %s (type %T) does not support being efficient replication source (queueing)", from, fromBs) } synch, err := createSyncHandler(from, to, fromQsc, toBs) if err != nil { return } if fullSync || blockFullSync { didFullSync := make(chan bool, 1) go func() { n := synch.runSync("queue", fromQsc, 0) log.Printf("Queue sync copied %d blobs", n) n = synch.runSync("full", fromBs, 0) log.Printf("Full sync copied %d blobs", n) didFullSync <- true synch.syncQueueLoop() }() if blockFullSync { log.Printf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync log.Printf("Full sync complete.") } } else { go synch.syncQueueLoop() } rootPrefix, _, err := ld.FindHandlerByType("root") switch err { case blobserver.ErrHandlerTypeNotFound: // ignore; okay to not have a root handler. case nil: h, err := ld.GetHandler(rootPrefix) if err != nil { return nil, err } h.(*RootHandler).registerSyncHandler(synch) default: return nil, fmt.Errorf("Error looking for root handler: %v", err) } return synch, nil }
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	root := &RootHandler{}
	root.Stealth = conf.OptionalBool("stealth", false)
	if err = conf.Validate(); err != nil {
		return
	}
	if _, h, err := ld.FindHandlerByType("ui"); err == nil {
		root.ui = h.(*UIHandler)
	}
	return root, nil
}
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	username, _ := getUserName()
	root := &RootHandler{
		BlobRoot:   conf.OptionalString("blobRoot", ""),
		SearchRoot: conf.OptionalString("searchRoot", ""),
		OwnerName:  conf.OptionalString("ownerName", username),
		Username:   osutil.Username(),
		Prefix:     ld.MyPrefix(),
	}
	root.Stealth = conf.OptionalBool("stealth", false)
	root.statusRoot = conf.OptionalString("statusRoot", "")
	if err = conf.Validate(); err != nil {
		return
	}
	if root.BlobRoot != "" {
		bs, err := ld.GetStorage(root.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err)
		}
		root.Storage = bs
	}
	root.searchInit = func() {}
	if root.SearchRoot != "" {
		prefix := root.SearchRoot
		if t := ld.GetHandlerType(prefix); t != "search" {
			if t == "" {
				return nil, fmt.Errorf("root handler's searchRoot of %q is invalid and doesn't refer to a declared handler", prefix)
			}
			return nil, fmt.Errorf("root handler's searchRoot of %q is of type %q, not %q", prefix, t, "search")
		}
		root.searchInit = func() {
			h, err := ld.GetHandler(prefix)
			if err != nil {
				log.Fatalf("Error fetching SearchRoot at %q: %v", prefix, err)
			}
			root.searchHandler = h.(*search.Handler)
			root.searchInit = nil
		}
	}
	// Check the error returned by FindHandlerByType, not the (always nil at
	// this point) named return value.
	if pfx, _, err := ld.FindHandlerByType("importer"); err == nil {
		root.importerRoot = pfx
	}
	return root, nil
}
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { from := conf.RequiredString("from") to := conf.RequiredString("to") fullSync := conf.OptionalBool("fullSyncOnStart", false) blockFullSync := conf.OptionalBool("blockingFullSyncOnStart", false) if err = conf.Validate(); err != nil { return } fromBs, err := ld.GetStorage(from) if err != nil { return } toBs, err := ld.GetStorage(to) if err != nil { return } fromQsc, ok := fromBs.(blobserver.StorageQueueCreator) if !ok { return nil, fmt.Errorf("Prefix %s (type %T) does not support being efficient replication source (queueing)", from, fromBs) } synch, err := createSyncHandler(from, to, fromQsc, toBs) if err != nil { return } if fullSync || blockFullSync { didFullSync := make(chan bool, 1) go func() { n := synch.runSync("queue", fromQsc, 0) log.Printf("Queue sync copied %d blobs", n) n = synch.runSync("full", fromBs, 0) log.Printf("Full sync copied %d blobs", n) didFullSync <- true synch.syncQueueLoop() }() if blockFullSync { log.Printf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync log.Printf("Full sync complete.") } } else { go synch.syncQueueLoop() } return synch, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { url := config.RequiredString("url") skipStartupCheck := config.OptionalBool("skipStartupCheck", false) if err := config.Validate(); err != nil { return nil, err } client := client.New(url) err = client.SetupAuthFromConfig(config) if err != nil { return nil, err } sto := &remoteStorage{ client: client, } if !skipStartupCheck { // TODO: do a server stat or something to check password } return sto, nil }
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here? ownerBlobStr := conf.RequiredString("owner") devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "") slurpToMemory := conf.OptionalBool("slurpToMemory", false) if err := conf.Validate(); err != nil { return nil, err } if devBlockStartupPrefix != "" { _, err := ld.GetHandler(devBlockStartupPrefix) if err != nil { return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err) } } indexHandler, err := ld.GetHandler(indexPrefix) if err != nil { return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix) } indexer, ok := indexHandler.(index.Interface) if !ok { return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler) } ownerBlobRef, ok := blob.Parse(ownerBlobStr) if !ok { return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr) } h := NewHandler(indexer, ownerBlobRef) if slurpToMemory { ii := indexer.(*index.Index) corpus, err := ii.KeepInMemory() if err != nil { return nil, fmt.Errorf("error slurping index to memory: %v", err) } h.corpus = corpus } return h, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { hostname := config.OptionalString("hostname", "s3.amazonaws.com") cacheSize := config.OptionalInt64("cacheSize", 32<<20) client := &s3.Client{ Auth: &s3.Auth{ AccessKey: config.RequiredString("aws_access_key"), SecretAccessKey: config.RequiredString("aws_secret_access_key"), Hostname: hostname, }, } sto := &s3Storage{ s3Client: client, bucket: config.RequiredString("bucket"), hostname: hostname, } skipStartupCheck := config.OptionalBool("skipStartupCheck", false) if err := config.Validate(); err != nil { return nil, err } if cacheSize != 0 { sto.cache = memory.NewCache(cacheSize) } if !skipStartupCheck { _, err := client.ListBucket(sto.bucket, "", 1) if serr, ok := err.(*s3.Error); ok { if serr.AmazonCode == "NoSuchBucket" { return nil, fmt.Errorf("Bucket %q doesn't exist.", sto.bucket) } // This code appears when the hostname has dots in it: if serr.AmazonCode == "PermanentRedirect" { loc, lerr := client.BucketLocation(sto.bucket) if lerr != nil { return nil, fmt.Errorf("Wrong server for bucket %q; and error determining bucket's location: %v", sto.bucket, lerr) } client.Auth.Hostname = loc _, err = client.ListBucket(sto.bucket, "", 1) if err == nil { log.Printf("Warning: s3 server should be %q, not %q. Change config file to avoid start-up latency.", client.Auth.Hostname, hostname) } } // This path occurs when the user set the // wrong server, or didn't set one at all, but // the bucket doesn't have dots in it: if serr.UseEndpoint != "" { // UseEndpoint will be e.g. "brads3test-ca.s3-us-west-1.amazonaws.com" // But we only want the "s3-us-west-1.amazonaws.com" part. client.Auth.Hostname = strings.TrimPrefix(serr.UseEndpoint, sto.bucket+".") _, err = client.ListBucket(sto.bucket, "", 1) if err == nil { log.Printf("Warning: s3 server should be %q, not %q. Change config file to avoid start-up latency.", client.Auth.Hostname, hostname) } } } if err != nil { return nil, fmt.Errorf("Error listing bucket %s: %v", sto.bucket, err) } } return sto, nil }
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { var ( from = conf.RequiredString("from") to = conf.RequiredString("to") fullSync = conf.OptionalBool("fullSyncOnStart", false) blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false) idle = conf.OptionalBool("idle", false) queueConf = conf.OptionalObject("queue") copierPoolSize = conf.OptionalInt("copierPoolSize", 5) validate = conf.OptionalBool("validateOnStart", validateOnStartDefault) ) if err := conf.Validate(); err != nil { return nil, err } if idle { return newIdleSyncHandler(from, to), nil } if len(queueConf) == 0 { return nil, errors.New(`Missing required "queue" object`) } q, err := sorted.NewKeyValue(queueConf) if err != nil { return nil, err } isToIndex := false fromBs, err := ld.GetStorage(from) if err != nil { return nil, err } toBs, err := ld.GetStorage(to) if err != nil { return nil, err } if _, ok := fromBs.(*index.Index); !ok { if _, ok := toBs.(*index.Index); ok { isToIndex = true } } sh := newSyncHandler(from, to, fromBs, toBs, q) sh.toIndex = isToIndex sh.copierPoolSize = copierPoolSize if err := sh.readQueueToMemory(); err != nil { return nil, fmt.Errorf("Error reading sync queue to memory: %v", err) } if fullSync || blockFullSync { sh.logf("Doing full sync") didFullSync := make(chan bool, 1) go func() { for { n := sh.runSync("queue", sh.enumeratePendingBlobs) if n > 0 { sh.logf("Queue sync copied %d blobs", n) continue } break } n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs)) sh.logf("Full sync copied %d blobs", n) didFullSync <- true sh.syncLoop() }() if blockFullSync { sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync sh.logf("Full sync complete.") } } else { go sh.syncLoop() } if validate { go sh.startFullValidation() } blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue) return sh, nil }
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { var ( from = conf.RequiredString("from") to = conf.RequiredString("to") fullSync = conf.OptionalBool("fullSyncOnStart", false) blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false) idle = conf.OptionalBool("idle", false) queueConf = conf.OptionalObject("queue") ) if err := conf.Validate(); err != nil { return nil, err } if idle { synch, err := createIdleSyncHandler(from, to) if err != nil { return nil, err } return synch, nil } if len(queueConf) == 0 { return nil, errors.New(`Missing required "queue" object`) } q, err := sorted.NewKeyValue(queueConf) if err != nil { return nil, err } isToIndex := false fromBs, err := ld.GetStorage(from) if err != nil { return nil, err } toBs, err := ld.GetStorage(to) if err != nil { return nil, err } if _, ok := fromBs.(*index.Index); !ok { if _, ok := toBs.(*index.Index); ok { isToIndex = true } } sh, err := createSyncHandler(from, to, fromBs, toBs, q, isToIndex) if err != nil { return nil, err } if fullSync || blockFullSync { didFullSync := make(chan bool, 1) go func() { n := sh.runSync("queue", sh.enumerateQueuedBlobs) sh.logf("Queue sync copied %d blobs", n) n = sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs)) sh.logf("Full sync copied %d blobs", n) didFullSync <- true sh.syncQueueLoop() }() if blockFullSync { sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync sh.logf("Full sync complete.") } } else { go sh.syncQueueLoop() } blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue) return sh, nil }
// newRootFromConfig builds the root handler, wiring it to the optional blob,
// search, JSON-sign, status, help, and importer handlers named in its config.
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	checkType := func(key string, htype string) {
		v := conf.OptionalString(key, "")
		if v == "" {
			return
		}
		ct := ld.GetHandlerType(v)
		if ct == "" {
			err = fmt.Errorf("root handler's %q references non-existent %q", key, v)
		} else if ct != htype {
			err = fmt.Errorf("root handler's %q references %q of type %q; expected type %q", key, v, ct, htype)
		}
	}
	checkType("searchRoot", "search")
	checkType("jsonSignRoot", "jsonsign")
	if err != nil {
		return
	}
	username, _ := getUserName()
	root := &RootHandler{
		BlobRoot:     conf.OptionalString("blobRoot", ""),
		SearchRoot:   conf.OptionalString("searchRoot", ""),
		JSONSignRoot: conf.OptionalString("jsonSignRoot", ""),
		OwnerName:    conf.OptionalString("ownerName", username),
		Username:     osutil.Username(),
		Prefix:       ld.MyPrefix(),
	}
	root.Stealth = conf.OptionalBool("stealth", false)
	root.statusRoot = conf.OptionalString("statusRoot", "")
	root.helpRoot = conf.OptionalString("helpRoot", "")
	if err = conf.Validate(); err != nil {
		return
	}
	if root.BlobRoot != "" {
		bs, err := ld.GetStorage(root.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err)
		}
		root.Storage = bs
	}
	if root.JSONSignRoot != "" {
		h, _ := ld.GetHandler(root.JSONSignRoot)
		if sigh, ok := h.(*signhandler.Handler); ok {
			root.sigh = sigh
		}
	}
	root.searchInit = func() {}
	if root.SearchRoot != "" {
		prefix := root.SearchRoot
		if t := ld.GetHandlerType(prefix); t != "search" {
			if t == "" {
				return nil, fmt.Errorf("root handler's searchRoot of %q is invalid and doesn't refer to a declared handler", prefix)
			}
			return nil, fmt.Errorf("root handler's searchRoot of %q is of type %q, not %q", prefix, t, "search")
		}
		root.searchInit = func() {
			h, err := ld.GetHandler(prefix)
			if err != nil {
				log.Fatalf("Error fetching SearchRoot at %q: %v", prefix, err)
			}
			root.searchHandler = h.(*search.Handler)
			root.searchInit = nil
		}
	}
	// Check the error returned by FindHandlerByType, not the (always nil at
	// this point) named return value.
	if pfx, _, err := ld.FindHandlerByType("importer"); err == nil {
		root.importerRoot = pfx
	}
	return root, nil
}
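// A hypothetical jsonconfig stanza for the root handler constructor above.
// The keys mirror its config lookups; the prefixes are placeholders and would
// have to refer to handlers declared elsewhere in the server config.
var exampleRootConfig = jsonconfig.Obj{
	"blobRoot":   "/bs/",
	"searchRoot": "/my-search/",
	"statusRoot": "/status/",
	"ownerName":  "Alice",
	"stealth":    false,
}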
// GenerateClientConfig returns a client configuration which can be used to
// access a server defined by the provided low-level server configuration.
func GenerateClientConfig(serverConfig jsonconfig.Obj) (*Config, error) {
	missingConfig := func(param string) (*Config, error) {
		return nil, fmt.Errorf("required value for '%s' not found", param)
	}
	if serverConfig == nil {
		return nil, errors.New("server config is a required parameter")
	}
	param := "auth"
	auth := serverConfig.OptionalString(param, "")
	if auth == "" {
		return missingConfig(param)
	}
	listen := serverConfig.OptionalString("listen", "")
	baseURL := serverConfig.OptionalString("baseURL", "")
	if listen == "" {
		listen = baseURL
	}
	if listen == "" {
		return nil, errors.New("required value for 'listen' or 'baseURL' not found")
	}
	https := serverConfig.OptionalBool("https", false)
	if !strings.HasPrefix(listen, "http://") && !strings.HasPrefix(listen, "https://") {
		if !https {
			listen = "http://" + listen
		} else {
			listen = "https://" + listen
		}
	}
	param = "httpsCert"
	httpsCert := serverConfig.OptionalString(param, "")
	if https && httpsCert == "" {
		return missingConfig(param)
	}
	// TODO(mpl): See if we can detect that the cert is not self-signed, and in
	// that case not add it to the trustedCerts
	var trustedList []string
	if https && httpsCert != "" {
		certPEMBlock, err := wkfs.ReadFile(httpsCert)
		if err != nil {
			return nil, fmt.Errorf("could not read certificate: %v", err)
		}
		sig, err := httputil.CertFingerprint(certPEMBlock)
		if err != nil {
			return nil, fmt.Errorf("could not get fingerprints of certificate: %v", err)
		}
		trustedList = []string{sig}
	}
	param = "prefixes"
	prefixes := serverConfig.OptionalObject(param)
	if len(prefixes) == 0 {
		return missingConfig(param)
	}
	param = "/sighelper/"
	sighelper := prefixes.OptionalObject(param)
	if len(sighelper) == 0 {
		return missingConfig(param)
	}
	param = "handlerArgs"
	handlerArgs := sighelper.OptionalObject(param)
	if len(handlerArgs) == 0 {
		return missingConfig(param)
	}
	param = "keyId"
	keyId := handlerArgs.OptionalString(param, "")
	if keyId == "" {
		return missingConfig(param)
	}
	param = "secretRing"
	secretRing := handlerArgs.OptionalString(param, "")
	if secretRing == "" {
		return missingConfig(param)
	}
	return &Config{
		Servers: map[string]*Server{
			"default": {
				Server:       listen,
				Auth:         auth,
				IsDefault:    true,
				TrustedCerts: trustedList,
			},
		},
		Identity:           keyId,
		IdentitySecretRing: secretRing,
		IgnoredFiles:       []string{".DS_Store"},
	}, nil
}
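// A hedged usage sketch (not from the original source) of feeding a minimal
// server config to GenerateClientConfig above. The nesting mirrors the
// function's own lookups ("prefixes" -> "/sighelper/" -> "handlerArgs"); the
// auth string, listen address, key ID, and secret ring path are placeholders.
func exampleGenerateClientConfig() (*Config, error) {
	serverConfig := jsonconfig.Obj{
		"auth":   "userpass:alice:secret",
		"listen": "localhost:3179",
		"https":  false,
		"prefixes": map[string]interface{}{
			"/sighelper/": map[string]interface{}{
				"handlerArgs": map[string]interface{}{
					"keyId":      "2931A67C26F5ABDA",
					"secretRing": "/home/alice/.config/camlistore/identity-secring.gpg",
				},
			},
		},
	}
	return GenerateClientConfig(serverConfig)
}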