func (p *Propolis) SignRequest(req *http.Request) { // gather the string to be signed // method msg := req.Method + "\n" // md5sum msg += req.Header.Get("Content-MD5") + "\n" // content-type msg += req.Header.Get("Content-Type") + "\n" // date msg += req.Header.Get("Date") + "\n" // add headers for _, key := range AWS_HEADERS { if value := req.Header.Get(key); value != "" { msg += strings.ToLower(key) + ":" + value + "\n" } } // resource: the path components should be URL-encoded, but not the slashes u := new(url.URL) u.Path = "/" + p.Bucket + req.URL.Path msg += u.String() // create the signature hmac := hmac.NewSHA1([]byte(p.Secret)) hmac.Write([]byte(msg)) // get a base64 encoding of the signature var encoded bytes.Buffer encoder := base64.NewEncoder(base64.StdEncoding, &encoded) encoder.Write(hmac.Sum()) encoder.Close() signature := encoded.String() req.Header.Set("Authorization", "AWS "+p.Key+":"+signature) }
// SendRequest builds, signs, and executes a single S3 REST request.
//
// method is the HTTP verb; reduced requests reduced-redundancy storage;
// src, when non-empty, makes this a server-side copy/metadata update with
// src as the full "bucket/path" source; target is the request URL; body
// (may be nil) is the upload stream and is always closed before return;
// hash, when non-empty, is the base64 Content-MD5 of body; info, when
// non-nil, supplies the upload size and file metadata.
//
// Returns the response on a 2xx status; any non-2xx status is converted
// into an error built from resp.Status.
func (p *Propolis) SendRequest(method string, reduced bool, src string, target *url.URL, body io.ReadCloser, hash string, info *os.FileInfo) (resp *http.Response, err os.Error) {
	defer func() {
		// if anything goes wrong, close the body reader
		// if it ends normally, this will be closed already and set to nil
		if body != nil {
			body.Close()
		}
	}()

	var req *http.Request
	if req, err = http.NewRequest(method, target.String(), body); err != nil {
		return
	}

	// set upload file info if applicable
	if info != nil && body != nil {
		// TODO: 0-length files fail because the Content-Length field is missing
		// a fix is in the works in the Go library
		req.ContentLength = info.Size
	}

	// copy file metadata (mtime, mode, etc.) into request headers
	if info != nil {
		p.SetRequestMetaData(req, info)
	}

	// reduced redundancy?
	if reduced {
		req.Header.Set("X-Amz-Storage-Class", "REDUCED_REDUNDANCY")
	}

	// are we uploading a file with a content hash?
	if hash != "" {
		req.Header.Set("Content-MD5", hash)
	}

	// is this a copy/metadata update?
	if src != "" {
		// note: src should already be a full bucket + path name
		// route it through url.URL so the path gets properly escaped
		u := new(url.URL)
		u.Path = src
		req.Header.Set("X-Amz-Copy-Source", u.String())
		req.Header.Set("X-Amz-Metadata-Directive", "REPLACE")
	}

	// sign and execute the request
	// note: 2nd argument is temporary hack to set Content-Length: 0 when needed
	if resp, err = p.SignAndExecute(req, method == "PUT" && body == nil || (info != nil && info.Size == 0)); err != nil {
		return
	}

	// body was closed when the request was written out,
	// so nullify the deferred close
	body = nil

	// treat any non-2xx status as an error
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		err = os.NewError(resp.Status)
		return
	}

	return
}
func Setup() (p *Propolis, push bool) { var refresh, watch, delete, paranoid, reset, practice, public, secure, reduced, directories bool var delay, concurrent int flag.BoolVar(&refresh, "refresh", true, "Scan online bucket to update cache at startup\n"+ "\tLonger startup time, but catches changes made while offline") flag.BoolVar(&watch, "watch", false, "Go into daemon mode and watch the local file system\n"+ "\tfor changes after initial sync (false means sync then quit)") flag.BoolVar(&delete, "delete", true, "Delete files when syncing as well as copying changed files") flag.BoolVar(¶noid, "paranoid", false, "Always verify md5 hash of file contents,\n"+ "\teven when all metadata is an exact match (slower)") flag.BoolVar(&reset, "reset", false, "Reset the cache (implies -refresh=true)") flag.BoolVar(&practice, "practice", false, "Do a practice run without changing any files\n"+ "\tShows what would be changed (implies -watch=false)") flag.BoolVar(&public, "public", true, "Make world-readable local files publicly readable\n"+ "\tin the online bucket (downloadable via the web)") flag.BoolVar(&secure, "secure", false, "Use secure connections to Amazon S3\n"+ "\tA bit slower, but data is encrypted when being transferred") flag.BoolVar(&reduced, "reduced", false, "Use reduced redundancy storage when uploading\n"+ "\tCheaper, but higher chance of loosing data") flag.BoolVar(&directories, "directories", false, "Track directories using special zero-length files\n"+ "\tMostly useful for greater compatibility with s3fslite") flag.IntVar(&delay, "delay", 5, "Wait this number of seconds from the last change to a file\n"+ "\tbefore syncing it with the server") flag.IntVar(&concurrent, "concurrent", 25, "Maximum number of server transactions that are\n"+ "\tallowed to run concurrently") var accesskeyid, secretaccesskey, cache_location string flag.StringVar(&accesskeyid, "accesskeyid", "", "Amazon AWS Access Key ID") flag.StringVar(&secretaccesskey, "secretaccesskey", "", "Amazon 
AWS Secret Access Key") flag.StringVar(&cache_location, "cache", default_cache_location, "Metadata cache location\n"+ "\tA sqlite3 database file that caches online metadata") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Propolis:\n"+ " Amazon S3 <--> local file system synchronizer\n"+ " Synchronizes a local directory with an S3 bucket, then\n"+ " watches the local directory for changes and automatically\n"+ " propogates them to the bucket.\n\n"+ " See http://github.com/russross/propolis for details\n\n"+ " Copyright 2011 by Russ Ross <*****@*****.**>\n\n"+ " Propolis comes with ABSOLUTELY NO WARRANTY. This is free software, and you\n"+ " are welcome to redistribute it under certain conditions. See the GNU\n"+ " General Public Licence for details.\n\n"+ "Usage:\n"+ " To start by syncing remote bucket to match local file system:\n"+ " %s [flags] local/dir s3:bucket[:remote/dir]\n"+ " To start by syncing local file system to match remote bucket:\n"+ " %s [flags] s3:bucket[:remote/dir] local/dir\n\n"+ " Amazon Access Key ID and Secret Access Key can be specified in\n"+ " one of three ways, listed in decreasing order of precedence.\n"+ " Note: both values must be supplied using a single method:\n\n"+ " 1. On the command line\n"+ " 2. In the environment variables %s and %s\n"+ " 3. 
In the file %s as key:secret on a single line\n\n"+ "Options:\n", os.Args[0], os.Args[0], s3_access_key_id_variable, s3_secret_access_key_variable, s3_password_file) flag.PrintDefaults() } flag.Parse() // enforce certain option combinations if reset { refresh = true } if practice { watch = false } // make sure we get access keys if accesskeyid == "" || secretaccesskey == "" { accesskeyid, secretaccesskey = getKeys() } if accesskeyid == "" || secretaccesskey == "" { fmt.Fprintln(os.Stderr, "Error: Amazon AWS Access Key ID and/or Secret Access Key undefined\n") flag.Usage() os.Exit(-1) } // check command-line arguments args := flag.Args() if len(args) != 2 { flag.Usage() os.Exit(-1) } // figure out the direction of sync, parse the bucket and directory info var bucketname, bucketprefix, localdir string switch { case !strings.HasPrefix(args[0], "s3:") && strings.HasPrefix(args[1], "s3:"): push = true localdir = parseLocalDir(args[0]) bucketname, bucketprefix = parseBucket(args[1]) case strings.HasPrefix(args[0], "s3:") && !strings.HasPrefix(args[1], "s3:"): push = false bucketname, bucketprefix = parseBucket(args[0]) localdir = parseLocalDir(args[1]) default: flag.Usage() os.Exit(-1) } // make sure the root directory exists if info, err := os.Lstat(localdir); err != nil || !info.IsDirectory() { fmt.Fprintf(os.Stderr, "%s is not a valid directory\n", localdir) } // open the database var err os.Error var cache Cache if cache, err = Connect(path.Join(cache_location, bucketname+".sqlite")); err != nil { fmt.Println("Error connecting to database:", err) os.Exit(-1) } // create the Propolis object url := new(url.URL) url.Scheme = "http" if secure { url.Scheme = "https" } url.Host = bucketname + ".s3.amazonaws.com" url.Path = "/" p = &Propolis{ Bucket: bucketname, Url: url, Secure: secure, ReducedRedundancy: reduced, Key: accesskeyid, Secret: secretaccesskey, BucketRoot: bucketprefix, LocalRoot: localdir, Refresh: refresh, Paranoid: paranoid, Reset: reset, Directories: 
directories, Practice: practice, Watch: watch, Delay: delay, Concurrent: concurrent, Db: cache, } return }