Example #1
File: client.go Project: Sunmonds/gcc
func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err os.Error) {
	// TODO: if/when we add cookie support, the redirected request shouldn't
	// necessarily supply the same cookies as the original.
	var base *url.URL
	redirectChecker := c.CheckRedirect
	if redirectChecker == nil {
		redirectChecker = defaultCheckRedirect
	}
	var via []*Request

	if ireq.URL == nil {
		return nil, os.NewError("http: nil Request.URL")
	}

	req := ireq
	urlStr := "" // next relative or absolute URL to fetch (after first request)
	for redirect := 0; ; redirect++ {
		if redirect != 0 {
			req = new(Request)
			req.Method = ireq.Method
			req.Header = make(Header)
			req.URL, err = base.Parse(urlStr)
			if err != nil {
				break
			}
			if len(via) > 0 {
				// Add the Referer header.
				lastReq := via[len(via)-1]
				if lastReq.URL.Scheme != "https" {
					req.Header.Set("Referer", lastReq.URL.String())
				}

				err = redirectChecker(req, via)
				if err != nil {
					break
				}
			}
		}

		urlStr = req.URL.String()
		if r, err = send(req, c.Transport); err != nil {
			break
		}
		if shouldRedirect(r.StatusCode) {
			r.Body.Close()
			if urlStr = r.Header.Get("Location"); urlStr == "" {
				err = os.NewError(fmt.Sprintf("%d response missing Location header", r.StatusCode))
				break
			}
			base = req.URL
			via = append(via, req)
			continue
		}
		return
	}

	method := ireq.Method
	err = &url.Error{method[0:1] + strings.ToLower(method[1:]), urlStr, err}
	return
}
Example #2
func newConn(url *url.URL) (*http.ClientConn, os.Error) {
	addr := url.Host
	// just set the default scheme to http
	if url.Scheme == "" {
		url.Scheme = "http"
	}
	if !hasPort(addr) {
		// no explicit port: use the scheme as the service name (":http" resolves to 80, ":https" to 443)
		addr += ":" + url.Scheme
	}
	var conn net.Conn
	var err os.Error
	if url.Scheme == "http" {
		conn, err = net.Dial("tcp", addr)
		if err != nil {
			return nil, err
		}
	} else { // https
		conn, err = tls.Dial("tcp", addr, nil)
		if err != nil {
			return nil, err
		}
		h := url.Host
		if hasPort(h) {
			h = h[0:strings.LastIndex(h, ":")]
		}
		if err := conn.(*tls.Conn).VerifyHostname(h); err != nil {
			return nil, err
		}
	}

	return http.NewClientConn(conn, nil), nil
}
Example #3
File: s3.go Project: russross/propolis
func (p *Propolis) ListRequest(path string, marker string, maxEntries int, includeAll bool) (listresult *ListBucketResult, err os.Error) {
	// set up the query string
	var prefix string

	// are we scanning a subdirectory or starting at the root?
	if path != "" {
		prefix = path + "/"
	}

	query := make(url.Values)
	query.Add("prefix", prefix)

	// are we scanning just a single directory or getting everything?
	if !includeAll {
		query.Add("delimiter", "/")
	}

	// are we continuing an earlier scan?
	if marker != "" {
		query.Add("marker", marker)
	}

	// restrict the maximum number of entries returned
	query.Add("max-keys", strconv.Itoa(maxEntries))

	u := new(url.URL)
	*u = *p.Url
	u.RawQuery = query.Encode()

	// issue the request
	var resp *http.Response
	if resp, err = p.SendRequest("GET", false, "", u, nil, "", nil); err != nil {
		return
	}
	if resp.Body != nil {
		defer resp.Body.Close()
	}

	// parse the stuff we care about from the xml result
	listresult = &ListBucketResult{}
	if err = xml.Unmarshal(resp.Body, listresult); err != nil {
		listresult = nil
		return
	}
	return
}
Example #4
File: sign.go Project: streadway/s3sig
// Assumes no custom headers are sent, so it only needs access to a URL.
// If you plan on sending x-amz-* headers with a query-string authorization,
// you can use Signature(secret, StringToSign(method, url, headers, expires)) instead.
// Returns a url.URL struct constructed from the Raw URL with the AWS
// query parameters appended at the end.
// Assumes any fragments are not included in url.Raw.
func URL(url *url.URL, key, secret, method, expires string) (*url.URL, os.Error) {
	sig := Signature(secret, StringToSign(method, url, http.Header{}, expires))
	raw := url.Raw
	parts := strings.SplitN(raw, "?", 2)
	params := parts[1:]
	params = append(params, "AWSAccessKeyId="+key)
	params = append(params, "Expires="+expires)
	params = append(params, "Signature="+sig)
	signed := strings.Join(append(parts[:1], strings.Join(params, "&")), "?")

	return url.Parse(signed)
}
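A minimal usage sketch for the signer above (the bucket URL, access key, secret, and expiry timestamp are hypothetical, and it assumes the same pre-Go 1 url/log/fmt APIs used throughout these examples):

func exampleSignedGet() {
	// parse the object URL to be signed (hypothetical bucket and key)
	u, err := url.Parse("http://mybucket.s3.amazonaws.com/photos/cat.jpg")
	if err != nil {
		log.Fatal(err)
	}
	// expires is a Unix timestamp (as a string) after which the signed URL stops working
	signed, err := URL(u, "MY_ACCESS_KEY_ID", "MY_SECRET_ACCESS_KEY", "GET", "1375062689")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed.String())
}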
Example #5
File: s3.go Project: russross/propolis
func (p *Propolis) SignRequest(req *http.Request) {
	// gather the string to be signed

	// method
	msg := req.Method + "\n"

	// md5sum
	msg += req.Header.Get("Content-MD5") + "\n"

	// content-type
	msg += req.Header.Get("Content-Type") + "\n"

	// date
	msg += req.Header.Get("Date") + "\n"

	// add headers
	for _, key := range AWS_HEADERS {
		if value := req.Header.Get(key); value != "" {
			msg += strings.ToLower(key) + ":" + value + "\n"
		}
	}

	// resource: the path components should be URL-encoded, but not the slashes
	u := new(url.URL)
	u.Path = "/" + p.Bucket + req.URL.Path
	msg += u.String()

	// create the signature
	hmac := hmac.NewSHA1([]byte(p.Secret))
	hmac.Write([]byte(msg))

	// get a base64 encoding of the signature
	var encoded bytes.Buffer
	encoder := base64.NewEncoder(base64.StdEncoding, &encoded)
	encoder.Write(hmac.Sum())
	encoder.Close()
	signature := encoded.String()

	req.Header.Set("Authorization", "AWS "+p.Key+":"+signature)
}
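For reference, the string assembled above follows the S3 signature version 2 string-to-sign layout, roughly:

	HTTP-Verb + "\n" +
	Content-MD5 + "\n" +
	Content-Type + "\n" +
	Date + "\n" +
	canonicalized x-amz-* headers (lowercased name:value, one per line) +
	"/" + bucket + path

The HMAC-SHA1 of this string with the secret key, base64-encoded, is what ends up as the signature in the Authorization header.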
Example #6
File: sqs.go Project: supr/sqs
func (s *SQS) query(queueUrl string, params map[string]string, resp interface{}) os.Error {
	params["Timestamp"] = time.UTC().Format(time.RFC3339)
	var url_ *url.URL
	var err os.Error
	var path string
	if queueUrl != "" {
		url_, err = url.Parse(queueUrl)
		path = queueUrl[len(s.Region.SQSEndpoint):]
	} else {
		url_, err = url.Parse(s.Region.SQSEndpoint)
		path = "/"
	}
	if err != nil {
		return err
	}

	//url_, err := url.Parse(s.Region.SQSEndpoint)
	//if err != nil {
	//	return err
	//}

	sign(s.Auth, "GET", path, params, url_.Host)
	url_.RawQuery = multimap(params).Encode()
	r, err := http.Get(url_.String())
	if err != nil {
		return err
	}
	defer r.Body.Close()

	//dump, _ := http.DumpResponse(r, true)
	//println("DUMP:\n", string(dump))
	//return nil

	if r.StatusCode != 200 {
		return buildError(r)
	}
	err = xml.Unmarshal(r.Body, resp)
	return err
}
Example #7
File: chsupp.go Project: dvyukov/go-conc
func cacheLogOutput(logUrl *url.URL) (cacheFilePath string, ok bool) {
	cacheFilePath = path.Join(cacheRoot, determineFilename(logUrl))
	// See if it already exists.
	_, err := os.Stat(cacheFilePath)
	if err == nil {
		return cacheFilePath, true
	}
	// Create a cached file.
	tempFile, err := os.Create(cacheFilePath + "-tmp")
	if err != nil {
		log.Printf("Failed to generate temp filename: %s", err)
		return
	}
	defer func() {
		tempFile.Close()
		os.Remove(tempFile.Name())
	}()
	// Do a URL request, and pipe the data into the temporary file.
	r, err := http.Get(logUrl.String())
	if err != nil {
		log.Printf("Failed to http.Get: %s", err)
		return
	}
	defer r.Body.Close()
	_, err = io.Copy(tempFile, r.Body)
	if err != nil {
		log.Printf("Failed to io.Copy HTTP: %s", err)
		return
	}
	// Move the file to its final location.
	tempFile.Close()
	err = os.Rename(tempFile.Name(), cacheFilePath)
	if err != nil {
		log.Printf("Failed to rename temp file: %s", err)
		return
	}
	// Pipe the data through
	return cacheFilePath, true
}
Example #8
File: sign.go Project: streadway/s3sig
func canonicalizedResource(url *url.URL) string {
	var res string

	// Strip any port declaration (443/80/8080/...)
	host := first(strings.SplitN(url.Host, ":", 2))

	if strings.HasSuffix(host, ".amazonaws.com") {
		// Hostname bucket style, ignore (s3-eu-west.|s3.)amazonaws.com
		parts := strings.SplitN(host, ".", -1)
		if len(parts) > 3 {
			res = res + "/" + strings.Join(parts[:len(parts)-3], ".")
		}
	} else if len(host) > 0 {
		// CNAME bucket style
		res = res + "/" + host
	} else {
		// Bucket as root element in path already
	}

	// RawPath will include the bucket if not in the host
	res = res + strings.SplitN(url.RawPath, "?", 2)[0]

	// Include a sorted list of query parameters that have
	// special meaning to aws.  These should stay decoded for
	// the canonical resource.
	var amz []string
	for key, values := range url.Query() {
		if amzQueryParams[key] {
			for _, value := range values {
				if value != "" {
					amz = append(amz, key+"="+value)
				} else {
					amz = append(amz, key)
				}
			}
		}
	}

	if len(amz) > 0 {
		sort.Strings(amz)
		res = res + "?" + strings.Join(amz, "&")
	}

	// All done.
	return res
}
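As a worked example (hypothetical bucket and object): for a request to http://mybucket.s3.amazonaws.com/photos/cat.jpg?acl&foo=bar, the host contributes "/mybucket", the path contributes "/photos/cat.jpg", and only the AWS-significant query parameter survives, giving the canonical resource /mybucket/photos/cat.jpg?acl.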
Example #9
func getContent(url *url.URL, req *http.Request) *memcache.MemMapItem {

	cacheToken := url.String()

	cached := Repos.GetByKey(cacheToken)
	if cached != nil {
		return cached
	}
	backendUrl := getNewUrl(url)

	newReq := http.Request{
		Method:     "GET",
		RawURL:     backendUrl.String(),
		URL:        backendUrl,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1, // match the HTTP/1.1 Proto above
		RemoteAddr: "192.168.0.21",
	}

	newReq.Header = http.Header{}
	newReq.Header.Add("Accept", "*/*")
	newReq.Header.Add("Accept-Charset", "utf-8,ISO-8859-1;q=0.7,*;q=0.3")
	newReq.Header.Add("Accept-Encoding", "utf-8")
	newReq.Header.Add("Host", backendUrl.Host)

	//newReq = ResponseWriter{};

	response, err := Client.Do(&newReq)

	if err != nil {
		log.Fatal("error: ", err.String())
	}

	cacheItem := memcache.MemMapItem{Key: cacheToken}
	cacheItem.Raw, _ = ioutil.ReadAll(response.Body)
	cacheItem.Head = response.Header

	Repos.Add(&cacheItem)

	return &cacheItem
}
Example #10
File: s3.go Project: russross/propolis
func (p *Propolis) SendRequest(method string, reduced bool, src string, target *url.URL, body io.ReadCloser, hash string, info *os.FileInfo) (resp *http.Response, err os.Error) {
	defer func() {
		// if anything goes wrong, close the body reader
		// if it ends normally, this will be closed already and set to nil
		if body != nil {
			body.Close()
		}
	}()

	var req *http.Request
	if req, err = http.NewRequest(method, target.String(), body); err != nil {
		return
	}

	// set upload file info if applicable
	if info != nil && body != nil {
		// TODO: 0-length files fail because the Content-Length field is missing
		// a fix is in the works in the Go library
		req.ContentLength = info.Size
	}

	if info != nil {
		p.SetRequestMetaData(req, info)
	}

	// reduced redundancy?
	if reduced {
		req.Header.Set("X-Amz-Storage-Class", "REDUCED_REDUNDANCY")
	}

	// are we uploading a file with a content hash?
	if hash != "" {
		req.Header.Set("Content-MD5", hash)
	}

	// is this a copy/metadata update?
	if src != "" {
		// note: src should already be a full bucket + path name
		u := new(url.URL)
		u.Path = src
		req.Header.Set("X-Amz-Copy-Source", u.String())
		req.Header.Set("X-Amz-Metadata-Directive", "REPLACE")
	}

	// sign and execute the request
	// note: 2nd argument is temporary hack to set Content-Length: 0 when needed
	if resp, err = p.SignAndExecute(req, method == "PUT" && body == nil || (info != nil && info.Size == 0)); err != nil {
		return
	}

	// body was closed when the request was written out,
	// so nullify the deferred close
	body = nil

	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		err = os.NewError(resp.Status)
		return
	}

	return
}
Example #11
File: chsupp.go Project: dvyukov/go-conc
func determineFilename(u *url.URL) string {
	h := sha1.New()
	h.Write([]byte(u.String()))
	return hex.EncodeToString(h.Sum())
}
Example #12
File: main.go Project: russross/propolis
func Setup() (p *Propolis, push bool) {
	var refresh, watch, delete, paranoid, reset, practice, public, secure, reduced, directories bool
	var delay, concurrent int
	flag.BoolVar(&refresh, "refresh", true,
		"Scan online bucket to update cache at startup\n"+
			"\tLonger startup time, but catches changes made while offline")
	flag.BoolVar(&watch, "watch", false,
		"Go into daemon mode and watch the local file system\n"+
			"\tfor changes after initial sync (false means sync then quit)")
	flag.BoolVar(&delete, "delete", true,
		"Delete files when syncing as well as copying changed files")
	flag.BoolVar(&paranoid, "paranoid", false,
		"Always verify md5 hash of file contents,\n"+
			"\teven when all metadata is an exact match (slower)")
	flag.BoolVar(&reset, "reset", false,
		"Reset the cache (implies -refresh=true)")
	flag.BoolVar(&practice, "practice", false,
		"Do a practice run without changing any files\n"+
			"\tShows what would be changed (implies -watch=false)")
	flag.BoolVar(&public, "public", true,
		"Make world-readable local files publicly readable\n"+
			"\tin the online bucket (downloadable via the web)")
	flag.BoolVar(&secure, "secure", false,
		"Use secure connections to Amazon S3\n"+
			"\tA bit slower, but data is encrypted when being transferred")
	flag.BoolVar(&reduced, "reduced", false,
		"Use reduced redundancy storage when uploading\n"+
			"\tCheaper, but higher chance of loosing data")
	flag.BoolVar(&directories, "directories", false,
		"Track directories using special zero-length files\n"+
			"\tMostly useful for greater compatibility with s3fslite")
	flag.IntVar(&delay, "delay", 5,
		"Wait this number of seconds from the last change to a file\n"+
			"\tbefore syncing it with the server")
	flag.IntVar(&concurrent, "concurrent", 25,
		"Maximum number of server transactions that are\n"+
			"\tallowed to run concurrently")

	var accesskeyid, secretaccesskey, cache_location string
	flag.StringVar(&accesskeyid, "accesskeyid", "",
		"Amazon AWS Access Key ID")
	flag.StringVar(&secretaccesskey, "secretaccesskey", "",
		"Amazon AWS Secret Access Key")
	flag.StringVar(&cache_location, "cache", default_cache_location,
		"Metadata cache location\n"+
			"\tA sqlite3 database file that caches online metadata")

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr,
			"Propolis:\n"+
				"  Amazon S3 <--> local file system synchronizer\n"+
				"  Synchronizes a local directory with an S3 bucket, then\n"+
				"  watches the local directory for changes and automatically\n"+
				"  propogates them to the bucket.\n\n"+
				"  See http://github.com/russross/propolis for details\n\n"+
				"  Copyright 2011 by Russ Ross <*****@*****.**>\n\n"+
				"  Propolis comes with ABSOLUTELY NO WARRANTY.  This is free software, and you\n"+
				"  are welcome to redistribute it under certain conditions.  See the GNU\n"+
				"  General Public Licence for details.\n\n"+
				"Usage:\n"+
				"  To start by syncing remote bucket to match local file system:\n"+
				"      %s [flags] local/dir s3:bucket[:remote/dir]\n"+
				"  To start by syncing local file system to match remote bucket:\n"+
				"      %s [flags] s3:bucket[:remote/dir] local/dir\n\n"+
				"  Amazon Access Key ID and Secret Access Key can be specified in\n"+
				"  one of three ways, listed in decreasing order of precedence.\n"+
				"  Note: both values must be supplied using a single method:\n\n"+
				"      1. On the command line\n"+
				"      2. In the environment variables %s and %s\n"+
				"      3. In the file %s as key:secret on a single line\n\n"+
				"Options:\n",
			os.Args[0], os.Args[0],
			s3_access_key_id_variable, s3_secret_access_key_variable, s3_password_file)
		flag.PrintDefaults()
	}
	flag.Parse()

	// enforce certain option combinations
	if reset {
		refresh = true
	}
	if practice {
		watch = false
	}

	// make sure we get access keys
	if accesskeyid == "" || secretaccesskey == "" {
		accesskeyid, secretaccesskey = getKeys()
	}
	if accesskeyid == "" || secretaccesskey == "" {
		fmt.Fprintln(os.Stderr, "Error: Amazon AWS Access Key ID and/or Secret Access Key undefined")
		flag.Usage()
		os.Exit(-1)
	}

	// check command-line arguments
	args := flag.Args()
	if len(args) != 2 {
		flag.Usage()
		os.Exit(-1)
	}

	// figure out the direction of sync, parse the bucket and directory info
	var bucketname, bucketprefix, localdir string

	switch {
	case !strings.HasPrefix(args[0], "s3:") && strings.HasPrefix(args[1], "s3:"):
		push = true
		localdir = parseLocalDir(args[0])
		bucketname, bucketprefix = parseBucket(args[1])
	case strings.HasPrefix(args[0], "s3:") && !strings.HasPrefix(args[1], "s3:"):
		push = false
		bucketname, bucketprefix = parseBucket(args[0])
		localdir = parseLocalDir(args[1])
	default:
		flag.Usage()
		os.Exit(-1)
	}

	// make sure the root directory exists
	if info, err := os.Lstat(localdir); err != nil || !info.IsDirectory() {
		fmt.Fprintf(os.Stderr, "%s is not a valid directory\n", localdir)
	}

	// open the database
	var err os.Error
	var cache Cache
	if cache, err = Connect(path.Join(cache_location, bucketname+".sqlite")); err != nil {
		fmt.Println("Error connecting to database:", err)
		os.Exit(-1)
	}

	// create the Propolis object
	url := new(url.URL)
	url.Scheme = "http"
	if secure {
		url.Scheme = "https"
	}
	url.Host = bucketname + ".s3.amazonaws.com"
	url.Path = "/"

	p = &Propolis{
		Bucket:            bucketname,
		Url:               url,
		Secure:            secure,
		ReducedRedundancy: reduced,
		Key:               accesskeyid,
		Secret:            secretaccesskey,

		BucketRoot: bucketprefix,
		LocalRoot:  localdir,

		Refresh:     refresh,
		Paranoid:    paranoid,
		Reset:       reset,
		Directories: directories,
		Practice:    practice,
		Watch:       watch,
		Delay:       delay,
		Concurrent:  concurrent,

		Db: cache,
	}
	return
}
Example #13
func getNewUrl(url *url.URL) *url.URL {
	urlBackend, _ := url.Parse("http://127.0.0.1")
	url.Scheme = urlBackend.Scheme
	url.Host = urlBackend.Host
	return url
}