Example No. 1
func (bctl *BucketCtl) Delete(bname, key string, req *http.Request) (err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("delete: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("delete-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	for r := range s.Remove(key) {
		err = r.Error()
	}

	return
}
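A caller of Delete is typically an HTTP handler along the lines of the lookup_handler in Example No. 5. A minimal sketch of such a handler, assuming the same proxy global, Reply type and GoodReply helper used throughout these examples:

func delete_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
	bucket := strings[0]
	key := strings[1]

	err := proxy.bctl.Delete(bucket, key, req)
	if err != nil {
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	w.WriteHeader(http.StatusOK)
	return GoodReply()
}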
Example No. 2
func (bctl *BucketCtl) Lookup(bname, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthEmpty)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("lookup: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("lookup: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.ReaderIOFlags))

	log.Printf("lookup-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	reply, err = bucket.lookup_serialize(false, s.ParallelLookup(key))
	return
}
Example No. 3
func (b *Bucket) check_auth(r *http.Request, required_flags uint64) (err error) {
	if len(b.Meta.Acl) == 0 {
		err = nil
		return
	}

	user, recv_auth, err := auth.GetAuthInfo(r)
	if err != nil {
		return
	}

	acl, ok := b.Meta.Acl[user]
	if !ok {
		err = errors.NewKeyError(r.URL.String(), http.StatusForbidden,
			fmt.Sprintf("auth: header: '%v': there is no user '%s' in ACL",
				r.Header[auth.AuthHeaderStr], user))
		return
	}

	log.Printf("check-auth: url: %s, user: %s, token: %s, flags: %x, required: %x\n",
		r.URL.String(), acl.User, acl.Token, acl.Flags, required_flags)

	// only check required_flags if it is not @BucketAuthEmpty
	// @BucketAuthEmpty required_flags is set by readers, non-empty required_flags are supposed to mean modification requests
	if required_flags != BucketAuthEmpty {
		// there are no required flags in ACL
		if (acl.Flags & required_flags) == 0 {
			err = errors.NewKeyError(r.URL.String(), http.StatusForbidden,
				fmt.Sprintf("auth: header: '%v': user '%s' is not allowed to do action: acl-flags: 0x%x, required-flags: 0x%x",
					r.Header[auth.AuthHeaderStr], user, acl.Flags, required_flags))
			return
		}
	}

	// skip authorization if special ACL flag is set
	if (acl.Flags & BucketAuthNoToken) != 0 {
		return
	}

	calc_auth, err := auth.GenerateSignature(acl.Token, r.Method, r.URL, r.Header)
	if err != nil {
		err = errors.NewKeyError(r.URL.String(), http.StatusForbidden,
			fmt.Sprintf("auth: header: '%v': user: %s: hmac generation failed: %v",
				r.Header[auth.AuthHeaderStr], user, err))
		return
	}

	if recv_auth != calc_auth {
		err = errors.NewKeyError(r.URL.String(), http.StatusForbidden,
			fmt.Sprintf("auth: header: '%v': user: %s, hmac mismatch: recv: '%s', calc: '%s'",
				r.Header[auth.AuthHeaderStr], user, recv_auth, calc_auth))
		return
	}

	return
}
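On the client side the same primitive has to be used to produce the value that check_auth verifies. A minimal sketch, assuming a token taken from the bucket ACL; note that auth.GetAuthInfo above also extracts a user name from the header, so the full header format carries more than just the signature and is not shown in these examples:

// hypothetical helper: compute the signature for an outgoing request with a bucket ACL token
func sign_request(req *http.Request, token string) (string, error) {
	// the same call check_auth uses to compute calc_auth; the signature covers
	// the method, the URL and the headers that will actually be sent
	return auth.GenerateSignature(token, req.Method, req.URL, req.Header)
}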
Example No. 4
func (bctl *BucketCtl) Stream(bname, key string, w http.ResponseWriter, req *http.Request) (err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthEmpty)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("stream: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("stream: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetFilter(elliptics.SessionFilterAll)
	s.SetNamespace(bucket.Name)
	bctl.SetGroupsTimeout(s, bucket, key)

	log.Printf("stream-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	offset, size, err := URIOffsetSize(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("stream: %v", err))
		return
	}

	if offset != 0 || size != 0 {
		if size == 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
		} else {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1))
		}
	}

	rs, err := elliptics.NewReadSeekerOffsetSize(s, key, offset, size)
	if err != nil {
		err = errors.NewKeyErrorFromEllipticsError(err, req.URL.String(), "stream: could not create read-seeker")
		return
	}
	defer rs.Free()

	bctl.SetContentType(key, w)
	http.ServeContent(w, req, key, rs.Mtime, rs)
	return
}
Example No. 5
func lookup_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
	bucket := strings[0]
	key := strings[1]

	reply, err := proxy.bctl.Lookup(bucket, key, req)
	if err != nil {
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	reply_json, err := json.Marshal(reply)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("lookup: json marshal failed: %q", err))
		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(reply_json)

	return GoodReply()
}
Example No. 6
func stat_handler(w http.ResponseWriter, req *http.Request, str ...string) Reply {
	bnames := make([]string, 0)

	bnames_combined := strings.SplitN(str[0], "/", 2)
	if len(bnames_combined[0]) != 0 {
		bnames = strings.Split(bnames_combined[0], ",")
		if len(bnames[0]) == 0 {
			bnames = []string{}
		}
	}

	reply, err := proxy.bctl.Stat(req, bnames)
	if err != nil {
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	reply_json, err := json.Marshal(reply)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("stat: json marshal failed: %q", err))
		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(reply_json)

	return GoodReply()
}
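stat_handler treats the first path component as an optional comma-separated list of bucket names: a trailing path of "b1,b2" limits the statistics to those two buckets, while an empty component passes an empty list to Stat (presumably meaning all buckets); anything after the first slash is ignored.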
Example No. 7
func update_local_config_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
	conf := &config.ProxyConfig{}

	err := conf.LoadIO(req.Body)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("conf: could not parse config: %q", err))
		return Reply{
			err:    err,
			status: http.StatusBadRequest,
		}
	}

	conf.Proxy.RedirectToken = proxy.bctl.Conf.Proxy.RedirectToken
	diff := pretty.Diff(proxy.bctl.Conf, conf)
	for _, d := range diff {
		log.Printf("update_local_config_handler: diff: %s\n", d)
	}

	proxy.bctl.Lock()
	proxy.bctl.Conf = conf
	proxy.bctl.DisableConfigUpdateUntil = time.Now().Add(time.Second * time.Duration(conf.Proxy.DisableConfigUpdateForSeconds))
	proxy.bctl.Unlock()

	log.Printf("update_local_config_handler: next automatic config update is only allowed in %d seconds at %s\n",
		conf.Proxy.DisableConfigUpdateForSeconds,
		proxy.bctl.DisableConfigUpdateUntil.String())

	return GoodReply()
}
Example No. 8
func common_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
	if len(proxy.bctl.Conf.Proxy.Root) == 0 {
		err := errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			"common: root option is not configured, reading files is denied")
		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	if len(strings) == 0 {
		w.WriteHeader(http.StatusOK)
		return GoodReply()
	}

	object := path.Clean(strings[0])
	if object == bucket.ProfilePath || object == "." {
		err := errors.NewKeyError(req.URL.String(), http.StatusNotFound,
			fmt.Sprintf("common: could not read file '%s'", object))
		return Reply{
			err:    err,
			status: http.StatusNotFound,
		}
	}

	key := proxy.bctl.Conf.Proxy.Root + "/" + object

	data, err := ioutil.ReadFile(key)
	if err != nil {
		log.Printf("common: url: %s, object: '%s', error: %s\n", req.URL.String(), object, err)

		err = errors.NewKeyError(req.URL.String(), http.StatusNotFound,
			fmt.Sprintf("common: could not read file '%s'", object))
		return Reply{
			err:    err,
			status: http.StatusNotFound,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(data)

	return GoodReplyLength(uint64(len(data)))
}
Example No. 9
func (bctl *BucketCtl) BucketUpload(bucket_name, key string, req *http.Request) (reply *reply.LookupResult, bucket *Bucket, err error) {
	bucket, err = bctl.FindBucket(bucket_name)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	reply, err = bctl.bucket_upload(bucket, key, req)
	return
}
Example No. 10
func (bctl *BucketCtl) BulkDelete(bname string, keys []string, req *http.Request) (reply map[string]interface{}, err error) {
	reply = make(map[string]interface{})

	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("bulk_delete: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("bulk_delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("bulk-delete-trace-id: %x: url: %s, bucket: %s, keys: %v\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, keys)

	for r := range s.BulkRemove(keys) {
		err = r.Error()
		if err != nil {
			reply[r.Key()] = err.Error()
		}
	}

	err = nil

	return
}
Example No. 11
func (bctl *BucketCtl) Upload(key string, req *http.Request) (reply *reply.LookupResult, bucket *Bucket, err error) {
	bucket = bctl.GetBucket(key, req)
	if bucket == nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			"there are no buckets with free space available")
		return
	}

	reply, err = bctl.bucket_upload(bucket, key, req)
	return
}
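Upload differs from BucketUpload in Example No. 9 only in how the bucket is chosen: instead of resolving an explicit bucket name it asks GetBucket (Example No. 20) to pick a writable bucket for the key and fails with 503 when no bucket has free space; both paths then go through the same bucket_upload shown in Example No. 16.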
Example No. 12
func ReadBucket(ell *etransport.Elliptics, name string) (bucket *Bucket, err error) {
	ms, err := ell.MetadataSession()
	if err != nil {
		log.Printf("read-bucket: %s: could not create metadata session: %v", name, err)
		return
	}
	defer ms.Delete()

	ms.SetNamespace(BucketNamespace)

	b := NewBucket(name)

	for rd := range ms.ReadData(name, 0, 0) {
		if rd.Error() != nil {
			err = rd.Error()

			log.Printf("read-bucket: %s: could not read bucket metadata: %v", name, err)
			return
		}

		var out []interface{}
		err = msgpack.Unmarshal([]byte(rd.Data()), &out)
		if err != nil {
			log.Printf("read-bucket: %s: could not parse bucket metadata: %v", name, err)
			return
		}

		err = b.Meta.ExtractMsgpack(out)
		if err != nil {
			log.Printf("read-bucket: %s: unsupported msgpack data: %v", name, err)
			return
		}

		bucket = b
		return
	}

	bucket = nil
	err = errors.NewKeyError(name, http.StatusNotFound,
		"read-bucket: could not read bucket data: ReadData() returned nothing")
	return
}
Example No. 13
func WriteBucket(ell *etransport.Elliptics, meta *BucketMsgpack) (bucket *Bucket, err error) {
	ms, err := ell.MetadataSession()
	if err != nil {
		log.Printf("%s: could not create metadata session: %v", meta.Name, err)
		return
	}
	defer ms.Delete()

	ms.SetNamespace(BucketNamespace)

	out, err := meta.PackMsgpack()
	if err != nil {
		log.Printf("%s: could not pack bucket: %v", meta.Name, err)
		return
	}

	data, err := msgpack.Marshal(&out)
	if err != nil {
		log.Printf("%s: could not marshal bucket metadata: %v", meta.Name, err)
		return
	}

	for wr := range ms.WriteData(meta.Name, bytes.NewReader(data), 0, 0) {
		if wr.Error() != nil {
			err = wr.Error()

			log.Printf("%s: could not write bucket metadata: %v", meta.Name, err)
			return
		}

		bucket = NewBucket(meta.Name)
		bucket.Meta = *meta

		return
	}

	err = errors.NewKeyError(meta.Name, http.StatusNotFound,
		"could not write bucket metadata: WriteData() returned nothing")
	return
}
Example No. 14
func proxy_stat_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
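	// proxy.last_errors is treated as a ring buffer indexed by proxy.error_index:
	// until it has wrapped only the first error_index entries are meaningful and are
	// copied directly; once it has wrapped, the copy below walks the whole buffer
	// starting just past the current index so that older entries come out first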
	start_idx := proxy.error_index
	l := uint64(len(proxy.last_errors))
	if start_idx < uint64(len(proxy.last_errors)) {
		l = start_idx
	}

	res := proxy_stat_reply{
		BucketCtlStat: proxy.bctl.NewBucketCtlStat(),
		Handlers:      estimator_scan_handlers,
		Errors:        make([]ErrorInfo, l),
	}

	if start_idx <= uint64(len(proxy.last_errors)) {
		copy(res.Errors, proxy.last_errors)
	} else {
		var i uint64
		for i = 0; i < uint64(len(proxy.last_errors)); i++ {
			idx := (i + start_idx + 1) % uint64(len(proxy.last_errors))
			res.Errors[i] = proxy.last_errors[idx]
		}
	}

	reply_json, err := json.Marshal(&res)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("stat: json marshal failed: %q", err))
		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(reply_json)

	return GoodReply()
}
Example No. 15
func (p *bproxy) send_upload_reply(w http.ResponseWriter, req *http.Request,
	bucket *bucket.Bucket, key string, resp *reply.LookupResult) Reply {
	reply := reply.Upload{
		Bucket: bucket.Name,
		Key:    key,
		Reply:  resp,
	}

	reply_json, err := json.Marshal(reply)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("upload: json marshal failed: %q", err))

		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(reply_json)

	return GoodReply()
}
Example No. 16
func (bctl *BucketCtl) bucket_upload(bucket *Bucket, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	lheader, ok := req.Header["Content-Length"]
	if !ok {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: there is no Content-Length header")
		return
	}

	total_size, err := strconv.ParseUint(lheader[0], 0, 64)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("upload: invalid content length conversion: %v", err))
		return
	}

	if total_size == 0 {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: attempting to perform invalid zero-length upload")
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("upload: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetFilter(elliptics.SessionFilterAll)
	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetTimeout(100)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("upload-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	ranges, err := ranges.ParseRange(req.Header.Get("Range"), int64(total_size))
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("upload: %v", err))
		return
	}

	var offset uint64 = 0
	if len(ranges) != 0 {
		offset = uint64(ranges[0].Start)
	}

	start := time.Now()

	reply, err = bucket.lookup_serialize(true, s.WriteData(key, req.Body, offset, total_size))

	// The PID controller should aim at some desired performance point,
	// be it the velocity of a vehicle or the desired write rate.
	//
	// Let's consider our control point to be the number of microseconds needed to write 1 byte into the storage.
	// In the ideal world it would be zero.

	time_us := time.Since(start).Nanoseconds() / 1000
	e := float64(time_us) / float64(total_size)

	bctl.RLock()

	str := make([]string, 0)
	for _, res := range reply.Servers {
		sg, ok := bucket.Group[res.Group]
		if ok {
			st, back_err := sg.FindStatBackend(res.Addr, res.Backend)
			if back_err == nil {
				old_pain := st.PIDPain()
				update_pain := e
				estring := "ok"

				if res.Error != nil {
					update_pain = BucketWriteErrorPain
					estring = res.Error.Error()
				}
				st.PIDUpdate(update_pain)

				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: %v, pain: %f -> %f}",
					res.Group, time_us, e, estring, old_pain, st.PIDPain()))
			} else {
				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no backend stat}",
					res.Group, time_us, e))
			}
		} else {
			str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no group stat}",
				res.Group, time_us, e))
		}
	}

	if len(reply.SuccessGroups) == 0 {
		for _, group_id := range bucket.Meta.Groups {
			str = append(str, fmt.Sprintf("{error-group: %d, time: %d us}", group_id, time_us))
		}
	}

	bctl.RUnlock()

	log.Printf("bucket-upload: bucket: %s, key: %s, size: %d: %v\n", bucket.Name, key, total_size, str)

	return
}
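The pain fed into the PID controller is therefore measured in microseconds per written byte: for example, a 1 MB (1,048,576 byte) upload that takes 50 ms gives time_us = 50,000 and e ≈ 0.048, while a failed write on a group is charged the fixed BucketWriteErrorPain penalty instead.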
Example No. 17
func redirect_handler(w http.ResponseWriter, req *http.Request, string_keys ...string) Reply {
	if proxy.bctl.Conf.Proxy.RedirectPort == 0 || proxy.bctl.Conf.Proxy.RedirectPort >= 65536 {
		err := errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("redirect is not allowed because of invalid redirect port %d",
				proxy.bctl.Conf.Proxy.RedirectPort))

		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	bname := string_keys[0]
	key := string_keys[1]

	reply, err := proxy.bctl.Lookup(bname, key, req)
	if err != nil {
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	srv := reply.Servers[rand.Intn(len(reply.Servers))]
	scheme := "http"
	if req.URL.Scheme != "" {
		scheme = req.URL.Scheme
	}

	if len(srv.Filename) == 0 {
		err := errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("lookup returned invalid filename: %s", srv.Filename))

		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	filename := srv.Filename

	if len(proxy.bctl.Conf.Proxy.RedirectRoot) != 0 {
		if strings.HasPrefix(filename, proxy.bctl.Conf.Proxy.RedirectRoot) {
			filename = filename[len(proxy.bctl.Conf.Proxy.RedirectRoot):]
		}
	}

	slash := "/"
	if filename[0] == '/' {
		slash = ""
	}

	ranges, err := ranges.ParseRange(req.Header.Get("Range"), int64(srv.Size))
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("redirect: %v", err))
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	offset, size, err := bucket.URIOffsetSize(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("redirect: %v", err))
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	if len(ranges) != 0 {
		offset = uint64(ranges[0].Start)
		size = uint64(ranges[0].Length)
	}

	if offset >= srv.Size {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("redirect: offset is beyond size of the object: offset: %d, size: %d",
				offset, srv.Size))
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	if size == 0 || offset+size >= srv.Size {
		size = srv.Size - offset
	}

	timestamp := time.Now().Unix()
	url_str := fmt.Sprintf("%s://%s:%d%s%s:%d:%d",
		scheme, srv.Server.HostString(), proxy.bctl.Conf.Proxy.RedirectPort,
		slash, filename, srv.Offset+offset, size)

	u, err := url.Parse(url_str)
	if err != nil {
		err := errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("could not parse generated redirect url '%s'", url_str))

		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	req.URL = u

	w.Header().Set("X-Ell-Mtime", fmt.Sprintf("%d", srv.Info.Mtime.Unix()))
	w.Header().Set("X-Ell-Signtime", fmt.Sprintf("%d", timestamp))
	w.Header().Set("X-Ell-Signature-Timeout", fmt.Sprintf("%d", proxy.bctl.Conf.Proxy.RedirectSignatureTimeout))
	w.Header().Set("X-Ell-File-Offset", fmt.Sprintf("%d", srv.Offset))
	w.Header().Set("X-Ell-Total-Size", fmt.Sprintf("%d", srv.Size))
	w.Header().Set("X-Ell-File", filename)

	signature, err := auth.GenerateSignature(proxy.bctl.Conf.Proxy.RedirectToken, "GET", req.URL, w.Header())
	if err != nil {
		err := errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("could not generate signature for redirect url '%s': %v", url_str, err))

		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	w.Header().Set(auth.AuthHeaderStr, signature)

	http.Redirect(w, req, url_str, http.StatusFound)

	return GoodReply()
}
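The redirect URL built above has the form scheme://host:RedirectPort/filename:offset:size, where filename is the backend blob path (with RedirectRoot stripped), offset is the position of the requested range inside that blob (srv.Offset plus the client-supplied offset) and size is the number of bytes to serve; with hypothetical values it might look like http://storage1.example.com:8080/2/data-0.blob:10485760:65536. The X-Ell-* headers and the signature placed into the auth header let the storage-side server validate the redirect before serving the bytes.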
Example No. 18
func bulk_delete_handler(w http.ResponseWriter, req *http.Request, strings ...string) Reply {
	bucket := strings[0]

	var err error
	var v map[string]interface{} = make(map[string]interface{})
	if err = json.NewDecoder(req.Body).Decode(&v); err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("bulk_delete: could not parse input json: %v", err))
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	kv, ok := v["keys"]
	if !ok {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"bulk_delete: there is no 'keys' array")
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	kv_array, ok := kv.([]interface{})
	if !ok {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"bulk_delete: 'keys' must be an array of strings")
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	keys := make([]string, 0, len(kv_array))
	for _, k := range kv_array {
		// skip entries that are not strings instead of panicking on a failed type assertion
		if skey, kok := k.(string); kok {
			keys = append(keys, skey)
		}
	}

	if len(keys) == 0 {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"bulk_delete: 'keys' array is empty")
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	reply, err := proxy.bctl.BulkDelete(bucket, keys, req)
	if err != nil {
		return Reply{
			err:    err,
			status: errors.ErrorStatus(err),
		}
	}

	reply_json, err := json.Marshal(reply)
	if err != nil {
		log.Printf("url: %s: bulk_delete: json marshal failed: %q\n", req.URL, err)
		return Reply{
			err:    err,
			status: http.StatusServiceUnavailable,
		}
	}

	w.WriteHeader(http.StatusOK)
	w.Write(reply_json)

	return GoodReply()
}
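bulk_delete_handler expects a JSON body with a "keys" array of strings, for example {"keys": ["photo-1", "photo-2"]}. The reply produced by BulkDelete in Example No. 10 maps only the keys whose removal failed to their error strings, so an empty JSON object in the response means every key was removed.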
Example No. 19
func generic_handler(w http.ResponseWriter, req *http.Request) {
	// join together sequential // in the URL path

	start := time.Now()

	proxy.bctl.RLock()
	for k, v := range proxy.bctl.Conf.Proxy.Headers {
		w.Header().Set(k, v)
	}
	proxy.bctl.RUnlock()

	content_length := get_content_length(req.Header)

	reply := Reply{
		status: http.StatusBadRequest,
		err:    errors.NewKeyError(req.URL.String(), http.StatusBadRequest, "there is no registered handler for this path"),
	}

	var h *handler

	if req.Method == "HEAD" {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("OK"))
		return
	}

	path, err := url.QueryUnescape(req.URL.Path)
	if err != nil {
		path = req.URL.Path
		reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("could not unescape URL: %v", err))
	} else {
		hstrings := strings.SplitN(path, "/", 3)
		if len(hstrings) < 2 {
			path = req.URL.Path
			reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, "could not split URL")
		} else {
			var ok bool

			param_strings := make([]string, 0)
			h, ok = proxy_handlers[hstrings[1]]
			if !ok {
				h = proxy_handlers["/"]
				param_strings = []string{path}
				ok = true
			} else {
				if len(hstrings) != 3 {
					reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
						fmt.Sprintf("not enough path parts for handler: %v, must be at least: %d",
							len(hstrings)-1, h.Params+1))
					ok = false
				} else {
					if h.Params == 0 {
						param_strings = append(param_strings, hstrings[2])
					} else {
						param_strings = strings.SplitN(hstrings[2], "/", h.Params)
					}

					if len(param_strings) < h.Params {
						reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
							fmt.Sprintf("not enough path parameters for handler: %v, must be at least: %d",
								len(param_strings), h.Params))
						ok = false
					} else if h.Params > 0 && len(param_strings[h.Params-1]) == 0 {
						reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
							"last path parameter cannot be empty")
						ok = false
					}
				}
			}

			if ok {
				method_matched := false
				for _, method := range h.Methods {
					if method == req.Method {
						method_matched = true
						break
					}
				}

				if method_matched {
					log.Printf("url: %s, handler: %s, time-since-start: %s\n",
						req.URL.String(), hstrings[1], time.Since(start).String())
					reply = h.Function(w, req, param_strings...)
				} else {
					reply.err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
						fmt.Sprintf("method doesn't match: provided: %s, required: %v",
							req.Method, h.Methods))
				}
			}
		}
	}

	msg := "OK"
	if reply.err != nil {
		msg = reply.err.Error()
		proxy.add_error(req.Method, req.RemoteAddr, req.URL.RequestURI(), reply.status, msg)
	}

	if content_length == 0 {
		content_length = get_content_length(w.Header())
		if content_length == 0 {
			content_length = reply.length
		}
	}

	duration := time.Since(start)
	if h != nil {
		h.Estimator.Push(content_length, reply.status)
	}

	log.Printf("access_log: method: '%s', client: '%s', x-fwd: '%v', path: '%s', encoded-uri: '%s', "+
		"status: %d, size: %d, time: %.3f ms, err: '%v'\n",
		req.Method, req.RemoteAddr, req.Header.Get("X-Forwarded-For"), path, req.URL.RequestURI(),
		reply.status, content_length, float64(duration.Nanoseconds())/1000000.0, msg)

	if reply.err != nil {
		http.Error(w, reply.err.Error(), reply.status)
	}
}
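generic_handler splits the unescaped path into at most three pieces; the piece after the leading slash names an entry in proxy_handlers, and unknown names fall through to the "/" entry, which receives the whole path and ends up serving static files via common_handler. Each table entry describes how many path parameters the handler needs (Params), which HTTP methods it accepts (Methods), the function to call (handlers such as lookup_handler or bulk_delete_handler above) and an Estimator that accumulates per-handler size and status statistics.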
Example No. 20
func (bctl *BucketCtl) GetBucket(key string, req *http.Request) (bucket *Bucket) {
	s, err := bctl.e.MetadataSession()
	if err != nil {
		// could not create metadata session: fall back to a random bucket
		log.Printf("get-bucket: url: %s: could not create metadata session: %v\n", req.URL.String(), err)
		return bctl.Bucket[rand.Intn(len(bctl.Bucket))]
	}
	defer s.Delete()

	type bucket_stat struct {
		Bucket        *Bucket
		SuccessGroups []uint32
		ErrorGroups   []uint32
		Pain          float64
		Range         float64

		pains      []float64
		free_rates []float64

		abs []string
	}

	stat := make([]*bucket_stat, 0)
	failed := make([]*bucket_stat, 0)

	bctl.RLock()

	for _, b := range bctl.Bucket {
		bs := &bucket_stat{
			Bucket:        b,
			SuccessGroups: make([]uint32, 0),
			ErrorGroups:   make([]uint32, 0),
			Pain:          0.0,
			Range:         0.0,

			pains:      make([]float64, 0, len(b.Group)),
			free_rates: make([]float64, 0, len(b.Group)),
		}

		s.SetNamespace(b.Name)

		for group_id, sg := range b.Group {
			st, err := sg.FindStatBackendKey(s, key, group_id)
			if err != nil {
				// there is no statistics for given address+backend, which should host our data
				// do not allow to write into the bucket which contains given address+backend

				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainNoStats
				continue
			}

			bs.abs = append(bs.abs, st.Ab.String())

			if st.RO {
				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainStatRO
				continue
			}

			if st.Error.Code != 0 {
				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainStatError
				continue
			}

			// this is an empty stat structure
			if st.VFS.TotalSizeLimit == 0 || st.VFS.Total == 0 {
				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainNoStats
				continue
			}

			free_space_rate := FreeSpaceRatio(st, uint64(req.ContentLength))
			bs.Pain += 1000.0 / free_space_rate * 5.0

			if free_space_rate <= bctl.Conf.Proxy.FreeSpaceRatioHard {
				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainNoFreeSpaceHard
			} else if free_space_rate <= bctl.Conf.Proxy.FreeSpaceRatioSoft {
				bs.ErrorGroups = append(bs.ErrorGroups, group_id)

				bs.Pain += PainNoFreeSpaceSoft
			} else {
				bs.SuccessGroups = append(bs.SuccessGroups, group_id)
			}

			pp := st.PIDPain()

			bs.Pain += pp * float64(req.ContentLength)
			bs.pains = append(bs.pains, pp)
			bs.free_rates = append(bs.free_rates, free_space_rate)
		}

		total_groups := len(bs.SuccessGroups) + len(bs.ErrorGroups)
		diff := 0
		if len(b.Meta.Groups) > total_groups {
			diff += len(b.Meta.Groups) - total_groups
		}

		bs.Pain += float64(diff) * PainNoGroup

		// calculate discrepancy pain:
		// run over all address+backends in every group in given bucket,
		// sum up number of live records
		// set discrepancy as a maximum difference between number of records among all groups
		var min_records uint64 = 1<<31 - 1
		var max_records uint64 = 0

		records := make([]uint64, 0)
		for _, sg := range b.Group {
			var r uint64 = 0

			for _, sb := range sg.Ab {
				r += sb.VFS.RecordsTotal - sb.VFS.RecordsRemoved
			}

			records = append(records, r)
		}

		for _, r := range records {
			if r < min_records {
				min_records = r
			}

			if r > max_records {
				max_records = r
			}
		}
		bs.Pain += float64(max_records-min_records) * PainDiscrepancy

		// do not even consider buckets without free space even in one group
		if bs.Pain >= PainNoFreeSpaceHard {
			//log.Printf("find-bucket: url: %s, bucket: %s, content-length: %d, " +
			//	"groups: %v, success-groups: %v, error-groups: %v, " +
			//	"pain: %f, pains: %v, free_rates: %v: pain is higher than HARD limit\n",
			//	req.URL.String(), b.Name, req.ContentLength, b.Meta.Groups, bs.SuccessGroups, bs.ErrorGroups, bs.Pain,
			//	bs.pains, bs.free_rates)
			failed = append(failed, bs)
			continue
		}

		if bs.Pain != 0 {
			bs.Range = 1.0 / bs.Pain
		} else {
			bs.Range = 1.0
		}

		stat = append(stat, bs)
	}

	bctl.RUnlock()

	// there are no buckets suitable for this request
	// either there is no space in either bucket, or there are no buckets at all
	if len(stat) == 0 {
		str := make([]string, 0)
		for _, bs := range failed {
			str = append(str,
				fmt.Sprintf("{bucket: %s, success-groups: %v, error-groups: %v, groups: %v, "+
					"abs: %v, pain: %f, free-rates: %v}",
					bs.Bucket.Name, bs.SuccessGroups, bs.ErrorGroups, bs.Bucket.Meta.Groups,
					bs.abs, bs.Pain, bs.free_rates))
		}

		log.Printf("find-bucket: url: %s, content-length: %d: there are no suitable buckets: %v",
			req.URL.String(), req.ContentLength, str)
		return nil
	}

	// get rid of buckets without free space if we do have other buckets
	ok_buckets := 0
	nospace_buckets := 0
	for _, bs := range stat {
		if bs.Pain < PainNoFreeSpaceSoft {
			ok_buckets++
		} else {
			nospace_buckets++
		}
	}

	if nospace_buckets != 0 && ok_buckets != 0 {
		tmp := make([]*bucket_stat, 0)
		for _, bs := range stat {
			if bs.Pain < PainNoFreeSpaceSoft {
				tmp = append(tmp, bs)
			}
		}

		stat = tmp
	}

	str := make([]string, 0)
	show_num := 0
	for _, bs := range stat {
		str = append(str,
			fmt.Sprintf("{bucket: %s, success-groups: %v, error-groups: %v, groups: %v, "+
				"abs: %v, pain: %f, free-rates: %v}",
				bs.Bucket.Name, bs.SuccessGroups, bs.ErrorGroups, bs.Bucket.Meta.Groups,
				bs.abs, bs.Pain, bs.free_rates))

		show_num++
		if show_num >= 5 {
			break
		}
	}

	log.Printf("find-bucket: url: %s, content-length: %d, buckets: %d, showing top %d: %v",
		req.URL.String(), req.ContentLength, len(stat), len(str), str)

	var sum int64 = 0
	for {
		sum = 0
		var multiple int64 = 10

		for _, bs := range stat {
			sum += int64(bs.Range)
		}

		if sum >= multiple {
			break
		} else {
			for _, bs := range stat {
				bs.Range *= float64(multiple)
			}
		}
	}

	r := rand.Int63n(sum)
	for _, bs := range stat {
		r -= int64(bs.Range)
		if r <= 0 {
			log.Printf("find-bucket: url: %s, selected bucket: %s, content-length: %d, groups: %v, success-groups: %v, error-groups: %v, pain: %f, pains: %v, free_rates: %v\n",
				req.URL.String(), bs.Bucket.Name, req.ContentLength,
				bs.Bucket.Meta.Groups, bs.SuccessGroups, bs.ErrorGroups,
				bs.Pain, bs.pains, bs.free_rates)
			return bs.Bucket
		}
	}

	return nil
}
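Bucket selection at the end of GetBucket is a weighted random draw: every usable bucket gets Range = 1/Pain (or 1.0 when it accumulated no pain), the ranges are scaled by 10 until their integer sum reaches at least 10, and a random value in [0, sum) then picks a bucket with probability roughly proportional to its Range. For example, two buckets with pains 2.0 and 6.0 end up with weights in about a 3:1 ratio, so the healthier bucket is chosen roughly three times as often.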