Example #1
func (bctl *BucketCtl) Delete(bname, key string, req *http.Request) (err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("delete-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

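	// Drain the asynchronous removal results; only the error from the last
	// result read from the channel is what the caller sees.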
	for r := range s.Remove(key) {
		err = r.Error()
	}

	return
}
Example #2
func (bctl *BucketCtl) Lookup(bname, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthEmpty)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("lookup: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.ReaderIOFlags))

	log.Printf("lookup-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	reply, err = bucket.lookup_serialize(false, s.ParallelLookup(key))
	return
}
Example #3
func (bctl *BucketCtl) Stream(bname, key string, w http.ResponseWriter, req *http.Request) (err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthEmpty)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("stream: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("stream: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetFilter(elliptics.SessionFilterAll)
	s.SetNamespace(bucket.Name)
	bctl.SetGroupsTimeout(s, bucket, key)

	log.Printf("stream-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	offset, size, err := URIOffsetSize(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("stream: %v", err))
		return
	}

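	// Translate the offset/size taken from the URI into a Range header so that
	// http.ServeContent below replies with 206 Partial Content,
	// e.g. offset=100, size=50 becomes "bytes=100-149".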
	if offset != 0 || size != 0 {
		if size == 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
		} else {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1))
		}
	}

	rs, err := elliptics.NewReadSeekerOffsetSize(s, key, offset, size)
	if err != nil {
		err = errors.NewKeyErrorFromEllipticsError(err, req.URL.String(), "stream: could not create read-seeker")
		return
	}
	defer rs.Free()

	bctl.SetContentType(key, w)
	http.ServeContent(w, req, key, rs.Mtime, rs)
	return
}
Example #4
func (bctl *BucketCtl) BulkDelete(bname string, keys []string, req *http.Request) (reply map[string]interface{}, err error) {
	reply = make(map[string]interface{})

	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("bulk_delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("bulk-delete-trace-id: %x: url: %s, bucket: %s, keys: %v\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, keys)

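	// Collect per-key removal failures; keys that were removed successfully
	// are not mentioned in the reply.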
	for r := range s.BulkRemove(keys) {
		err = r.Error()
		if err != nil {
			reply[r.Key()] = err.Error()
		}
	}

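	// Per-key errors are already reported in the reply map, so the bulk call
	// itself is not considered failed.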
	err = nil

	return
}
Example #5
func (bctl *BucketCtl) bucket_upload(bucket *Bucket, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	lheader, ok := req.Header["Content-Length"]
	if !ok {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: there is no Content-Length header")
		return
	}

	total_size, err := strconv.ParseUint(lheader[0], 0, 64)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("upload: invalid content length conversion: %v", err))
		return
	}

	if total_size == 0 {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: attempting to perform invalid zero-length upload")
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("upload: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetFilter(elliptics.SessionFilterAll)
	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetTimeout(100)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("upload-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	ranges, err := ranges.ParseRange(req.Header.Get("Range"), int64(total_size))
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("upload: %v", err))
		return
	}

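	// If the client supplied a Range header, start writing at the requested offset.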
	var offset uint64 = 0
	if len(ranges) != 0 {
		offset = uint64(ranges[0].Start)
	}

	start := time.Now()

	reply, err = bucket.lookup_serialize(true, s.WriteData(key, req.Body, offset, total_size))

	// The PID controller should aim at some desired performance point:
	// it can be the velocity of a vehicle or, here, the desired write rate.
	//
	// Let's take as our control point the number of microseconds needed to write 1 byte into the storage.
	// In the ideal world it would be zero.
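	// For illustration (numbers are made up): writing a 1 MB request in 50 ms
	// gives e = 50000 us / 1000000 bytes = 0.05 us per byte.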

	time_us := time.Since(start).Nanoseconds() / 1000
	e := float64(time_us) / float64(total_size)

	bctl.RLock()

	str := make([]string, 0)
	for _, res := range reply.Servers {
		sg, ok := bucket.Group[res.Group]
		if ok {
			st, back_err := sg.FindStatBackend(res.Addr, res.Backend)
			if back_err == nil {
				old_pain := st.PIDPain()
				update_pain := e
				estring := "ok"

				if res.Error != nil {
					update_pain = BucketWriteErrorPain
					estring = res.Error.Error()
				}
				st.PIDUpdate(update_pain)

				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: %v, pain: %f -> %f}",
					res.Group, time_us, e, estring, old_pain, st.PIDPain()))
			} else {
				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no backend stat}",
					res.Group, time_us, e))
			}
		} else {
			str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no group stat}",
				res.Group, time_us, e))
		}
	}

	if len(reply.SuccessGroups) == 0 {
		for _, group_id := range bucket.Meta.Groups {
			str = append(str, fmt.Sprintf("{error-group: %d, time: %d us}", group_id, time_us))
		}
	}

	bctl.RUnlock()

	log.Printf("bucket-upload: bucket: %s, key: %s, size: %d: %v\n", bucket.Name, key, total_size, str)

	return
}
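The functions above are methods on BucketCtl; the sketch below shows one way an HTTP handler could be wired to the Delete method. It is a minimal illustration under stated assumptions, not code from the project: the "/delete/<bucket>/<key>" URL layout, the RegisterDeleteHandler name, and the use of http.ServeMux and the standard strings package are all assumptions, while errors.ErrorStatus is the same helper the methods above use to carry an HTTP status inside an error.

// A minimal routing sketch, assuming it lives in the same package as BucketCtl
// and that bctl has already been constructed elsewhere (construction omitted).
func RegisterDeleteHandler(mux *http.ServeMux, bctl *BucketCtl) {
	mux.HandleFunc("/delete/", func(w http.ResponseWriter, req *http.Request) {
		// Split "/delete/<bucket>/<key>" into the bucket name and the key.
		parts := strings.SplitN(strings.TrimPrefix(req.URL.Path, "/delete/"), "/", 2)
		if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
			http.Error(w, "expected /delete/<bucket>/<key>", http.StatusBadRequest)
			return
		}

		if err := bctl.Delete(parts[0], parts[1], req); err != nil {
			// errors.ErrorStatus() extracts the HTTP status embedded in the error,
			// mirroring how the methods above construct their KeyError values.
			http.Error(w, err.Error(), errors.ErrorStatus(err))
			return
		}
		w.WriteHeader(http.StatusOK)
	})
}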