Example #1
0
// Delete removes a single key from the named bucket.
//
// It resolves the bucket, checks write credentials, creates a per-request
// data session and removes the key from every bucket group. On failure the
// returned error is a KeyError carrying an appropriate HTTP status.
func (bctl *BucketCtl) Delete(bname, key string, req *http.Request) (err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		// Was "upload:" — copy-paste from the upload path; this is the delete path.
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("delete: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("delete-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	// Keep the last non-nil error: previously a successful group arriving
	// after a failed one reset err to nil and the failure was silently lost.
	for r := range s.Remove(key) {
		if rerr := r.Error(); rerr != nil {
			err = rerr
		}
	}

	return
}
Example #2
0
// Lookup resolves a key's location in the named bucket without reading data.
//
// It resolves the bucket, checks (empty/read) credentials, creates a
// per-request data session and runs a parallel lookup across the bucket
// groups, serializing the results into a LookupResult reply.
func (bctl *BucketCtl) Lookup(bname, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthEmpty)
	if err != nil {
		// Was "upload:" — copy-paste from the upload path; this is the lookup path.
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("lookup: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("lookup: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.ReaderIOFlags))

	log.Printf("lookup-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	reply, err = bucket.lookup_serialize(false, s.ParallelLookup(key))
	return
}
Example #3
0
// DataSession creates a new elliptics session for a single HTTP request,
// honoring per-request tuning passed via the URL query and headers:
//
//   - ioflags / cflags query parameters set the session IO/command flags;
//   - the trace id comes from the X-Request header, overridden by the
//     trace_id query parameter; when neither parses, a random id is used.
func (e *Elliptics) DataSession(req *http.Request) (*elliptics.Session, error) {
	s, err := elliptics.NewSession(e.Node)
	if err != nil {
		return nil, err
	}

	query := req.URL.Query()

	// Seed the trace id from the X-Request header; any absence or parse
	// failure falls back to a random 63-bit value.
	var trace_id uint64
	if hdr, ok := req.Header["X-Request"]; !ok {
		trace_id = uint64(rand.Int63())
	} else if parsed, perr := strconv.ParseUint(hdr[0], 0, 64); perr == nil {
		trace_id = parsed
	} else {
		trace_id = uint64(rand.Int63())
	}

	// Optional per-request IO flags (32-bit); silently ignored when unparsable.
	if ioflags, ok := query["ioflags"]; ok {
		if v, perr := strconv.ParseUint(ioflags[0], 0, 32); perr == nil {
			s.SetIOflags(elliptics.IOflag(v))
		}
	}

	// Optional per-request command flags (64-bit); silently ignored when unparsable.
	if cflags, ok := query["cflags"]; ok {
		if v, perr := strconv.ParseUint(cflags[0], 0, 64); perr == nil {
			s.SetCflags(elliptics.Cflag(v))
		}
	}

	// A trace_id query parameter takes precedence over the header; a parse
	// failure here replaces whatever was set above with a random id.
	if trace, ok := query["trace_id"]; ok {
		if parsed, perr := strconv.ParseUint(trace[0], 0, 64); perr == nil {
			trace_id = parsed
		} else {
			trace_id = uint64(rand.Int63())
		}
	}

	s.SetTraceID(elliptics.TraceID(trace_id))

	return s, nil
}
Example #4
0
// BulkDelete removes many keys from the named bucket in one batch.
//
// Per-key failures do not abort the batch: each failed key is reported in
// the returned map (key -> error string) and the function itself returns a
// nil error once the batch has been attempted. A non-nil error is returned
// only for setup failures (unknown bucket, auth, session creation).
func (bctl *BucketCtl) BulkDelete(bname string, keys []string, req *http.Request) (reply map[string]interface{}, err error) {
	reply = make(map[string]interface{})

	bucket, err := bctl.FindBucket(bname)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, err.Error())
		return
	}

	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		// Was "upload:" — copy-paste from the upload path; this is the bulk-delete path.
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("bulk_delete: %s", errors.ErrorData(err)))
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("bulk_delete: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("bulk-delete-trace-id: %x: url: %s, bucket: %s, keys: %v\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, keys)

	// Collect per-key failures into the reply map instead of failing the call.
	for r := range s.BulkRemove(keys) {
		err = r.Error()
		if err != nil {
			reply[r.Key()] = err.Error()
		}
	}

	// The batch itself succeeded; individual errors live in the reply map.
	err = nil

	return
}
Example #5
0
// SetGroupsTimeout orders the session's groups so reads hit backends that are
// not being defragmented first, tunes IO flags accordingly, and stretches the
// timeout when every backend with stats is busy defragmenting.
func (bctl *BucketCtl) SetGroupsTimeout(s *elliptics.Session, bucket *Bucket, key string) {
	// sort groups by defrag state, increase timeout if needed

	groups := make([]uint32, 0)
	defrag_groups := make([]uint32, 0)
	timeout := 30

	// Split groups by the defrag state of the backend serving this key.
	// Groups whose stats cannot be found are skipped here and covered by
	// the metadata fallback below.
	for group_id, sg := range bucket.Group {
		sb, err := sg.FindStatBackendKey(s, key, group_id)
		if err != nil {
			continue
		}

		if sb.DefragState != 0 {
			defrag_groups = append(defrag_groups, group_id)
		} else {
			groups = append(groups, group_id)
		}
	}

	// Not being defragmented backends first, then those which are currently being defragmented
	groups = append(groups, defrag_groups...)
	// After the append the lengths match only when every backend with stats
	// is defragmenting (this also holds when no stats were found at all);
	// such reads get a much longer timeout.
	if len(groups) == len(defrag_groups) {
		timeout = 90
	}

	// if there are no backends being defragmented, use weights to mix read states
	// if there are such backends, use strict order and read from non-defragmented backends first
	ioflags := elliptics.IOflag(bctl.Conf.Proxy.ReaderIOFlags) | s.GetIOflags()
	if len(defrag_groups) == 0 {
		ioflags |= elliptics.DNET_IO_FLAGS_MIX_STATES
	}
	s.SetIOflags(ioflags)

	// there are no stats for bucket groups, use what we have in metadata
	if len(groups) == 0 {
		groups = bucket.Meta.Groups
	}

	s.SetGroups(groups)
	s.SetTimeout(timeout)
}
Example #6
0
// bucket_upload writes the request body into the given bucket under key.
//
// It validates write credentials and the Content-Length header (zero-length
// uploads are rejected), honors an optional Range header for the write
// offset, performs the write across the bucket groups, and then feeds the
// measured per-byte write latency into each backend's PID pain statistic.
// The serialized lookup of the written replicas is returned.
func (bctl *BucketCtl) bucket_upload(bucket *Bucket, key string, req *http.Request) (reply *reply.LookupResult, err error) {
	err = bucket.check_auth(req, BucketAuthWrite)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), errors.ErrorStatus(err),
			fmt.Sprintf("upload: %s", errors.ErrorData(err)))
		return
	}

	// A known total size is required both for the write itself and for the
	// latency-per-byte computation below.
	lheader, ok := req.Header["Content-Length"]
	if !ok {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: there is no Content-Length header")
		return
	}

	total_size, err := strconv.ParseUint(lheader[0], 0, 64)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			fmt.Sprintf("upload: invalid content length conversion: %v", err))
		return
	}

	// Zero also guards the division by total_size further down.
	if total_size == 0 {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest,
			"upload: attempting to perform invalid zero-length upload")
		return
	}

	s, err := bctl.e.DataSession(req)
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusServiceUnavailable,
			fmt.Sprintf("upload: could not create data session: %v", err))
		return
	}
	defer s.Delete()

	// Accept results from every group so per-group errors are visible in
	// the reply and can feed the PID statistics below.
	s.SetFilter(elliptics.SessionFilterAll)
	s.SetNamespace(bucket.Name)
	s.SetGroups(bucket.Meta.Groups)
	s.SetTimeout(100)
	s.SetIOflags(elliptics.IOflag(bctl.Conf.Proxy.WriterIOFlags))

	log.Printf("upload-trace-id: %x: url: %s, bucket: %s, key: %s, id: %s\n",
		s.GetTraceID(), req.URL.String(), bucket.Name, key, s.Transform(key))

	// An optional Range header selects the write offset within the object.
	ranges, err := ranges.ParseRange(req.Header.Get("Range"), int64(total_size))
	if err != nil {
		err = errors.NewKeyError(req.URL.String(), http.StatusBadRequest, fmt.Sprintf("upload: %v", err))
		return
	}

	var offset uint64 = 0
	if len(ranges) != 0 {
		offset = uint64(ranges[0].Start)
	}

	start := time.Now()

	reply, err = bucket.lookup_serialize(true, s.WriteData(key, req.Body, offset, total_size))

	// PID controller should aim at some destination performance point
	// it can be velocity of the vehicle or desired write rate
	//
	// Let's consider our desired control point as number of useconds needed to write 1 byte into the storage
	// In the ideal world it would be zero

	time_us := time.Since(start).Nanoseconds() / 1000
	// e is the observed "pain": microseconds spent per uploaded byte.
	e := float64(time_us) / float64(total_size)

	bctl.RLock()

	// Update the pain statistic of every backend that took part in the
	// write; failed writes get a fixed penalty instead of the measured pain.
	str := make([]string, 0)
	for _, res := range reply.Servers {
		sg, ok := bucket.Group[res.Group]
		if ok {
			st, back_err := sg.FindStatBackend(res.Addr, res.Backend)
			if back_err == nil {
				old_pain := st.PIDPain()
				update_pain := e
				estring := "ok"

				if res.Error != nil {
					update_pain = BucketWriteErrorPain
					estring = res.Error.Error()
				}
				st.PIDUpdate(update_pain)

				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: %v, pain: %f -> %f}",
					res.Group, time_us, e, estring, old_pain, st.PIDPain()))
			} else {
				str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no backend stat}",
					res.Group, time_us, e))
			}
		} else {
			str = append(str, fmt.Sprintf("{group: %d, time: %d us, e: %f, error: no group stat}",
				res.Group, time_us, e))
		}
	}

	// Nothing succeeded: log every metadata group as failed for visibility.
	if len(reply.SuccessGroups) == 0 {
		for _, group_id := range bucket.Meta.Groups {
			str = append(str, fmt.Sprintf("{error-group: %d, time: %d us}", group_id, time_us))
		}
	}

	bctl.RUnlock()

	log.Printf("bucket-upload: bucket: %s, key: %s, size: %d: %v\n", bucket.Name, key, total_size, str)

	return
}