Example #1
func (fdb *fdbSlice) deleteSecIndex(docid []byte, workerId int) {

	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	var olditm []byte
	var err error

	if olditm, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error locating "+
			"backindex entry for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}

	//if the old item is nil, nothing needs to be done. This is the case of deletes
	//which happened before the index was created.
	if olditm == nil {
		logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %v. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//delete from main index
	t0 := time.Now()
	if err = fdb.main[workerId].DeleteKV(olditm); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Key %v. Error %v", fdb.id, fdb.idxInstId,
			docid, olditm, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(olditm)))

	//delete from the back index
	t0 = time.Now()
	if err = fdb.back[workerId].DeleteKV(docid); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from back index for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
	fdb.isDirty = true

}
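The delete path above spans two stores: the back index (docid to current secondary key) is consulted first so the matching main-index entry can be removed before the back-index entry itself is dropped. Below is a minimal sketch of that forward/reverse bookkeeping using in-memory maps instead of ForestDB; the memSlice type and deleteSec method are illustrative stand-ins, not the indexer's API.

package main

import "fmt"

//memSlice is an in-memory stand-in: main holds secondary-key entries,
//back maps docid -> current secondary key.
type memSlice struct {
	main map[string]struct{}
	back map[string]string
}

//deleteSec mirrors the flow above: look up the old key in the back index,
//remove it from the main index, then remove the back-index entry itself.
func (s *memSlice) deleteSec(docid string) {
	oldkey, ok := s.back[docid]
	if !ok {
		//delete arrived before the index saw this doc; nothing to do
		return
	}
	delete(s.main, oldkey) //delete from main index
	delete(s.back, docid)  //delete from back index
}

func main() {
	s := &memSlice{
		main: map[string]struct{}{"age:42": {}},
		back: map[string]string{"doc1": "age:42"},
	}
	s.deleteSec("doc1")
	fmt.Println(len(s.main), len(s.back)) //prints: 0 0
}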
Example #2
func (q *atomicMutationQueue) dequeueUptoSeqno(vbucket Vbucket, seqno Seqno,
	datach chan *MutationKeys) {

	//every DEQUEUE_POLL_INTERVAL milliseconds, check for new mutations
	ticker := time.NewTicker(time.Millisecond * DEQUEUE_POLL_INTERVAL)

	var dequeueSeq Seqno

	for range ticker.C {
		for platform.LoadPointer(&q.head[vbucket]) !=
			platform.LoadPointer(&q.tail[vbucket]) { //if queue is nonempty

			head := (*node)(platform.LoadPointer(&q.head[vbucket]))
			//copy the mutation pointer
			m := head.next.mutation
			if seqno >= m.meta.seqno {
				//free mutation pointer
				head.next.mutation = nil
				//move head to next
				platform.StorePointer(&q.head[vbucket], unsafe.Pointer(head.next))
				platform.AddInt64(&q.size[vbucket], -1)
				//send mutation to caller
				dequeueSeq = m.meta.seqno
				datach <- m
			}

			//once the seqno is reached, close the channel
			if seqno <= dequeueSeq {
				ticker.Stop()
				close(datach)
				return
			}
		}
	}
}
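dequeueUptoSeqno pairs a polling ticker with a drain loop: on every tick it pops mutations whose seqno is at or below the target and closes datach once the target seqno has been handed to the caller. The self-contained sketch below reproduces that poll-and-drain shape; the slice-backed pollQueue, the item type, and the early break on a higher seqno are simplifying assumptions, not the indexer's atomic queue.

package main

import (
	"fmt"
	"time"
)

//item is a stand-in for a mutation carrying a sequence number.
type item struct{ seqno uint64 }

type pollQueue struct{ items []item }

//drainUpto polls every interval and streams items on datach until the
//item with the target seqno has been delivered, then closes the channel.
func (q *pollQueue) drainUpto(target uint64, interval time.Duration, datach chan<- item) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for range ticker.C {
		for len(q.items) > 0 {
			it := q.items[0]
			if it.seqno > target {
				break //nothing more to deliver up to this seqno
			}
			q.items = q.items[1:]
			datach <- it
			if it.seqno == target {
				close(datach)
				return
			}
		}
	}
}

func main() {
	q := &pollQueue{items: []item{{1}, {2}, {3}}}
	datach := make(chan item)
	go q.drainUpto(3, time.Millisecond, datach)
	for it := range datach {
		fmt.Println("got seqno", it.seqno)
	}
}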
Example #3
//Enqueue will enqueue the mutation reference for the given vbucket.
//Caller should not free the mutation till it is dequeued.
//Mutation will not be copied internally by the queue.
//The caller can use appch to force this call to return. Otherwise
//this call blocks till a slot is available for enqueue.
func (q *atomicMutationQueue) Enqueue(mutation *MutationKeys,
	vbucket Vbucket, appch StopChannel) error {

	if vbucket < 0 || vbucket > Vbucket(q.numVbuckets)-1 {
		return errors.New("vbucket out of range")
	}

	//no more requests are taken once the queue
	//is marked as destroyed
	if q.isDestroyed {
		return nil
	}

	//create a new node
	n := q.allocNode(vbucket, appch)
	if n == nil {
		return nil
	}

	n.mutation = mutation
	n.next = nil

	//point tail's next to new node
	tail := (*node)(platform.LoadPointer(&q.tail[vbucket]))
	tail.next = n
	//update tail to new node
	platform.StorePointer(&q.tail[vbucket], unsafe.Pointer(tail.next))

	platform.AddInt64(&q.size[vbucket], 1)

	return nil

}
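Enqueue relies on a sentinel-node linked list per vbucket: the single producer links a new node behind tail and then publishes it with an atomic store, while consumers only ever advance head. Below is a minimal sketch of that single-producer/single-consumer queue using sync/atomic directly (the platform package plays the same role above); the int payload and the omission of allocNode, appch back-pressure, and the per-vbucket arrays are simplifications.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

//node is a singly linked cell; head always points at a sentinel whose
//next field holds the oldest undelivered value.
type node struct {
	val  int
	next *node
}

type atomicQueue struct {
	head unsafe.Pointer //*node, sentinel
	tail unsafe.Pointer //*node, last appended
}

func newAtomicQueue() *atomicQueue {
	sentinel := unsafe.Pointer(&node{})
	return &atomicQueue{head: sentinel, tail: sentinel}
}

//Enqueue (single producer): link the new node behind the current tail,
//then publish it by storing the new tail pointer.
func (q *atomicQueue) Enqueue(v int) {
	n := &node{val: v}
	tail := (*node)(atomic.LoadPointer(&q.tail))
	tail.next = n
	atomic.StorePointer(&q.tail, unsafe.Pointer(n))
}

//Dequeue (single consumer): head != tail means the queue is non-empty;
//advance head past the sentinel and return the value.
func (q *atomicQueue) Dequeue() (int, bool) {
	if atomic.LoadPointer(&q.head) == atomic.LoadPointer(&q.tail) {
		return 0, false //empty
	}
	head := (*node)(atomic.LoadPointer(&q.head))
	v := head.next.val
	atomic.StorePointer(&q.head, unsafe.Pointer(head.next))
	return v, true
}

func main() {
	q := newAtomicQueue()
	q.Enqueue(1)
	q.Enqueue(2)
	for v, ok := q.Dequeue(); ok; v, ok = q.Dequeue() {
		fmt.Println(v)
	}
}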
func (fdb *fdbSlice) deletePrimaryIndex(docid []byte, workerId int) {

	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	if docid == nil {
		common.CrashOnError(errors.New("Nil Primary Key"))
		return
	}

	//docid -> key format
	entry, err := NewPrimaryIndexEntry(docid)
	common.CrashOnError(err)

	//delete from main index
	t0 := time.Now()
	if err := fdb.main[workerId].DeleteKV(entry.Bytes()); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Error %v", fdb.id, fdb.idxInstId,
			docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(entry.Bytes())))

}
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
	var err error

	logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

	//check if the docid exists in the main index
	t0 := time.Now()
	if _, err = fdb.main[workerId].GetKV(key); err == nil {
		fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
		//skip
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
			"Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
	} else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"mainindex entry %v", fdb.id, fdb.idxInstId, err)
	} else if err == forestdb.RESULT_KEY_NOT_FOUND {
		//set in main index
		t0 := time.Now()
		if err = fdb.main[workerId].SetKV(key, nil); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
				"Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
		}
		fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
	}
}
Example #6
// handleConnection services a connection request. The connection might be
// kept open in the client's connection pool.
func (s *Server) handleConnection(conn net.Conn) {
	platform.AddInt64(&s.nConnections, 1)
	defer func() {
		platform.AddInt64(&s.nConnections, -1)
	}()

	raddr := conn.RemoteAddr()
	defer func() {
		conn.Close()
		logging.Infof("%v connection %v closed\n", s.logPrefix, raddr)
	}()

	// start a receive routine.
	rcvch := make(chan interface{}, s.streamChanSize)
	go s.doReceive(conn, rcvch)

loop:
	for {
		select {
		case req, ok := <-rcvch:
			if _, yes := req.(*protobuf.EndStreamRequest); yes { // skip
				format := "%v connection %q skip protobuf.EndStreamRequest\n"
				logging.Infof(format, s.logPrefix, raddr)
				break
			} else if !ok {
				break loop
			}
			quitch := make(chan interface{})
			mfinch := make(chan bool)
			go s.monitorClient(conn, rcvch, quitch, mfinch)
			s.callb(req, conn, quitch) // blocking call
			// shutdown monitor routine synchronously
			mfinch <- true
			<-mfinch
			// End response should be only sent after monitor is shutdown
			// otherwise it could lead to loss of next request coming through
			// same connection.

			transport.SendResponseEnd(conn)

		case <-s.killch:
			break loop
		}
	}
}
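The server dedicates one goroutine per connection to socket reads (doReceive) and lets the handler multiplex that receive channel against a server-wide kill channel. The sketch below keeps only that shape: newline-delimited strings stand in for the protobuf transport, and the monitorClient/EndStreamRequest handling and connection counters are omitted.

package main

import (
	"bufio"
	"fmt"
	"net"
)

//doReceive reads newline-delimited requests and forwards them on rcvch,
//closing the channel when the connection ends.
func doReceive(conn net.Conn, rcvch chan<- string) {
	defer close(rcvch)
	sc := bufio.NewScanner(conn)
	for sc.Scan() {
		rcvch <- sc.Text()
	}
}

//handleConnection mirrors the loop above: one goroutine owns the socket
//reads, while the handler selects between incoming requests and a kill channel.
func handleConnection(conn net.Conn, killch chan struct{}) {
	defer conn.Close()
	rcvch := make(chan string, 16)
	go doReceive(conn, rcvch)

loop:
	for {
		select {
		case req, ok := <-rcvch:
			if !ok {
				break loop //client closed the connection
			}
			fmt.Println("handled request:", req)
		case <-killch:
			break loop //server shutting down
		}
	}
}

func main() {
	client, server := net.Pipe()
	killch := make(chan struct{})
	done := make(chan struct{})
	go func() { handleConnection(server, killch); close(done) }()
	fmt.Fprintln(client, "ping")
	client.Close()
	<-done
}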
Example #7
func (f *ForestDBIterator) Value() []byte {
	if f.valid && f.curr != nil {
		if f.slice != nil {
			platform.AddInt64(&f.slice.get_bytes, int64(len(f.curr.Body())))
		}
		return f.curr.Body()
	}
	return nil
}
Example #8
//DequeueSingleElement dequeues a single element and returns it.
//Returns nil if the queue is empty.
func (q *atomicMutationQueue) DequeueSingleElement(vbucket Vbucket) *MutationKeys {

	if platform.LoadPointer(&q.head[vbucket]) !=
		platform.LoadPointer(&q.tail[vbucket]) { //if queue is nonempty

		head := (*node)(platform.LoadPointer(&q.head[vbucket]))
		//copy the mutation pointer
		m := head.next.mutation
		//free mutation pointer
		head.next.mutation = nil
		//move head to next
		platform.StorePointer(&q.head[vbucket], unsafe.Pointer(head.next))
		platform.AddInt64(&q.size[vbucket], -1)
		return m
	}
	return nil
}
//getBackIndexEntry returns an existing back index entry
//given the docid
func (fdb *fdbSlice) getBackIndexEntry(docid []byte, workerId int) ([]byte, error) {

	//	logging.Tracef("ForestDBSlice::getBackIndexEntry \n\tSliceId %v IndexInstId %v Get BackIndex Key - %s",
	//		fdb.id, fdb.idxInstId, docid)

	var kbytes []byte
	var err error

	t0 := time.Now()
	kbytes, err = fdb.back[workerId].GetKV(docid)
	fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.get_bytes, int64(len(kbytes)))

	//forestdb reports a get on a non-existent key as an
	//error; skip that
	if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		return nil, err
	}

	return kbytes, nil
}
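getBackIndexEntry normalizes the storage engine's key-not-found error into a (nil, nil) result, so callers can treat an absent docid as "no previous entry" rather than a failure. A small sketch of that normalization follows; getKV and errKeyNotFound are stand-ins for the ForestDB calls, not real APIs.

package main

import (
	"errors"
	"fmt"
)

//errKeyNotFound stands in for forestdb.RESULT_KEY_NOT_FOUND.
var errKeyNotFound = errors.New("key not found")

var store = map[string][]byte{"doc1": []byte("age:42")}

//getKV is a placeholder for the storage engine's get: a missing key
//is reported as an error.
func getKV(key string) ([]byte, error) {
	if v, ok := store[key]; ok {
		return v, nil
	}
	return nil, errKeyNotFound
}

//getEntry mirrors getBackIndexEntry: the not-found error is swallowed
//and the caller just sees a nil entry.
func getEntry(key string) ([]byte, error) {
	v, err := getKV(key)
	if err != nil && !errors.Is(err, errKeyNotFound) {
		return nil, err
	}
	return v, nil
}

func main() {
	v, err := getEntry("doc2")
	fmt.Printf("entry=%v err=%v\n", v, err) //entry=[] err=<nil>
}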
Example #10
func (h *Histogram) Add(val int64) {
	i := h.findBucket(val)
	platform.AddInt64(&h.vals[i], 1)
}
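Histogram.Add maps the value to a bucket and bumps that bucket's counter with an atomic add, so concurrent writers never take a lock. The sketch below fills in a plausible findBucket over sorted upper bounds; the bucket boundaries and the binary search are assumptions, and only the atomic per-bucket increment mirrors the code above.

package main

import (
	"fmt"
	"sort"
	"sync/atomic"
)

//histogram keeps one counter per bucket plus an overflow bucket; counters
//are only ever touched through atomic adds.
type histogram struct {
	buckets []int64 //upper bounds, sorted ascending
	vals    []int64 //one counter per bucket, plus overflow
}

func newHistogram(bounds []int64) *histogram {
	return &histogram{buckets: bounds, vals: make([]int64, len(bounds)+1)}
}

//findBucket returns the first bucket whose upper bound covers val;
//len(buckets) is the overflow bucket.
func (h *histogram) findBucket(val int64) int {
	return sort.Search(len(h.buckets), func(i int) bool { return h.buckets[i] >= val })
}

func (h *histogram) Add(val int64) {
	i := h.findBucket(val)
	atomic.AddInt64(&h.vals[i], 1)
}

func main() {
	h := newHistogram([]int64{10, 100, 1000})
	for _, v := range []int64{5, 50, 5000} {
		h.Add(v)
	}
	fmt.Println(h.vals) //prints: [1 1 0 1]
}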
func (fdb *fdbSlice) insertSecIndex(key []byte, docid []byte, workerId int) {
	var err error
	var oldkey []byte

	//logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s "+
	//	"Value - %s", fdb.id, fdb.idxInstId, k, v)

	//check if the docid exists in the back index
	if oldkey, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"backindex entry %v", fdb.id, fdb.idxInstId, err)
		return
	} else if oldkey != nil {
		//If the old key from the back index matches the new key
		//in the mutation, skip it.
		if bytes.Equal(oldkey, key) {
			logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received Unchanged Key for "+
				"Doc Id %v. Key %v. Skipped.", fdb.id, fdb.idxInstId, string(docid), key)
			return
		}

		//there is already an entry in main index for this docid
		//delete from main index
		t0 := time.Now()
		if err = fdb.main[workerId].DeleteKV(oldkey); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
				"entry from main index %v", fdb.id, fdb.idxInstId, err)
			return
		}
		fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.delete_bytes, int64(len(oldkey)))

		// If a field value changed from "existing" to "missing" (i.e., key = nil),
		// we need to remove the back-index entry for the previous "existing" value.
		if key == nil {
			t0 := time.Now()
			if err = fdb.back[workerId].DeleteKV(docid); err != nil {
				fdb.checkFatalDbError(err)
				logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
					"entry from back index %v", fdb.id, fdb.idxInstId, err)
				return
			}

			fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
			platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
		}
	}

	if key == nil {
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %s. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//set the back index entry <docid, encodedkey>
	t0 := time.Now()
	if err = fdb.back[workerId].SetKV(docid, key); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Back Index Set. "+
			"Skipped Key %s. Value %v. Error %v", fdb.id, fdb.idxInstId, string(docid), key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(docid)+len(key)))

	t0 = time.Now()
	//set in main index
	if err = fdb.main[workerId].SetKV(key, nil); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
			"Skipped Key %v. Error %v", fdb.id, fdb.idxInstId, key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
}
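insertSecIndex is the update-side counterpart of the deleteSecIndex flow in Example #1: look up the old key in the back index, skip unchanged keys, drop the stale main-index entry, handle a key that went missing, and only then write the back entry followed by the main entry. The map-backed sketch below walks the same branches; memIndex and upsertSec are illustrative names, and an empty string stands in for a nil key.

package main

import "fmt"

//memIndex is an in-memory stand-in for the main/back index pair:
//main holds secondary-key entries, back maps docid -> current key.
type memIndex struct {
	main map[string]struct{}
	back map[string]string
}

//upsertSec mirrors the flow above: consult the back index, skip unchanged
//keys, drop the stale main entry, handle a key that went missing, and
//finally write the back entry before the main entry.
func (ix *memIndex) upsertSec(docid, key string) {
	if oldkey, ok := ix.back[docid]; ok {
		if oldkey == key {
			return //unchanged, nothing to do
		}
		delete(ix.main, oldkey) //stale main-index entry
		if key == "" {
			delete(ix.back, docid) //field became missing
		}
	}
	if key == "" {
		return //nothing to index
	}
	ix.back[docid] = key
	ix.main[key] = struct{}{}
}

func main() {
	ix := &memIndex{main: map[string]struct{}{}, back: map[string]string{}}
	ix.upsertSec("doc1", "age:42")
	ix.upsertSec("doc1", "age:43") //update: old entry removed, new added
	ix.upsertSec("doc1", "")       //field removed: both entries dropped
	fmt.Println(len(ix.main), len(ix.back)) //prints: 0 0
}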
Example #12
func (v *Int64Val) Add(delta int64) {
	platform.AddInt64(v.val, delta)
}
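Int64Val.Add is an atomic add on a shared int64, the same pattern behind the byte and size counters in these examples. A self-contained version of the same idea using sync/atomic directly, with an assumed Get accessor, looks like this.

package main

import (
	"fmt"
	"sync/atomic"
)

//counter is a shared int64 updated only through atomic operations,
//so concurrent adders never race.
type counter struct{ val int64 }

func (c *counter) Add(delta int64) { atomic.AddInt64(&c.val, delta) }
func (c *counter) Get() int64      { return atomic.LoadInt64(&c.val) }

func main() {
	var c counter
	c.Add(5)
	c.Add(-2)
	fmt.Println(c.Get()) //prints: 3
}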