Example #1
//
// Repair the stream by asking the provider to reconnect to the given endpoint.
// Once reconnected, the provider will stream mutations from the current vbucket seqno.
// In other words, the provider will not reset the seqno.
//
func (p *ProjectorAdmin) RepairEndpointForStream(streamId common.StreamId,
	bucketVbnosMap map[string][]uint16,
	endpoint string) error {

	logging.Debugf("ProjectorAdmin::RepairStreamForEndpoint(): streamId = %d", streamId.String())

	// If there is no bucket, there is nothing to repair.
	if len(bucketVbnosMap) == 0 {
		return nil
	}

	shouldRetry := true
	for shouldRetry {
		shouldRetry = false

		buckets := make([]string, 0, len(bucketVbnosMap))
		for bucket := range bucketVbnosMap {
			buckets = append(buckets, bucket)
		}

		nodes, err := p.env.GetNodeListForBuckets(buckets)
		if err != nil {
			return err
		}

		// start one worker per projector node to repair the endpoint
		workers := make(map[string]*adminWorker)
		donech := make(chan *adminWorker, len(nodes))

		for _, server := range nodes {
			worker := &adminWorker{
				admin:            p,
				server:           server,
				streamId:         streamId,
				killch:           make(chan bool, 1),
				activeTimestamps: nil,
				err:              nil}
			workers[server] = worker
			go worker.repairEndpoint(endpoint, donech)
		}

		// now wait for all workers to be done
		// TODO: timeout?
		for len(workers) != 0 {
			worker := <-donech
			delete(workers, worker.server)

			if worker.err != nil {
				logging.Debugf("ProjectorAdmin::RepairEndpointFromStream(): worker % has error=%v", worker.server, worker.err)

				// cleanup: kill the other workers
				for _, worker := range workers {
					worker.killch <- true
				}

				// if it is not a recoverable error, then just return
				if worker.err.(Error).code != ERROR_STREAM_PROJECTOR_TIMEOUT {
					return worker.err
				}

				shouldRetry = true
				break
			}
		}
	}

	return nil
}
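
The adminWorker type and its repairEndpoint method are not part of this example. As a rough, self-contained sketch of the contract the admin loop above relies on (every worker reports itself on donech exactly once, honours killch, and records any failure in its err field), with the projector request replaced by a placeholder and all names below assumed for illustration:

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the adminWorker fields used above.
type worker struct {
	server string
	killch chan bool
	err    error
}

// repair illustrates the worker-side protocol: report on donech exactly
// once, stop early if killed, otherwise record the outcome in w.err.
func (w *worker) repair(endpoint string, donech chan *worker) {
	defer func() { donech <- w }()

	select {
	case <-w.killch:
		return // another worker failed; the admin asked us to stop
	default:
	}

	// Placeholder for the real projector request to w.server.
	if endpoint == "" {
		w.err = errors.New("empty endpoint")
	}
}

func main() {
	servers := []string{"node1:9999", "node2:9999"}
	donech := make(chan *worker, len(servers))
	workers := make(map[string]*worker)

	for _, s := range servers {
		w := &worker{server: s, killch: make(chan bool, 1)}
		workers[s] = w
		go w.repair("dataport:9104", donech)
	}

	for len(workers) != 0 {
		w := <-donech
		delete(workers, w.server)
		fmt.Printf("worker %v done, err=%v\n", w.server, w.err)
	}
}

The buffered donech (capacity len(nodes)) matters in both the sketch and the real code: a worker can always send its result and exit, even if the admin loop has already returned after a failure.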
Example #2
//
// Delete index instances from the stream.
//
func (p *ProjectorAdmin) DeleteIndexFromStream(streamId common.StreamId, buckets []string, instances []uint64) error {

	logging.Debugf("StreamAdmin::DeleteIndexFromStream(): streamId=%d", streamId.String())

	// If there is no bucket or index instances, there is nothing to delete.
	if len(buckets) == 0 || len(instances) == 0 {
		logging.Debugf("ProjectorAdmin::DeleteIndexToStream(): len(buckets)=%v, len(instances)=%v",
			len(buckets), len(instances))
		return nil
	}

	shouldRetry := true
	for shouldRetry {
		shouldRetry = false

		nodes, err := p.env.GetNodeListForBuckets(buckets)
		if err != nil {
			return err
		}

		// start one worker per projector node to delete the index instances
		workers := make(map[string]*adminWorker)
		donech := make(chan *adminWorker, len(nodes))

		for _, server := range nodes {
			worker := &adminWorker{
				admin:            p,
				server:           server,
				streamId:         streamId,
				killch:           make(chan bool, 1),
				activeTimestamps: nil,
				err:              nil}
			workers[server] = worker
			go worker.deleteInstances(instances, donech)
		}

		logging.Debugf("ProjectorAdmin::DeleteIndexToStream(): len(workers)=%v", len(workers))

		// now wait for all workers to be done
		// TODO: timeout?
		for len(workers) != 0 {
			worker := <-donech

			logging.Debugf("ProjectorAdmin::DeleteIndexToStream(): worker %v done", worker.server)
			delete(workers, worker.server)

			if worker.err != nil {
				logging.Debugf("ProjectorAdmin::DeleteIndexFromStream(): worker % has error=%v", worker.server, worker.err)

				// cleanup: kill the other workers
				for _, worker := range workers {
					worker.killch <- true
				}

				// if it is not a recoverable error, then just return
				if worker.err.(Error).code != ERROR_STREAM_PROJECTOR_TIMEOUT {
					return worker.err
				}

				logging.Debugf("ProjectorAdmin::DeleteIndexToStream(): retry adding instances to nodes")
				shouldRetry = true
				break
			}
		}
	}

	return nil
}
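
Both examples decide whether to retry by asserting the worker error to an Error value and comparing its code against ERROR_STREAM_PROJECTOR_TIMEOUT; neither the Error type nor the constant is defined in these snippets. A minimal sketch of that pattern, with everything except those two names assumed for illustration:

package main

import "fmt"

// Assumed error codes; only ERROR_STREAM_PROJECTOR_TIMEOUT appears in the
// examples above, the second value is a placeholder.
const (
	ERROR_STREAM_PROJECTOR_TIMEOUT = iota
	ERROR_STREAM_REQUEST_ERROR
)

// Error mirrors the shape implied by worker.err.(Error).code: a concrete
// error type carrying a numeric code alongside its message.
type Error struct {
	code int
	msg  string
}

func (e Error) Error() string {
	return fmt.Sprintf("error %d: %s", e.code, e.msg)
}

// isRetryable reproduces the retry decision used in both admin loops:
// only a projector timeout is treated as recoverable. Unlike the bare
// type assertion above, a non-Error value is reported as non-retryable
// instead of panicking.
func isRetryable(err error) bool {
	e, ok := err.(Error)
	return ok && e.code == ERROR_STREAM_PROJECTOR_TIMEOUT
}

func main() {
	var err error = Error{code: ERROR_STREAM_PROJECTOR_TIMEOUT, msg: "projector did not respond"}
	fmt.Println(isRetryable(err)) // true, so the admin loop would set shouldRetry = true
}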