Example #1
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]*mysqlctl.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]*mysqlctl.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			log.Infof("Gathering master position for %v", si.MasterAlias)
			pos, err := wr.getMasterPosition(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("Got master position for %v", si.MasterAlias)
				mu.Lock()
				result[si] = pos
				mu.Unlock()
			}
			wg.Done()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
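All of the examples in this section share the same fan-out pattern: start one goroutine per item, guard any shared result with a mutex, record failures in an error recorder, and wait on a sync.WaitGroup before returning the aggregated error. The following sketch is a minimal, self-contained illustration of that pattern; simpleErrorRecorder is a stand-in assumption for Vitess's concurrency.AllErrorRecorder, and gatherPositions is a hypothetical analogue of getMastersPosition above, not code from the project.

package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// simpleErrorRecorder is a minimal stand-in for concurrency.AllErrorRecorder:
// it records errors from many goroutines and aggregates them into one error.
type simpleErrorRecorder struct {
	mu   sync.Mutex
	errs []error
}

// RecordError stores a non-nil error; it is safe to call from multiple goroutines.
func (r *simpleErrorRecorder) RecordError(err error) {
	if err == nil {
		return
	}
	r.mu.Lock()
	r.errs = append(r.errs, err)
	r.mu.Unlock()
}

// Error returns nil if nothing was recorded, or a single aggregated error.
func (r *simpleErrorRecorder) Error() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if len(r.errs) == 0 {
		return nil
	}
	msgs := make([]string, len(r.errs))
	for i, e := range r.errs {
		msgs[i] = e.Error()
	}
	return errors.New(strings.Join(msgs, "; "))
}

// gatherPositions mirrors the shape of getMastersPosition: one goroutine per
// shard, a mutex around the shared result map, and an error recorder for failures.
func gatherPositions(shards []string, fetch func(shard string) (string, error)) (map[string]string, error) {
	mu := sync.Mutex{}
	result := make(map[string]string)
	wg := sync.WaitGroup{}
	rec := simpleErrorRecorder{}
	for _, shard := range shards {
		wg.Add(1)
		go func(shard string) {
			defer wg.Done()
			pos, err := fetch(shard)
			if err != nil {
				rec.RecordError(err)
				return
			}
			mu.Lock()
			result[shard] = pos
			mu.Unlock()
		}(shard)
	}
	wg.Wait()
	return result, rec.Error()
}

func main() {
	positions, err := gatherPositions([]string{"-80", "80-"}, func(shard string) (string, error) {
		if shard == "80-" {
			return "", fmt.Errorf("shard %v: master unreachable", shard)
		}
		return "MariaDB/0-1-42", nil
	})
	fmt.Println(positions, err)
}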
Example #2
// DeleteKeyspaceShards implements topo.Server.
func (s *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
	shards, err := s.GetShardNames(ctx, keyspace)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	global := s.getGlobal()
	for _, shard := range shards {
		wg.Add(1)
		go func(shard string) {
			defer wg.Done()
			_, err := global.Delete(shardDirPath(keyspace, shard), true /* recursive */)
			rec.RecordError(convertError(err))
		}(shard)
	}
	wg.Wait()

	if err = rec.Error(); err != nil {
		return err
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, nil, -1),
		Status:       "deleted all shards",
	})
	return nil
}
Example #3
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			log.Warningf("previous action failed on target %v, no running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			log.Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			log.Infof("action %v successfull on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	return rec.Error()
}
Example #4
File: shard.go, Project: haoqoo/vitess
// FindAllTabletAliasesInShardByCell uses the replication graph to find all the
// tablet aliases in the given shard.
//
// It can return ErrPartialResult if some cells were not fetched,
// in which case the result only contains the cells that were fetched.
//
// The tablet aliases are sorted by cell, then by UID.
func FindAllTabletAliasesInShardByCell(ctx context.Context, ts Server, keyspace, shard string, cells []string) ([]TabletAlias, error) {
	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topo.FindAllTabletAliasesInShardbyCell")
	span.Annotate("keyspace", keyspace)
	span.Annotate("shard", shard)
	span.Annotate("num_cells", len(cells))
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the shard information to find the cells
	si, err := GetShard(ctx, ts, keyspace, shard)
	if err != nil {
		return nil, err
	}

	resultAsMap := make(map[TabletAlias]bool)
	if si.MasterAlias != nil && !TabletAliasIsZero(si.MasterAlias) {
		if InCellList(si.MasterAlias.Cell, cells) {
			resultAsMap[ProtoToTabletAlias(si.MasterAlias)] = true
		}
	}

	// read the replication graph in each cell and add all found tablets
	wg := sync.WaitGroup{}
	mutex := sync.Mutex{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range si.Cells {
		if !InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
				return
			}

			mutex.Lock()
			for _, node := range sri.Nodes {
				resultAsMap[ProtoToTabletAlias(node.TabletAlias)] = true
			}
			mutex.Unlock()
		}(cell)
	}
	wg.Wait()
	err = nil
	if rec.HasErrors() {
		log.Warningf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error())
		err = ErrPartialResult
	}

	result := make([]TabletAlias, 0, len(resultAsMap))
	for a := range resultAsMap {
		result = append(result, a)
	}
	sort.Sort(TabletAliasList(result))
	return result, err
}
Example #5
// execShardAction executes the action on a particular shard.
// If the action fails, it determines whether the keyspace/shard
// have moved, re-resolves the topology and tries again, if it is
// not executing a transaction.
func (stc *ScatterConn) execShardAction(
	context interface{},
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)
		// Determine whether keyspace can be re-resolved
		if shouldResolveKeyspace(err, transactionId) {
			newKeyspace, err := getKeyspaceAlias(stc.toposerv, stc.cell, keyspace, tabletType)
			if err == nil && newKeyspace != keyspace {
				sdc.Close()
				stc.cleanupShardConn(keyspace, shard, tabletType)
				keyspace = newKeyspace
				continue
			}
		}
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
Example #6
File: keyspace.go, Project: CowLeo/vitess
// WaitForDrain blocks until the selected tablets (cells/keyspace/shard/tablet_type)
// have reported a QPS rate of 0.0.
// NOTE: This is only an observation at one point in time and is no guarantee that
// the tablet was actually drained. At a later time, a QPS rate > 0.0 could still
// be observed.
func (wr *Wrangler) WaitForDrain(ctx context.Context, cells []string, keyspace, shard string, servedType topodatapb.TabletType,
	retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout time.Duration) error {
	if len(cells) == 0 {
		// Retrieve list of cells for the shard from the topology.
		shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard)
		if err != nil {
			return fmt.Errorf("failed to retrieve list of all cells. GetShard() failed: %v", err)
		}
		cells = shardInfo.Cells
	}

	// Check all cells in parallel.
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(wr.waitForDrainInCell(ctx, cell, keyspace, shard, servedType,
				retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout))
		}(cell)
	}
	wg.Wait()

	return rec.Error()
}
Example #7
File: rebuild.go, Project: khanchan/vitess
// RebuildShard updates the SrvShard objects and underlying serving graph.
//
// Re-read from TopologyServer to make sure we are using the side
// effects of all actions.
//
// This function will start each cell over from the beginning on ErrBadVersion,
// so it doesn't need a lock on the shard.
func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string, lockTimeout time.Duration) (*topo.ShardInfo, error) {
	log.Infof("RebuildShard %v/%v", keyspace, shard)

	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topotools.RebuildShard")
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the existing shard info. It has to exist.
	shardInfo, err := ts.GetShard(ctx, keyspace, shard)
	if err != nil {
		return nil, err
	}

	// rebuild all cells in parallel
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range shardInfo.Cells {
		// skip this cell if we shouldn't rebuild it
		if !topo.InCellList(cell, cells) {
			continue
		}

		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(rebuildCellSrvShard(ctx, log, ts, shardInfo, cell))
		}(cell)
	}
	wg.Wait()

	return shardInfo, rec.Error()
}
Example #8
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range destinationShards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			// wg.Done must run exactly once per goroutine, not once per source
			// shard, so defer it here rather than calling it inside the loop.
			defer wg.Done()
			for _, sourceShard := range si.SourceShards {
				// we're waiting on this guy
				blpPosition := blproto.BlpPosition{
					Uid: sourceShard.Uid,
				}

				// find the position it should be at
				for s, pos := range sourcePositions {
					if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard {
						blpPosition.Position = pos
					}
				}

				log.Infof("Waiting for %v to catch up", si.MasterAlias)
				if err := wr.ai.WaitBlpPosition(si.MasterAlias, blpPosition, wr.ActionTimeout()); err != nil {
					rec.RecordError(err)
				} else {
					log.Infof("%v caught up", si.MasterAlias)
				}
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example #9
// execShardAction executes the action on a particular shard.
// If the action fails, it determines whether the keyspace/shard
// have moved, re-resolves the topology and tries again, if it is
// not executing a transaction.
func (stc *ScatterConn) execShardAction(
	context context.Context,
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(context, keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
Example #10
File: cleaner.go, Project: richarwu/vitess
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// CleanUp uses its own context, with a timeout of 5 minutes, so that cleanup actions will run even if the original context times out.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	// we use a background context so we're not dependent on the original context timeout
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			wr.Logger().Warningf("previous action failed on target %v, no running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(ctx, wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			wr.Logger().Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			wr.Logger().Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	cancel()
	return rec.Error()
}
Example #11
// DiffSchemaToArray diffs two schemas and returns the schema diffs, if there are any.
func DiffSchemaToArray(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffSchema(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.ErrorStrings()
	}
	return nil
}
Example #12
// DiffPermissionsToArray diffs two sets of permissions, and returns the differences
func DiffPermissionsToArray(leftName string, left *tabletmanagerdatapb.Permissions, rightName string, right *tabletmanagerdatapb.Permissions) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffPermissions(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.ErrorStrings()
	}
	return nil
}
Example #13
func DiffPermissionsToArray(leftName string, left *Permissions, rightName string, right *Permissions) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffPermissions(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.Errors
	}
	return nil
}
Example #14
File: split.go, Project: ninqing/vitess
func (wr *Wrangler) ShardMultiRestore(keyspace, shard string, sources []topo.TabletAlias, tables []string, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount int, strategy string) error {

	// check parameters
	if len(tables) > 0 && len(sources) > 1 {
		return fmt.Errorf("ShardMultiRestore can only handle one source when tables are specified")
	}

	// lock the shard to perform the changes we need done
	actionNode := actionnode.ShardMultiRestore(&actionnode.MultiRestoreArgs{
		SrcTabletAliases:       sources,
		Concurrency:            concurrency,
		FetchConcurrency:       fetchConcurrency,
		InsertTableConcurrency: insertTableConcurrency,
		FetchRetryCount:        fetchRetryCount,
		Strategy:               strategy})
	lockPath, err := wr.lockShard(keyspace, shard, actionNode)
	if err != nil {
		return err
	}

	mrErr := wr.SetSourceShards(keyspace, shard, sources, tables)
	err = wr.unlockShard(keyspace, shard, actionNode, lockPath, mrErr)
	if err != nil {
		if mrErr != nil {
			log.Errorf("unlockShard got error back: %v", err)
			return mrErr
		}
		return err
	}
	if mrErr != nil {
		return mrErr
	}

	// find all tablets in the shard
	destTablets, err := topo.FindAllTabletAliasesInShard(wr.ts, keyspace, shard)
	if err != nil {
		return err
	}

	// now launch MultiRestore on all tablets we need to do
	rec := cc.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, tabletAlias := range destTablets {
		wg.Add(1)
		go func(tabletAlias topo.TabletAlias) {
			log.Infof("Starting multirestore on tablet %v", tabletAlias)
			err := wr.MultiRestore(tabletAlias, sources, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount, strategy)
			log.Infof("Multirestore on tablet %v is done (err=%v)", tabletAlias, err)
			rec.RecordError(err)
			wg.Done()
		}(tabletAlias)
	}
	wg.Wait()

	return rec.Error()
}
Example #15
func (wr *Wrangler) makeMastersReadOnly(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		if si.MasterAlias.IsZero() {
			rec.RecordError(fmt.Errorf("Shard %v/%v has no master?", si.Keyspace(), si.ShardName()))
			continue
		}

		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()

			wr.Logger().Infof("Making master %v read-only", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			if err = wr.tmc.SetReadOnly(ti, wr.ActionTimeout()); err != nil {
				rec.RecordError(err)
				return
			}
			wr.Logger().Infof("Master %v is now read-only", si.MasterAlias)
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example #16
File: copy.go, Project: nosix-me/vitess
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		log.Fatalf("GetKeyspaces: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()

			k, err := fromTS.GetKeyspace(keyspace)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
				return
			}

			if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
				if err == topo.ErrNodeExists {
					log.Warningf("keyspace %v already exists", keyspace)
				} else {
					rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
				}
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyKeyspaces failed: %v", rec.Error())
	}
}
Example #17
func (wr *Wrangler) makeMastersReadOnly(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		if si.MasterAlias.IsZero() {
			rec.RecordError(fmt.Errorf("Shard %v/%v has no master?", si.Keyspace(), si.ShardName()))
			continue
		}

		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()

			log.Infof("Making master %v read-only", si.MasterAlias)
			actionPath, err := wr.ai.SetReadOnly(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}
			rec.RecordError(wr.WaitForCompletion(actionPath))
			log.Infof("Master %v is now read-only", si.MasterAlias)
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example #18
File: shard.go, Project: nangong92t/go_src
// FindAllTabletAliasesInShardByCell uses the replication graph to find all the
// tablet aliases in the given shard.
// It can return ErrPartialResult if some cells were not fetched,
// in which case the result only contains the cells that were fetched.
func FindAllTabletAliasesInShardByCell(ts Server, keyspace, shard string, cells []string) ([]TabletAlias, error) {
	// read the shard information to find the cells
	si, err := ts.GetShard(keyspace, shard)
	if err != nil {
		return nil, err
	}

	resultAsMap := make(map[TabletAlias]bool)
	if !si.MasterAlias.IsZero() {
		if InCellList(si.MasterAlias.Cell, cells) {
			resultAsMap[si.MasterAlias] = true
		}
	}

	// read the replication graph in each cell and add all found tablets
	wg := sync.WaitGroup{}
	mutex := sync.Mutex{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range si.Cells {
		if !InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			sri, err := ts.GetShardReplication(cell, keyspace, shard)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
				return
			}

			mutex.Lock()
			for _, rl := range sri.ReplicationLinks {
				resultAsMap[rl.TabletAlias] = true
				if !rl.Parent.IsZero() && InCellList(rl.Parent.Cell, cells) {
					resultAsMap[rl.Parent] = true
				}
			}
			mutex.Unlock()
		}(cell)
	}
	wg.Wait()
	err = nil
	if rec.HasErrors() {
		log.Warningf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error())
		err = ErrPartialResult
	}

	result := make([]TabletAlias, 0, len(resultAsMap))
	for a := range resultAsMap {
		result = append(result, a)
	}
	return result, err
}
Example #19
func (stc *ScatterConn) rollbackIfNeeded(ctx context.Context, allErrors *concurrency.AllErrorRecorder, session *SafeSession) {
	if session.InTransaction() {
		errstr := allErrors.Error().Error()
		// We cannot recover from these errors
		// TODO(aaijazi): get rid of this string parsing. Might
		// want a function that searches through a deeply
		// nested error chain for a particular error.
		if strings.Contains(errstr, "tx_pool_full") || strings.Contains(errstr, "not_in_tx") {
			stc.Rollback(ctx, session)
		}
	}
}
Example #20
// shardsWithTablesSources returns all the shards that have exactly one SourceShard
// entry with a non-empty list of Tables.
func shardsWithTablesSources(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]string, error) {
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	keyspaces, err := wr.TopoServer().GetKeyspaces(shortCtx)
	cancel()
	if err != nil {
		return nil, err
	}

	wg := sync.WaitGroup{}
	mu := sync.Mutex{} // protects result
	result := make([]map[string]string, 0, len(keyspaces))
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
			shards, err := wr.TopoServer().GetShardNames(shortCtx, keyspace)
			cancel()
			if err != nil {
				rec.RecordError(err)
				return
			}
			for _, shard := range shards {
				wg.Add(1)
				go func(keyspace, shard string) {
					defer wg.Done()
					shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
					si, err := wr.TopoServer().GetShard(shortCtx, keyspace, shard)
					cancel()
					if err != nil {
						rec.RecordError(err)
						return
					}

					if len(si.SourceShards) == 1 && len(si.SourceShards[0].Tables) > 0 {
						mu.Lock()
						result = append(result, map[string]string{
							"Keyspace": keyspace,
							"Shard":    shard,
						})
						mu.Unlock()
					}
				}(keyspace, shard)
			}
		}(keyspace)
	}
	wg.Wait()

	if rec.HasErrors() {
		return nil, rec.Error()
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("There are no shards with SourceShards")
	}
	return result, nil
}
Example #21
// Reload reloads the schema info from the db.
// Any tables that have changed since the last load are updated.
// This is a no-op if the SchemaInfo is closed.
func (si *SchemaInfo) Reload(ctx context.Context) error {
	defer logError(si.queryServiceStats)

	// Reload() gets called both from the ticker, and from external RPCs.
	// We don't want them to race over writing data that was read concurrently.
	si.actionMutex.Lock()
	defer si.actionMutex.Unlock()

	if si.IsClosed() {
		return nil
	}

	// Get time first because it needs a connection from the pool.
	curTime := si.mysqlTime(ctx)

	var tableData *sqltypes.Result
	var err error
	func() {
		conn := getOrPanic(ctx, si.connPool)
		defer conn.Recycle()
		tableData, err = conn.Exec(ctx, baseShowTables, maxTableCount, false)
	}()
	if err != nil {
		return fmt.Errorf("could not get table list for reload: %v", err)
	}

	// Reload any tables that have changed. We try every table even if some fail,
	// but we return success only if all tables succeed.
	// The following section requires us to hold mu.
	rec := concurrency.AllErrorRecorder{}
	si.mu.Lock()
	defer si.mu.Unlock()
	for _, row := range tableData.Rows {
		tableName := row[0].String()
		createTime, _ := row[2].ParseInt64()
		// Check if we know about the table or it has been recreated.
		if _, ok := si.tables[tableName]; !ok || createTime >= si.lastChange {
			func() {
				// Unlock so CreateOrUpdateTable can lock.
				si.mu.Unlock()
				defer si.mu.Lock()
				log.Infof("Reloading schema for table: %s", tableName)
				rec.RecordError(si.createOrUpdateTableLocked(ctx, tableName))
			}()
			continue
		}
		// Only update table_rows, data_length, index_length, max_data_length
		si.tables[tableName].SetMysqlStats(row[4], row[5], row[6], row[7], row[8])
	}
	si.lastChange = curTime
	return rec.Error()
}
Example #22
// runOnAllShards is a helper method that executes the passed function for all shards in parallel.
// The method returns no error if the function succeeds on all shards. If the function fails on
// any shard, the method returns an error; if several shards fail, only one of the errors is returned.
func (schemaSwap *Swap) runOnAllShards(shardFunc func(shard *shardSchemaSwap) error) error {
	var errorRecorder concurrency.AllErrorRecorder
	var waitGroup sync.WaitGroup
	for _, shardSwap := range schemaSwap.allShards {
		waitGroup.Add(1)
		go func(shard *shardSchemaSwap) {
			defer waitGroup.Done()
			errorRecorder.RecordError(shardFunc(shard))
		}(shardSwap)
	}
	waitGroup.Wait()
	return errorRecorder.Error()
}
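A caller would typically pass a closure that runs one step of the swap on the given shard. The sketch below only illustrates that shape; applySchemaChange is a hypothetical method name, not part of the code above.

// Hypothetical usage: applySchemaChange is an assumed per-shard method.
if err := schemaSwap.runOnAllShards(func(shard *shardSchemaSwap) error {
	return shard.applySchemaChange()
}); err != nil {
	return err
}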
Example #23
func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.AllErrorRecorder, statsKey []string, err *error) {
	if *err != nil {
		allErrors.RecordError(*err)
		// Don't increment the error counter for duplicate
		// keys or bad queries, as those errors are caused by
		// client queries and are not VTGate's fault.
		ec := vterrors.RecoverVtErrorCode(*err)
		if ec != vtrpcpb.ErrorCode_INTEGRITY_ERROR && ec != vtrpcpb.ErrorCode_BAD_INPUT {
			stc.tabletCallErrorCount.Add(statsKey, 1)
		}
	}
	stc.timings.Record(statsKey, startTime)
}
Example #24
File: rebuild.go, Project: khanchan/vitess
// UpdateAllSrvShards calls UpdateSrvShard for all cells concurrently.
func UpdateAllSrvShards(ctx context.Context, ts topo.Server, si *topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	errs := concurrency.AllErrorRecorder{}

	for _, cell := range si.Cells {
		wg.Add(1)
		go func(cell string) {
			errs.RecordError(UpdateSrvShard(ctx, ts, cell, si))
			wg.Done()
		}(cell)
	}
	wg.Wait()
	return errs.Error()
}
Example #25
// getNewConn creates a new tablet connection with a separate per conn timeout.
// It limits the overall timeout to connTimeoutTotal by checking elapsed time after each blocking call.
func (sdc *ShardConn) getNewConn(ctx context.Context) (conn tabletconn.TabletConn, endPoint *topodatapb.EndPoint, isTimeout bool, err error) {
	startTime := time.Now()

	endPoints, err := sdc.balancer.Get()
	if err != nil {
		// Error when getting endpoint
		return nil, nil, false, err
	}
	if len(endPoints) == 0 {
		// No valid endpoint
		return nil, nil, false, vterrors.FromError(
			vtrpcpb.ErrorCode_INTERNAL_ERROR,
			fmt.Errorf("no valid endpoint"),
		)
	}
	if time.Now().Sub(startTime) >= sdc.connTimeoutTotal {
		return nil, nil, true, vterrors.FromError(
			vtrpcpb.ErrorCode_DEADLINE_EXCEEDED,
			fmt.Errorf("timeout when getting endpoints"),
		)
	}

	// Iterate through all endpoints to create a connection
	perConnTimeout := sdc.getConnTimeoutPerConn(len(endPoints))
	allErrors := new(concurrency.AllErrorRecorder)
	for _, endPoint := range endPoints {
		perConnStartTime := time.Now()
		conn, err = tabletconn.GetDialer()(ctx, endPoint, sdc.keyspace, sdc.shard, topodatapb.TabletType_UNKNOWN, perConnTimeout)
		if err == nil {
			sdc.connectTimings.Record([]string{sdc.keyspace, sdc.shard, strings.ToLower(sdc.tabletType.String())}, perConnStartTime)
			sdc.mu.Lock()
			defer sdc.mu.Unlock()
			sdc.conn = conn
			return conn, endPoint, false, nil
		}
		// Mark down the endpoint if it failed to connect
		sdc.balancer.MarkDown(endPoint.Uid, err.Error())
		vtErr := vterrors.NewVitessError(
			// TODO(aaijazi): what about OperationalErrors here?
			vterrors.RecoverVtErrorCode(err), err,
			"%v %+v", err, endPoint,
		)
		allErrors.RecordError(vtErr)
		if time.Now().Sub(startTime) >= sdc.connTimeoutTotal {
			err = vterrors.FromError(
				vtrpcpb.ErrorCode_DEADLINE_EXCEEDED,
				fmt.Errorf("timeout when connecting to %+v", endPoint),
			)
			allErrors.RecordError(err)
			return nil, nil, true, allErrors.AggrError(AggregateVtGateErrors)
		}
	}
	return nil, nil, false, allErrors.Error()
}
Example #26
func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.AllErrorRecorder, statsKey []string, err *error) {
	if *err != nil {
		allErrors.RecordError(*err)

		// Don't increment the error counter for duplicate
		// keys, as those errors are caused by client queries
		// and are not VTGate's fault.
		// TODO(aaijazi): get rid of this string parsing, and
		// handle all cases of invalid input
		strErr := (*err).Error()
		if !strings.Contains(strErr, errDupKey) && !strings.Contains(strErr, errOutOfRange) {
			stc.tabletCallErrorCount.Add(statsKey, 1)
		}
	}
	stc.timings.Record(statsKey, startTime)
}
Example #27
func (wr *Wrangler) ShardMultiRestore(keyspace, shard string, sources []topo.TabletAlias, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount int, strategy string) error {
	// lock the shard to perform the changes we need done
	actionNode := wr.ai.ShardMultiRestore(&tm.MultiRestoreArgs{
		SrcTabletAliases:       sources,
		Concurrency:            concurrency,
		FetchConcurrency:       fetchConcurrency,
		InsertTableConcurrency: insertTableConcurrency,
		FetchRetryCount:        fetchRetryCount,
		Strategy:               strategy})
	lockPath, err := wr.lockShard(keyspace, shard, actionNode)
	if err != nil {
		return err
	}

	mrErr := wr.shardMultiRestore(keyspace, shard, sources, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount, strategy)
	err = wr.unlockShard(keyspace, shard, actionNode, lockPath, mrErr)
	if err != nil {
		return err
	}
	if mrErr != nil {
		return mrErr
	}

	// find all tablets in the shard
	destTablets, err := topo.FindAllTabletAliasesInShard(wr.ts, keyspace, shard)
	if err != nil {
		return err
	}

	// now launch MultiRestore on all tablets we need to do
	rec := cc.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, tabletAlias := range destTablets {
		wg.Add(1)
		go func(tabletAlias topo.TabletAlias) {
			log.Infof("Starting multirestore on tablet %v", tabletAlias)
			err := wr.MultiRestore(tabletAlias, sources, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount, strategy)
			log.Infof("Multirestore on tablet %v is done (err=%v)", tabletAlias, err)
			rec.RecordError(err)
			wg.Done()
		}(tabletAlias)
	}
	wg.Wait()

	return rec.Error()
}
Example #28
// FIXME(alainjobart) no action to become read-write now, just use Ping,
// that forces the shard reload and will stop replication.
func (wr *Wrangler) makeMastersReadWrite(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Pinging master %v", si.MasterAlias)

			actionPath, err := wr.ai.Ping(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			if err := wr.WaitForCompletion(actionPath); err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("%v responded", si.MasterAlias)
			}

		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example #29
// ExecuteBatch executes a batch of non-streaming queries on the specified shards.
func (stc *ScatterConn) ExecuteBatch(
	ctx context.Context,
	batchRequest *scatterBatchRequest,
	tabletType topodatapb.TabletType,
	asTransaction bool,
	session *SafeSession) (qrs []sqltypes.Result, err error) {
	allErrors := new(concurrency.AllErrorRecorder)

	results := make([]sqltypes.Result, batchRequest.Length)
	var resMutex sync.Mutex

	var wg sync.WaitGroup
	for _, req := range batchRequest.Requests {
		wg.Add(1)
		go func(req *shardBatchRequest) {
			defer wg.Done()

			startTime, statsKey, transactionID, err := stc.startAction(ctx, "ExecuteBatch", req.Keyspace, req.Shard, tabletType, session, false, allErrors)
			defer stc.endAction(startTime, allErrors, statsKey, &err)
			if err != nil {
				return
			}

			var innerqrs []sqltypes.Result
			innerqrs, err = stc.gateway.ExecuteBatch(ctx, req.Keyspace, req.Shard, tabletType, req.Queries, asTransaction, transactionID)
			if err != nil {
				return
			}

			resMutex.Lock()
			defer resMutex.Unlock()
			for i, result := range innerqrs {
				appendResult(&results[req.ResultIndexes[i]], &result)
			}
		}(req)
	}
	wg.Wait()
	// If we want to rollback, we have to do it before closing results
	// so that the session is updated to be not InTransaction.
	if allErrors.HasErrors() {
		stc.rollbackIfNeeded(ctx, allErrors, session)
		return nil, allErrors.AggrError(stc.aggregateErrors)
	}
	return results, nil
}
Example #30
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
			if err != nil {
				rec.RecordError(err)
				return
			}

			log.Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}