Example #1
func (zkts *Server) UpdateKeyspace(ki *topo.KeyspaceInfo) error {
	keyspacePath := path.Join(globalKeyspacesPath, ki.KeyspaceName())
	data := jscfg.ToJson(ki.Keyspace)
	_, err := zkts.zconn.Set(keyspacePath, data, -1)
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			// The code should be:
			//   err = topo.ErrNoNode
			// Temporary code until we have Keyspace object
			// everywhere:
			_, err = zkts.zconn.Create(keyspacePath, data, 0, zookeeper.WorldACL(zookeeper.PERM_ALL))
			if err != nil {
				if zookeeper.IsError(err, zookeeper.ZNONODE) {
					// the directory doesn't even exist
					err = topo.ErrNoNode
				}
				return err
			}

			event.Dispatch(&events.KeyspaceChange{
				KeyspaceInfo: *ki,
				Status:       "updated (had to create Keyspace object)",
			})
			return nil
		}
		return err
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *ki,
		Status:       "updated",
	})
	return nil
}
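This pre-versioning variant overwrites unconditionally (ZooKeeper version -1) and falls back to creating the node on ZNONODE. A minimal caller sketch under that contract; the helper name and error wrapping are assumptions, not part of the example:

// saveKeyspace is a hypothetical wrapper: it assumes zkts is a
// connected *Server and ki was previously read from the topology.
func saveKeyspace(zkts *Server, ki *topo.KeyspaceInfo) error {
	err := zkts.UpdateKeyspace(ki)
	if err == topo.ErrNoNode {
		// Even the keyspaces directory is missing, so the topology
		// hierarchy has to be created before updating.
		return fmt.Errorf("keyspace %v not in topology: %v", ki.KeyspaceName(), err)
	}
	return err
}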
Example #2
func (tee *Tee) UpdateKeyspace(ki *topo.KeyspaceInfo) error {
	if err := tee.primary.UpdateKeyspace(ki); err != nil {
		// failed on primary, not updating secondary
		return err
	}

	if err := tee.secondary.UpdateKeyspace(ki); err != nil {
		// not critical enough to fail
		log.Warningf("secondary.UpdateKeyspace(%v) failed: %v", ki.KeyspaceName(), err)
	}
	return nil
}
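The tee treats the primary as the source of truth: a primary failure aborts the call, while a secondary failure is only logged. Reduced to a generic dual-write sketch (the helper below is an illustration, not part of the Tee type):

// dualWrite illustrates the tee pattern: fail hard on the primary
// store, log and continue on the best-effort secondary.
func dualWrite(primary, secondary func() error) error {
	if err := primary(); err != nil {
		// the source of truth failed; leave the secondary untouched
		return err
	}
	if err := secondary(); err != nil {
		log.Warningf("secondary write failed (ignored): %v", err)
	}
	return nil
}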
Example #3
// findCellsForRebuild will find all the cells in the given keyspace
// and create an entry in the map for them
func findCellsForRebuild(ki *topo.KeyspaceInfo, shardMap map[string]*topo.ShardInfo, cells []string, srvKeyspaceMap map[string]*topodatapb.SrvKeyspace) {
	for _, si := range shardMap {
		for _, cell := range si.Cells {
			if !topo.InCellList(cell, cells) {
				continue
			}
			if _, ok := srvKeyspaceMap[cell]; !ok {
				srvKeyspaceMap[cell] = &topodatapb.SrvKeyspace{
					ShardingColumnName: ki.ShardingColumnName,
					ShardingColumnType: ki.ShardingColumnType,
					ServedFrom:         ki.ComputeCellServedFrom(cell),
				}
			}
		}
	}
}
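Only cells that appear in a shard record and pass the topo.InCellList filter get an entry, so the resulting map doubles as the list of cells whose serving graph needs rebuilding. A hedged usage sketch (how shardMap is obtained is an assumption):

// Hypothetical usage: seed the per-cell SrvKeyspace skeletons before
// rebuilding the serving graph. shardMap would typically come from a
// helper such as topo.FindAllShardsInKeyspace; it is assumed here.
srvKeyspaceMap := make(map[string]*topodatapb.SrvKeyspace)
findCellsForRebuild(ki, shardMap, nil /* nil matches all cells */, srvKeyspaceMap)
for cell := range srvKeyspaceMap {
	log.Infof("will rebuild serving graph in cell %v", cell)
}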
Example #4
// UpdateKeyspace is part of the topo.Server interface
func (zkts *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
	keyspacePath := path.Join(globalKeyspacesPath, ki.KeyspaceName())
	data := jscfg.ToJSON(ki.Keyspace)
	stat, err := zkts.zconn.Set(keyspacePath, data, int(existingVersion))
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			err = topo.ErrNoNode
		}
		return -1, err
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *ki,
		Status:       "updated",
	})
	return int64(stat.Version()), nil
}
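Unlike Example #1, this signature threads an explicit version through Set, so a concurrent writer surfaces as an error instead of a silent lost update. A minimal read-modify-write retry loop over that contract; the helper, and the use of topo.ErrBadVersion as the mismatch sentinel, are assumptions:

// updateWithRetry sketches optimistic locking on top of the versioned
// UpdateKeyspace: re-read, mutate, retry for as long as the version moves.
func updateWithRetry(ctx context.Context, ts topo.Server, keyspace string, mutate func(*topo.KeyspaceInfo)) error {
	for {
		ki, err := ts.GetKeyspace(ctx, keyspace)
		if err != nil {
			return err
		}
		mutate(ki)
		if _, err = ts.UpdateKeyspace(ctx, ki, ki.Version()); err == nil {
			return nil
		} else if err != topo.ErrBadVersion {
			return err
		}
		// lost the race: another writer bumped the version, try again
	}
}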
Example #5
// findCellsForRebuild will find all the cells in the given keyspace
// and create an entry in the map for them
func (wr *Wrangler) findCellsForRebuild(ki *topo.KeyspaceInfo, shardMap map[string]*topo.ShardInfo, cells []string, srvKeyspaceMap map[string]*topo.SrvKeyspace) {
	for _, si := range shardMap {
		for _, cell := range si.Cells {
			if !topo.InCellList(cell, cells) {
				continue
			}
			if _, ok := srvKeyspaceMap[cell]; !ok {
				srvKeyspaceMap[cell] = &topo.SrvKeyspace{
					ShardingColumnName: ki.ShardingColumnName,
					ShardingColumnType: key.ProtoToKeyspaceIdType(ki.ShardingColumnType),
					ServedFrom:         ki.ComputeCellServedFrom(cell),
					SplitShardCount:    ki.SplitShardCount,
				}
			}
		}
	}
}
Example #6
// UpdateKeyspace implements topo.Server.
func (s *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
	data := jscfg.ToJSON(ki.Keyspace)

	resp, err := s.getGlobal().CompareAndSwap(keyspaceFilePath(ki.KeyspaceName()),
		data, 0 /* ttl */, "" /* prevValue */, uint64(existingVersion))
	if err != nil {
		return -1, convertError(err)
	}
	if resp.Node == nil {
		return -1, ErrBadResponse
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *ki,
		Status:       "updated",
	})
	return int64(resp.Node.ModifiedIndex), nil
}
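convertError maps etcd error codes onto the topo package's sentinel errors, so callers can keep comparing against topo.ErrNoNode and friends. A plausible sketch against the etcd v2 client; the exact set of codes handled by the real function is an assumption:

// convertError sketch: translate go-etcd error codes into topo
// sentinels. The codes follow the etcd v2 API (100 = key not found,
// 101 = compare-and-swap test failed, 105 = node already exists).
func convertError(err error) error {
	if etcdErr, ok := err.(*etcd.EtcdError); ok {
		switch etcdErr.ErrorCode {
		case 100:
			return topo.ErrNoNode
		case 101:
			return topo.ErrBadVersion
		case 105:
			return topo.ErrNodeExists
		}
	}
	return err
}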
Example #7
func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, destinationShard *topo.ShardInfo, servedType topodatapb.TabletType, cells []string, reverse bool, filteredReplicationWaitTime time.Duration) (err error) {

	// re-read and update keyspace info record
	ki, err = wr.ts.GetKeyspace(ctx, ki.KeyspaceName())
	if err != nil {
		return err
	}
	if reverse {
		ki.UpdateServedFromMap(servedType, cells, destinationShard.SourceShards[0].Keyspace, false, nil)
	} else {
		ki.UpdateServedFromMap(servedType, cells, destinationShard.SourceShards[0].Keyspace, true, destinationShard.Cells)
	}

	// re-read and check the destination shard
	destinationShard, err = wr.ts.GetShard(ctx, destinationShard.Keyspace(), destinationShard.ShardName())
	if err != nil {
		return err
	}
	if len(destinationShard.SourceShards) != 1 {
		return fmt.Errorf("Destination shard %v/%v is not a vertical split target", destinationShard.Keyspace(), destinationShard.ShardName())
	}
	tables := destinationShard.SourceShards[0].Tables

	// read the source shard, we'll need its master, and we'll need to
	// update the blacklisted tables.
	var sourceShard *topo.ShardInfo
	sourceShard, err = wr.ts.GetShard(ctx, destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard)
	if err != nil {
		return err
	}

	ev := &events.MigrateServedFrom{
		KeyspaceName:     ki.KeyspaceName(),
		SourceShard:      *sourceShard,
		DestinationShard: *destinationShard,
		ServedType:       servedType,
		Reverse:          reverse,
	}
	event.DispatchUpdate(ev, "start")
	defer func() {
		if err != nil {
			event.DispatchUpdate(ev, "failed: "+err.Error())
		}
	}()

	if servedType == topodatapb.TabletType_MASTER {
		err = wr.masterMigrateServedFrom(ctx, ki, sourceShard, destinationShard, tables, ev, filteredReplicationWaitTime)
	} else {
		err = wr.replicaMigrateServedFrom(ctx, ki, sourceShard, destinationShard, servedType, cells, reverse, tables, ev)
	}
	event.DispatchUpdate(ev, "finished")
	return
}
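Here the reverse flag only flips bookkeeping on the ServedFrom map; compare Examples #9 and #10, where the same transition is still written by hand against ki.ServedFrom. Restating the two calls, with sourceKeyspace standing for destinationShard.SourceShards[0].Keyspace, and assuming the fourth parameter is a remove flag (which the forward/reverse asymmetry suggests):

// forward: stop serving servedType from the source keyspace in the
// destination shard's cells
ki.UpdateServedFromMap(servedType, cells, sourceKeyspace, true, destinationShard.Cells)
// reverse: serve servedType from the source keyspace again
ki.UpdateServedFromMap(servedType, cells, sourceKeyspace, false, nil)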
Example #8
func (zkts *Server) UpdateKeyspace(ki *topo.KeyspaceInfo) error {
	keyspacePath := path.Join(globalKeyspacesPath, ki.KeyspaceName())
	_, err := zkts.zconn.Set(keyspacePath, jscfg.ToJson(ki.Keyspace), -1)
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			// The code should be:
			//   err = topo.ErrNoNode
			// Temporary code until we have Keyspace object
			// everywhere:
			_, err = zkts.zconn.Create(keyspacePath, jscfg.ToJson(ki.Keyspace), 0, zookeeper.WorldACL(zookeeper.PERM_ALL))
			if err != nil {
				if zookeeper.IsError(err, zookeeper.ZNONODE) {
					// the directory doesn't even exist
					err = topo.ErrNoNode
				}
			}
		}
	}
	return err
}
Example #9
func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, reverse bool) (err error) {

	// re-read and update keyspace info record
	ki, err = wr.ts.GetKeyspace(ki.KeyspaceName())
	if err != nil {
		return err
	}
	if reverse {
		if _, ok := ki.ServedFrom[servedType]; ok {
			return fmt.Errorf("Destination Keyspace %s is not serving type %v", ki.KeyspaceName(), servedType)
		}
		ki.ServedFrom[servedType] = destinationShard.SourceShards[0].Keyspace
	} else {
		if _, ok := ki.ServedFrom[servedType]; !ok {
			return fmt.Errorf("Destination Keyspace %s is already serving type %v", ki.KeyspaceName(), servedType)
		}
		delete(ki.ServedFrom, servedType)
	}

	// re-read and check the destination shard
	destinationShard, err = wr.ts.GetShard(destinationShard.Keyspace(), destinationShard.ShardName())
	if err != nil {
		return err
	}
	if len(destinationShard.SourceShards) != 1 {
		return fmt.Errorf("Destination shard %v/%v is not a vertical split target", destinationShard.Keyspace(), destinationShard.ShardName())
	}
	tables := destinationShard.SourceShards[0].Tables

	// read the source shard, we'll need its master, and we'll need to
	// update the blacklisted tables.
	var sourceShard *topo.ShardInfo
	sourceShard, err = wr.ts.GetShard(destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard)
	if err != nil {
		return err
	}

	ev := &events.MigrateServedFrom{
		Keyspace:         *ki,
		SourceShard:      *sourceShard,
		DestinationShard: *destinationShard,
		ServedType:       servedType,
		Reverse:          reverse,
	}
	event.DispatchUpdate(ev, "start")
	defer func() {
		if err != nil {
			event.DispatchUpdate(ev, "failed: "+err.Error())
		}
	}()

	if servedType == topo.TYPE_MASTER {
		err = wr.masterMigrateServedFrom(ki, sourceShard, destinationShard, servedType, tables, ev)
	} else {
		err = wr.replicaMigrateServedFrom(ki, sourceShard, destinationShard, servedType, reverse, tables, ev)
	}
	event.DispatchUpdate(ev, "finished")
	return
}
Example #10
func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, si *topo.ShardInfo, servedType topo.TabletType, reverse bool) (err error) {

	// re-read and update keyspace info record
	ki, err = wr.ts.GetKeyspace(ki.KeyspaceName())
	if err != nil {
		return err
	}
	if reverse {
		if _, ok := ki.ServedFrom[servedType]; ok {
			return fmt.Errorf("Destination Keyspace %s is not serving type %v", ki.KeyspaceName(), servedType)
		}
		ki.ServedFrom[servedType] = si.SourceShards[0].Keyspace
	} else {
		if _, ok := ki.ServedFrom[servedType]; !ok {
			return fmt.Errorf("Destination Keyspace %s is already serving type %v", ki.KeyspaceName(), servedType)
		}
		delete(ki.ServedFrom, servedType)
	}

	// re-read and check the destination shard
	si, err = wr.ts.GetShard(si.Keyspace(), si.ShardName())
	if err != nil {
		return err
	}
	if len(si.SourceShards) != 1 {
		return fmt.Errorf("Destination shard %v/%v is not a vertical split target", si.Keyspace(), si.ShardName())
	}
	tables := si.SourceShards[0].Tables

	// read the source shard, we'll need its master
	sourceShard, err := wr.ts.GetShard(si.SourceShards[0].Keyspace, si.SourceShards[0].Shard)
	if err != nil {
		return err
	}

	ev := &events.MigrateServedFrom{
		Keyspace:         *ki,
		SourceShard:      *sourceShard,
		DestinationShard: *si,
		ServedType:       servedType,
		Reverse:          reverse,
	}
	event.DispatchUpdate(ev, "start")
	defer func() {
		if err != nil {
			event.DispatchUpdate(ev, "failed: "+err.Error())
		}
	}()

	// For master type migration, need to:
	// - switch the source shard to read-only
	// - gather the replication point
	// - wait for filtered replication to catch up before we continue
	// - disable filtered replication after the fact
	var sourceMasterTabletInfo *topo.TabletInfo
	if servedType == topo.TYPE_MASTER {
		// set master to read-only
		event.DispatchUpdate(ev, "setting source shard master to read-only")
		actionPath, err := wr.ai.SetReadOnly(sourceShard.MasterAlias)
		if err != nil {
			return err
		}
		if err := wr.WaitForCompletion(actionPath); err != nil {
			return err
		}

		// get the position
		event.DispatchUpdate(ev, "getting master position")
		sourceMasterTabletInfo, err = wr.ts.GetTablet(sourceShard.MasterAlias)
		if err != nil {
			return err
		}
		masterPosition, err := wr.ai.MasterPosition(sourceMasterTabletInfo, wr.ActionTimeout())
		if err != nil {
			return err
		}

		// wait for it
		event.DispatchUpdate(ev, "waiting for destination master to catch up to source master")
		if err := wr.ai.WaitBlpPosition(si.MasterAlias, blproto.BlpPosition{
			Uid:      0,
			Position: masterPosition,
		}, wr.ActionTimeout()); err != nil {
			return err
		}

		// and clear the shard record
		si.SourceShards = nil
	}

	// All is good, we can save the keyspace and shard (if needed) now
	event.DispatchUpdate(ev, "updating keyspace")
	if err = topo.UpdateKeyspace(wr.ts, ki); err != nil {
		return err
	}
	event.DispatchUpdate(ev, "updating destination shard")
	if servedType == topo.TYPE_MASTER {
		if err := topo.UpdateShard(wr.ts, si); err != nil {
			return err
		}
	}

	// Tell the new shards masters they can now be read-write.
	// Invoking a remote action will also make the tablet stop filtered
	// replication.
	event.DispatchUpdate(ev, "setting destination shard masters read-write")
	if servedType == topo.TYPE_MASTER {
		if err := wr.makeMastersReadWrite([]*topo.ShardInfo{si}); err != nil {
			return err
		}
	}

	// Now blacklist the table list on the right servers
	event.DispatchUpdate(ev, "setting blacklisted tables on source shard")
	if servedType == topo.TYPE_MASTER {
		if err := wr.ai.SetBlacklistedTables(sourceMasterTabletInfo, tables, wr.ActionTimeout()); err != nil {
			return err
		}
	} else {
		// We use the list of tables that are replicating
		// for the blacklist. In case of a reverse move, we clear the
		// blacklist.
		if reverse {
			tables = nil
		}
		if err := wr.SetBlacklistedTablesByShard(sourceShard.Keyspace(), sourceShard.ShardName(), servedType, tables); err != nil {
			return err
		}
	}

	event.DispatchUpdate(ev, "finished")
	return nil
}
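The master branch encodes a strict ordering: freeze writes on the source, snapshot its replication position, wait for the destination's binlog player (uid 0) to apply up to that point, and only then rewrite the keyspace and shard records and re-enable writes on the destination. Flipping the topology before the catch-up wait would route clients to a destination master that has not yet applied all source writes. Condensed as a checklist, with names taken from the example above:

// 1. wr.ai.SetReadOnly(sourceShard.MasterAlias)        - stop new writes
// 2. wr.ai.MasterPosition(sourceMasterTabletInfo)      - snapshot position
// 3. wr.ai.WaitBlpPosition(si.MasterAlias, ...)        - destination catches up
// 4. si.SourceShards = nil; UpdateKeyspace/UpdateShard - flip topology
// 5. wr.makeMastersReadWrite([]*topo.ShardInfo{si})    - destination goes live
// 6. SetBlacklistedTables on the source master         - fence old reads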
Example #11
// UpdateKeyspace is part of the topo.Server interface
func (tee *Tee) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (newVersion int64, err error) {
	if newVersion, err = tee.primary.UpdateKeyspace(ctx, ki, existingVersion); err != nil {
		// failed on primary, not updating secondary
		return
	}

	// if we have a mapping between keyspace version in first topo
	// and keyspace version in second topo, replace the version number.
	// if not, this will probably fail and log.
	tee.mu.Lock()
	kvm, ok := tee.keyspaceVersionMapping[ki.KeyspaceName()]
	if ok && kvm.readFromVersion == existingVersion {
		existingVersion = kvm.readFromSecondVersion
		delete(tee.keyspaceVersionMapping, ki.KeyspaceName())
	}
	tee.mu.Unlock()
	if newVersion2, serr := tee.secondary.UpdateKeyspace(ctx, ki, existingVersion); serr != nil {
		// not critical enough to fail
		if serr == topo.ErrNoNode {
			// the keyspace doesn't exist on the secondary, let's
			// just create it
			if serr = tee.secondary.CreateKeyspace(ctx, ki.KeyspaceName(), ki.Keyspace); serr != nil {
				log.Warningf("secondary.CreateKeyspace(%v) failed (after UpdateKeyspace returned ErrNoNode): %v", ki.KeyspaceName(), serr)
			} else {
				log.Infof("secondary.UpdateKeyspace(%v) failed with ErrNoNode, CreateKeyspace then worked.", ki.KeyspaceName())
				ki, gerr := tee.secondary.GetKeyspace(ctx, ki.KeyspaceName())
				if gerr != nil {
					log.Warningf("Failed to re-read keyspace(%v) after creating it on secondary: %v", ki.KeyspaceName(), gerr)
				} else {
					tee.mu.Lock()
					tee.keyspaceVersionMapping[ki.KeyspaceName()] = versionMapping{
						readFromVersion:       newVersion,
						readFromSecondVersion: ki.Version(),
					}
					tee.mu.Unlock()
				}
			}
		} else {
			log.Warningf("secondary.UpdateKeyspace(%v) failed: %v", ki.KeyspaceName(), serr)
		}
	} else {
		tee.mu.Lock()
		tee.keyspaceVersionMapping[ki.KeyspaceName()] = versionMapping{
			readFromVersion:       newVersion,
			readFromSecondVersion: newVersion2,
		}
		tee.mu.Unlock()
	}
	return
}
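The version mapping exists because the two underlying servers assign independent version numbers to the same keyspace: after a successful double write, the tee remembers the (primary, secondary) pair so the next conditional update can translate the caller's primary version into the matching secondary one. The struct is implied by the fields used above; its definition here is an assumption:

// versionMapping pairs the version a client read from the primary
// topo server with the corresponding version on the secondary.
type versionMapping struct {
	readFromVersion       int64 // version returned by the primary
	readFromSecondVersion int64 // matching version on the secondary
}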
Example #12
func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, si *topo.ShardInfo, servedType topo.TabletType, reverse bool) error {

	// re-read and update keyspace info record
	var err error
	ki, err = wr.ts.GetKeyspace(ki.KeyspaceName())
	if err != nil {
		return err
	}
	if reverse {
		if _, ok := ki.ServedFrom[servedType]; ok {
			return fmt.Errorf("Destination Keyspace %s is not serving type %v", ki.KeyspaceName(), servedType)
		}
		ki.ServedFrom[servedType] = si.SourceShards[0].Keyspace
	} else {
		if _, ok := ki.ServedFrom[servedType]; !ok {
			return fmt.Errorf("Destination Keyspace %s is already serving type %v", ki.KeyspaceName(), servedType)
		}
		delete(ki.ServedFrom, servedType)
	}

	// re-read and check the destination shard
	si, err = wr.ts.GetShard(si.Keyspace(), si.ShardName())
	if err != nil {
		return err
	}
	if len(si.SourceShards) != 1 {
		return fmt.Errorf("Destination shard %v/%v is not a vertical split target", si.Keyspace(), si.ShardName())
	}
	tables := si.SourceShards[0].Tables

	// read the source shard, we'll need its master
	sourceShard, err := wr.ts.GetShard(si.SourceShards[0].Keyspace, si.SourceShards[0].Shard)
	if err != nil {
		return err
	}

	// For master type migration, need to:
	// - switch the source shard to read-only
	// - gather the replication point
	// - wait for filtered replication to catch up before we continue
	// - disable filtered replication after the fact
	var sourceMasterTabletInfo *topo.TabletInfo
	if servedType == topo.TYPE_MASTER {
		// set master to read-only
		actionPath, err := wr.ai.SetReadOnly(sourceShard.MasterAlias)
		if err != nil {
			return err
		}
		if err := wr.ai.WaitForCompletion(actionPath, wr.actionTimeout()); err != nil {
			return err
		}

		// get the position
		sourceMasterTabletInfo, err = wr.ts.GetTablet(sourceShard.MasterAlias)
		if err != nil {
			return err
		}
		masterPosition, err := wr.ai.MasterPosition(sourceMasterTabletInfo, wr.actionTimeout())
		if err != nil {
			return err
		}

		// wait for it
		if err := wr.ai.WaitBlpPosition(si.MasterAlias, myproto.BlpPosition{
			Uid:     0,
			GroupId: masterPosition.MasterLogGroupId,
		}, wr.actionTimeout()); err != nil {
			return err
		}

		// and clear the shard record
		si.SourceShards = nil
	}

	// All is good, we can save the keyspace and shard (if needed) now
	if err = wr.ts.UpdateKeyspace(ki); err != nil {
		return err
	}
	if servedType == topo.TYPE_MASTER {
		if err := wr.ts.UpdateShard(si); err != nil {
			return err
		}
	}

	// Tell the new shards masters they can now be read-write.
	// Invoking a remote action will also make the tablet stop filtered
	// replication.
	if servedType == topo.TYPE_MASTER {
		if err := wr.makeMastersReadWrite([]*topo.ShardInfo{si}); err != nil {
			return err
		}
	}

	// Now blacklist the table list on the right servers
	if servedType == topo.TYPE_MASTER {
		if err := wr.ai.SetBlacklistedTables(sourceMasterTabletInfo, tables, wr.actionTimeout()); err != nil {
			return err
		}
	} else {
		// We use the list of tables that are replicating
		// for the blacklist. In case of a reverse move, we clear the
		// blacklist.
		if reverse {
			tables = nil
		}
		if err := wr.SetBlacklistedTablesByShard(sourceShard.Keyspace(), sourceShard.ShardName(), servedType, tables); err != nil {
			return err
		}
	}

	return nil
}