// CreateShard is part of the topo.Server interface
func (zkts *Server) CreateShard(ctx context.Context, keyspace, shard string, value *pb.Shard) error {
	shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard)
	pathList := []string{
		shardPath,
		path.Join(shardPath, "action"),
		path.Join(shardPath, "actionlog"),
	}

	alreadyExists := false
	for i, zkPath := range pathList {
		c := ""
		if i == 0 {
			c = jscfg.ToJSON(value)
		}
		_, err := zk.CreateRecursive(zkts.zconn, zkPath, c, 0, zookeeper.WorldACL(zookeeper.PERM_ALL))
		if err != nil {
			if zookeeper.IsError(err, zookeeper.ZNODEEXISTS) {
				alreadyExists = true
			} else {
				return fmt.Errorf("error creating shard: %v %v", zkPath, err)
			}
		}
	}
	if alreadyExists {
		return topo.ErrNodeExists
	}

	event.Dispatch(&events.ShardChange{
		ShardInfo: *topo.NewShardInfo(keyspace, shard, value, -1),
		Status:    "created",
	})
	return nil
}
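The ZooKeeper implementation above maps ZNODEEXISTS to topo.ErrNodeExists instead of failing outright. A minimal caller sketch of that contract, assuming the topo.Server interface shape used in this snippet (the createShardIfAbsent helper name is hypothetical):

// createShardIfAbsent is a hypothetical helper: it creates the shard and
// treats topo.ErrNodeExists as success, so repeated calls are idempotent.
func createShardIfAbsent(ctx context.Context, ts topo.Server, keyspace, shard string, value *pb.Shard) error {
	switch err := ts.CreateShard(ctx, keyspace, shard, value); err {
	case nil, topo.ErrNodeExists:
		return nil
	default:
		return err
	}
}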
// CreateShard implements topo.Server.
func (s *Server) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error {
	data := jscfg.ToJSON(value)
	global := s.getGlobal()

	resp, err := global.Create(shardFilePath(keyspace, shard), data, 0 /* ttl */)
	if err != nil {
		return convertError(err)
	}
	if err := initLockFile(global, shardDirPath(keyspace, shard)); err != nil {
		return err
	}

	// We don't return ErrBadResponse in this case because the Create() succeeded
	// and we don't really need the version to satisfy our contract - we're only
	// logging it.
	version := int64(-1)
	if resp.Node != nil {
		version = int64(resp.Node.ModifiedIndex)
	}

	event.Dispatch(&events.ShardChange{
		ShardInfo: *topo.NewShardInfo(keyspace, shard, value, version),
		Status:    "created",
	})
	return nil
}
func TestReparentSyslog(t *testing.T) {
	wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123/shard-123 [reparent cell-0000012345 -> cell-0000054321] status"
	tc := &Reparent{
		ShardInfo: *topo.NewShardInfo("keyspace-123", "shard-123", nil, -1),
		OldMaster: topo.Tablet{
			Alias: topo.TabletAlias{
				Cell: "cell",
				Uid:  12345,
			},
		},
		NewMaster: topo.Tablet{
			Alias: topo.TabletAlias{
				Cell: "cell",
				Uid:  54321,
			},
		},
		StatusUpdater: base.StatusUpdater{Status: "status"},
	}
	gotSev, gotMsg := tc.Syslog()

	if gotSev != wantSev {
		t.Errorf("wrong severity: got %v, want %v", gotSev, wantSev)
	}
	if gotMsg != wantMsg {
		t.Errorf("wrong message: got %v, want %v", gotMsg, wantMsg)
	}
}
func siBytes(start, end string) *topo.ShardInfo {
	return topo.NewShardInfo("keyspace", start+"-"+end, &topo.Shard{
		KeyRange: key.KeyRange{
			Start: key.KeyspaceId(start),
			End:   key.KeyspaceId(end),
		},
	}, 0)
}
func (topoServer *fakeTopo) GetShard(ctx context.Context, keyspace string, shard string) (*topo.ShardInfo, error) {
	value := &pb.Shard{
		MasterAlias: &pb.TabletAlias{
			Cell: "test_cell",
			Uid:  0,
		},
	}
	return topo.NewShardInfo(keyspace, shard, value, 0), nil
}
func TestMigrateServedFromSyslogReverse(t *testing.T) {
	wantSev, wantMsg := syslog.LOG_INFO, "keyspace-1 [migrate served-from keyspace-2/source-shard <- keyspace-1/dest-shard] status"
	ev := &MigrateServedFrom{
		Keyspace:         *topo.NewKeyspaceInfo("keyspace-1", nil, -1),
		SourceShard:      *topo.NewShardInfo("keyspace-2", "source-shard", nil, -1),
		DestinationShard: *topo.NewShardInfo("keyspace-1", "dest-shard", nil, -1),
		Reverse:          true,
		StatusUpdater:    base.StatusUpdater{Status: "status"},
	}
	gotSev, gotMsg := ev.Syslog()

	if gotSev != wantSev {
		t.Errorf("wrong severity: got %v, want %v", gotSev, wantSev)
	}
	if gotMsg != wantMsg {
		t.Errorf("wrong message: got %v, want %v", gotMsg, wantMsg)
	}
}
func siBytes(start, end string) *topo.ShardInfo {
	s := hex.EncodeToString([]byte(start))
	e := hex.EncodeToString([]byte(end))
	return topo.NewShardInfo("keyspace", s+"-"+e, &topodatapb.Shard{
		KeyRange: &topodatapb.KeyRange{
			Start: []byte(start),
			End:   []byte(end),
		},
	}, 0)
}
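For illustration, a hedged sketch of how the hex-encoded shard names produced by this helper line up with the key range boundaries (the test function name is hypothetical):

// Hypothetical test sketch: the shard name is the hex encoding of the range
// boundaries, so the range 0x40-0x80 yields the shard name "40-80".
func TestSiBytesName(t *testing.T) {
	si := siBytes("\x40", "\x80")
	if got, want := si.ShardName(), "40-80"; got != want {
		t.Errorf("shard name: got %q, want %q", got, want)
	}
}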
func si(start, end string) *topo.ShardInfo {
	s := hki(start)
	e := hki(end)
	return topo.NewShardInfo("keyspace", s.String()+"-"+e.String(), &topo.Shard{
		KeyRange: key.KeyRange{
			Start: s,
			End:   e,
		},
	}, 0)
}
func si(start, end string) *topo.ShardInfo {
	s := hki(start)
	e := hki(end)
	return topo.NewShardInfo("keyspace", start+"-"+end, &pb.Shard{
		KeyRange: &pb.KeyRange{
			Start: s,
			End:   e,
		},
	}, 0)
}
// DeleteShard implements topo.Server.
func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string) error {
	_, err := s.getGlobal().Delete(shardDirPath(keyspace, shard), true /* recursive */)
	if err != nil {
		return convertError(err)
	}

	event.Dispatch(&events.ShardChange{
		ShardInfo: *topo.NewShardInfo(keyspace, shard, nil, -1),
		Status:    "deleted",
	})
	return nil
}
func TestMigrateServedTypesSyslogReverse(t *testing.T) {
	wantSev, wantMsg := syslog.LOG_INFO, "keyspace-1 [migrate served-types {src1, src2} <- {dst1, dst2}] status"
	ev := &MigrateServedTypes{
		Keyspace: *topo.NewKeyspaceInfo("keyspace-1", nil, -1),
		SourceShards: []*topo.ShardInfo{
			topo.NewShardInfo("keyspace-1", "src1", nil, -1),
			topo.NewShardInfo("keyspace-1", "src2", nil, -1),
		},
		DestinationShards: []*topo.ShardInfo{
			topo.NewShardInfo("keyspace-1", "dst1", nil, -1),
			topo.NewShardInfo("keyspace-1", "dst2", nil, -1),
		},
		Reverse:       true,
		StatusUpdater: base.StatusUpdater{Status: "status"},
	}
	gotSev, gotMsg := ev.Syslog()

	if gotSev != wantSev {
		t.Errorf("wrong severity: got %v, want %v", gotSev, wantSev)
	}
	if gotMsg != wantMsg {
		t.Errorf("wrong message: got %v, want %v", gotMsg, wantMsg)
	}
}
func TestShardChangeSyslog(t *testing.T) {
	wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123/shard-123 [shard] status"
	sc := &ShardChange{
		ShardInfo: *topo.NewShardInfo("keyspace-123", "shard-123", nil, -1),
		Status:    "status",
	}
	gotSev, gotMsg := sc.Syslog()

	if gotSev != wantSev {
		t.Errorf("wrong severity: got %v, want %v", gotSev, wantSev)
	}
	if gotMsg != wantMsg {
		t.Errorf("wrong message: got %v, want %v", gotMsg, wantMsg)
	}
}
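The ShardChange events tested here are the same ones the CreateShard/DeleteShard implementations publish via event.Dispatch. A hedged sketch of the consuming side, assuming the event package also exposes an AddListener registration (only Dispatch appears in these snippets):

// Hedged sketch: log every ShardChange dispatched by the topo implementations
// above. event.AddListener is an assumption; only event.Dispatch is shown in
// the original code.
func init() {
	event.AddListener(func(sc *events.ShardChange) {
		log.Printf("shard %v/%v: %v", sc.ShardInfo.Keyspace(), sc.ShardInfo.ShardName(), sc.Status)
	})
}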
func (zkts *Server) GetShard(keyspace, shard string) (*topo.ShardInfo, error) {
	shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard)
	data, _, err := zkts.zconn.Get(shardPath)
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			err = topo.ErrNoNode
		}
		return nil, err
	}

	shardInfo, err := topo.NewShardInfo(keyspace, shard, data)
	if err != nil {
		return nil, err
	}
	return shardInfo, nil
}
// DeleteShard is part of the topo.Server interface
func (zkts *Server) DeleteShard(ctx context.Context, keyspace, shard string) error {
	shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard)
	err := zk.DeleteRecursive(zkts.zconn, shardPath, -1)
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			err = topo.ErrNoNode
		}
		return err
	}

	event.Dispatch(&events.ShardChange{
		ShardInfo: *topo.NewShardInfo(keyspace, shard, nil, -1),
		Status:    "deleted",
	})
	return nil
}
// GetShard implements topo.Server.
func (s *Server) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) {
	resp, err := s.getGlobal().Get(shardFilePath(keyspace, shard), false /* sort */, false /* recursive */)
	if err != nil {
		return nil, convertError(err)
	}
	if resp.Node == nil {
		return nil, ErrBadResponse
	}

	value := &topo.Shard{}
	if err := json.Unmarshal([]byte(resp.Node.Value), value); err != nil {
		return nil, fmt.Errorf("bad shard data (%v): %q", err, resp.Node.Value)
	}

	return topo.NewShardInfo(keyspace, shard, value, int64(resp.Node.ModifiedIndex)), nil
}
// GetShard is part of the topo.Server interface
func (zkts *Server) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) {
	shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard)
	data, stat, err := zkts.zconn.Get(shardPath)
	if err != nil {
		if zookeeper.IsError(err, zookeeper.ZNONODE) {
			err = topo.ErrNoNode
		}
		return nil, err
	}

	s := &pb.Shard{}
	if err = json.Unmarshal([]byte(data), s); err != nil {
		return nil, fmt.Errorf("bad shard data %v", err)
	}

	return topo.NewShardInfo(keyspace, shard, s, int64(stat.Version())), nil
}
// Update shard file with new master, replicas, etc.
//
// Re-read from TopologyServer to make sure we are using the side
// effects of all actions.
//
// This function should only be used with an action lock on the shard
// - otherwise the consistency of the serving graph data can't be
// guaranteed.
func (wr *Wrangler) rebuildShard(keyspace, shard string, cells []string) error {
	relog.Info("rebuildShard %v/%v", keyspace, shard)
	// NOTE(msolomon) nasty hack - pass non-empty string to bypass data check
	shardInfo, err := topo.NewShardInfo(keyspace, shard, "{}")
	if err != nil {
		return err
	}

	tabletMap, err := GetTabletMapForShard(wr.ts, keyspace, shard)
	if err != nil {
		return err
	}
	tablets := make([]*topo.TabletInfo, 0, len(tabletMap))
	for _, ti := range tabletMap {
		if ti.Keyspace != shardInfo.Keyspace() || ti.Shard != shardInfo.ShardName() {
			return fmt.Errorf("CRITICAL: tablet %v is in replication graph for shard %v/%v but belongs to shard %v:%v (maybe remove its replication path in shard %v/%v)", ti.Alias(), keyspace, shard, ti.Keyspace, ti.Shard, keyspace, shard)
		}
		if !ti.IsInReplicationGraph() {
			// only valid case is a scrapped master in the
			// catastrophic reparent case
			if ti.Parent.Uid != topo.NO_TABLET {
				relog.Warning("Tablet %v should not be in the replication graph, please investigate (it will be ignored in the rebuild)", ti.Alias())
			}
		}
		tablets = append(tablets, ti)
	}

	// Rebuild the rollup data in the replication graph.
	if err = shardInfo.Rebuild(tablets); err != nil {
		return err
	}
	if err = wr.ts.UpdateShard(shardInfo); err != nil {
		return err
	}
	return wr.rebuildShardSrvGraph(shardInfo, tablets, cells)
}
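rebuildShard ends by writing the rolled-up shard record back with wr.ts.UpdateShard. A hedged sketch of the same read-modify-write pattern for a simpler change, using only the GetShard and UpdateShard calls that appear in these snippets; the setShardMaster name is hypothetical, and direct access to MasterAlias assumes ShardInfo embeds the Shard record:

// setShardMaster is a hypothetical helper: fetch the shard record, point it
// at a new master, and write it back. Like rebuildShard, it should only be
// called while holding the shard action lock.
func (wr *Wrangler) setShardMaster(keyspace, shard string, master topo.TabletAlias) error {
	shardInfo, err := wr.ts.GetShard(keyspace, shard)
	if err != nil {
		return err
	}
	shardInfo.MasterAlias = master
	return wr.ts.UpdateShard(shardInfo)
}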