/*
This tool changes the replication factor stored in a .dat file header. Shut down the volume
servers that hold those volumes before running it.

1. fix the .dat file in place
	// just see the replication setting
	go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads
		Current Volume Replication: 000
	// fix the replication setting
	go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads -replication 001
		Current Volume Replication: 000
		Changing to: 001
		Done.

2. copy the fixed .dat and related .idx files to some remote server
3. restart volume servers or start new volume servers.
*/
func main() {
	flag.Parse()
	// the volume's data file is named "<volumeId>.dat", prefixed with "<collection>_" when a collection is set
	fileName := strconv.Itoa(*fixVolumeId)
	if *fixVolumeCollection != "" {
		fileName = *fixVolumeCollection + "_" + fileName
	}
	datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
	if err != nil {
		glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
	}
	defer datFile.Close()

	header := make([]byte, storage.SuperBlockSize)
	if _, e := datFile.Read(header); e != nil {
		glog.Fatalf("cannot read volume %s super block: %v", fileName+".dat", e)
	}
	superBlock, err := storage.ParseSuperBlock(header)

	if err != nil {
		glog.Fatalf("cannot parse existing super block: %v", err)
	}

	fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)

	if *targetReplica == "" {
		return
	}

	replica, err := storage.NewReplicaPlacementFromString(*targetReplica)

	if err != nil {
		glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
	}

	fmt.Printf("Changing to: %s\n", replica)

	superBlock.ReplicaPlacement = replica

	// re-serialize the super block and write it back over the existing header
	header = superBlock.Bytes()

	if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
		glog.Fatalf("cannot write super block: %v", e)
	}

	fmt.Println("Done.")

}
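The snippet above relies on flag variables declared elsewhere in the file. A minimal sketch of matching declarations, based on the flags shown in the usage comment (-dir, -volumeId, -replication) and the collection flag implied by fixVolumeCollection; the defaults and help strings are assumptions:

var (
	fixVolumePath       = flag.String("dir", ".", "directory holding the volume .dat file")
	fixVolumeCollection = flag.String("collection", "", "the volume collection name, if any")
	fixVolumeId         = flag.Int("volumeId", -1, "the volume id to inspect or change")
	targetReplica       = flag.String("replication", "", "new replication setting; leave empty to only print the current setting")
)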
Example #2
func runBackup(cmd *Command, args []string) bool {
	if *s.volumeId == -1 {
		return false
	}
	vid := storage.VolumeId(*s.volumeId)

	// find volume location, replication, ttl info
	lookup, err := operation.Lookup(*s.master, vid.String())
	if err != nil {
		fmt.Printf("Error looking up volume %d: %v\n", vid, err)
		return true
	}
	volumeServer := lookup.Locations[0].Url

	stats, err := operation.GetVolumeSyncStatus(volumeServer, vid.String())
	if err != nil {
		fmt.Printf("Error get volume %d status: %v\n", vid, err)
		return true
	}
	ttl, err := storage.ReadTTL(stats.Ttl)
	if err != nil {
		fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
		return true
	}
	replication, err := storage.NewReplicaPlacementFromString(stats.Replication)
	if err != nil {
		fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
		return true
	}

	v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl)
	if err != nil {
		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
		return true
	}

	if err := v.Synchronize(volumeServer); err != nil {
		fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
		return true
	}

	return true
}
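runBackup dereferences fields of an option struct s that is defined elsewhere in the command. A minimal sketch of the fields it needs; only the field set is inferred from the code above, while the flag names and defaults are assumptions:

// Hypothetical option holder for the backup command.
var s = struct {
	volumeId   *int
	master     *string
	dir        *string
	collection *string
}{
	volumeId:   flag.Int("volumeId", -1, "volume id to back up"),
	master:     flag.String("master", "localhost:9333", "master server location"),
	dir:        flag.String("dir", ".", "directory to store the backup copy"),
	collection: flag.String("collection", "", "collection name of the volume"),
}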
func TestFindEmptySlotsForOneVolume(t *testing.T) {
	topo := setup(topologyLayout)
	rp, _ := storage.NewReplicaPlacementFromString("111")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "",
		ReplicaPlacement: rp,
		DataCenter:       "dc1",
		Rack:             "",
		DataNode:         "",
	}
	servers, err := FindEmptySlotsForOneVolume(topo, volumeGrowOption, nil)
	if err != nil {
		t.Errorf("finding empty slots error: %v", err)
	}
	for _, server := range servers {
		t.Logf("assigned node: %s, free space: %d", server.Id(), server.FreeSpace())
	}

}
Example #4
func TestFindEmptySlotsForOneVolume(t *testing.T) {
	topo := setup(topologyLayout)
	vg := NewDefaultVolumeGrowth()
	rp, _ := storage.NewReplicaPlacementFromString("002")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "",
		ReplicaPlacement: rp,
		DataCenter:       "dc1",
		Rack:             "",
		DataNode:         "",
	}
	servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
	if err != nil {
		t.Errorf("finding empty slots error: %v", err)
	}
	for _, server := range servers {
		t.Logf("assigned node: %s", server.Id())
	}
}
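Both tests above build their target placement from a three-digit string. By SeaweedFS convention the digits count, left to right, copies on other data centers, copies on other racks in the same data center, and copies on other servers in the same rack. A small sketch that exercises the parser; the rejection of out-of-range digits is an assumption, so it is only logged:

func TestReplicaPlacementStringDigits(t *testing.T) {
	// "002": no copies in other data centers or racks, two extra copies
	// on other servers within the same rack.
	rp, err := storage.NewReplicaPlacementFromString("002")
	if err != nil {
		t.Fatalf("unexpected parse error: %v", err)
	}
	if rp.String() != "002" {
		t.Errorf("expected the placement to round-trip as 002, got %s", rp.String())
	}
	// A digit outside 0-2 is assumed to be rejected by the parser.
	if _, err := storage.NewReplicaPlacementFromString("009"); err == nil {
		t.Log("parser accepted 009; the rejection above is only an assumption")
	}
}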
func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) {
	replicationString := r.FormValue("replication")
	if replicationString == "" {
		replicationString = ms.defaultReplicaPlacement
	}
	replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString)
	if err != nil {
		return nil, err
	}
	ttl, err := storage.ReadTTL(r.FormValue("ttl"))
	if err != nil {
		return nil, err
	}
	volumeGrowOption := &topology.VolumeGrowOption{
		Collection:       r.FormValue("collection"),
		ReplicaPlacement: replicaPlacement,
		Ttl:              ttl,
		DataCenter:       r.FormValue("dataCenter"),
		Rack:             r.FormValue("rack"),
		DataNode:         r.FormValue("dataNode"),
	}
	return volumeGrowOption, nil
}
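For reference, a minimal sketch of a request that carries the form values this handler reads; the /vol/grow path and the sample values are assumptions for illustration, only the parameter names come from the FormValue calls above (uses net/http and net/url):

// buildVolumeGrowRequest encodes the options read by getVolumeGrowOption into a query string.
func buildVolumeGrowRequest(master string) (*http.Request, error) {
	v := url.Values{}
	v.Set("collection", "pictures") // sample values; only the keys match the handler
	v.Set("replication", "001")
	v.Set("ttl", "3d")
	v.Set("dataCenter", "dc1")
	v.Set("rack", "rack1")
	v.Set("dataNode", "")
	// r.FormValue reads query-string parameters as well as form bodies,
	// so encoding the options into the URL is enough here.
	return http.NewRequest("GET", "http://"+master+"/vol/grow?"+v.Encode(), nil)
}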
func TestFindEmptySlotsWithExistsNodes(t *testing.T) {
	topo := setup(topologyLayout)
	rp, _ := storage.NewReplicaPlacementFromString("112")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "",
		ReplicaPlacement: rp,
		DataCenter:       "dc1",
		Rack:             "",
		DataNode:         "",
	}
	testLocationList := setupTestLocationList(topo)
	for _, locationList := range testLocationList {
		lrp := locationList.CalcReplicaPlacement()
		t.Logf("location list: [%s], replica placement = %s\n", joinNodeId(locationList.list), lrp.String())
		// only look for additional slots when the existing placement is weaker than the target
		if lrp.Compare(rp) < 0 {
			servers, err := FindEmptySlotsForOneVolume(topo, volumeGrowOption, locationList)
			if err != nil {
				t.Log("finding empty slots error :", err)
				t.Fail()
			}
			t.Logf("assigned node: %s\n\n", joinNodeId(servers))
		}
	}
}