Example #1
// a workaround: an http POST does not re-send the request body when following a redirect, so retry the POST manually
func postFollowingOneRedirect(target string, contentType string, b *bytes.Buffer) error {
	backupReader := bytes.NewReader(b.Bytes())
	resp, err := http.Post(target, contentType, b)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	reply, _ := ioutil.ReadAll(resp.Body)
	statusCode := resp.StatusCode

	if statusCode == http.StatusMovedPermanently {
		var urlStr string
		if urlStr = resp.Header.Get("Location"); urlStr == "" {
			return fmt.Errorf("%d response missing Location header", resp.StatusCode)
		}

		glog.V(0).Infoln("Post redirected to ", urlStr)
		resp2, err2 := http.Post(urlStr, contentType, backupReader)
		if err2 != nil {
			return err2
		}
		defer resp2.Body.Close()
		reply, _ = ioutil.ReadAll(resp2.Body)
		statusCode = resp2.StatusCode
	}

	glog.V(0).Infoln("Post returned status: ", statusCode, string(reply))
	if statusCode != http.StatusOK {
		return errors.New(string(reply))
	}

	return nil
}
Example #2
func (l *DiskLocation) loadExistingVolumes() {
	if dirs, err := ioutil.ReadDir(l.Directory); err == nil {
		for _, dir := range dirs {
			name := dir.Name()

			// 123.dat      volume data file in the default collection
			// 123_456.dat  volume data file prefixed with a collection name
			if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
				collection := ""
				base := name[:len(name)-len(".dat")]
				i := strings.Index(base, "_")
				if i > 0 {
					collection, base = base[0:i], base[i+1:] // collection name, volume id base
				}
				if vid, err := NewVolumeId(base); err == nil {
					if l.volumes[vid] == nil {

						/*
							load the .dat file as a volume
						*/
						if v, e := NewVolume(l.Directory, collection, vid, nil, nil); e == nil {
							l.volumes[vid] = v
							glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String())
						}
					}
				}
			}
		}
	}
	glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount)
}
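The loop above relies on the volume file naming convention: "123.dat" is volume 123 in the default collection, while "pics_123.dat" is volume 123 in collection "pics". Below is a minimal standalone sketch of just that parsing step; parseDatName is a hypothetical helper, not part of the package.

package main

import (
	"fmt"
	"strings"
)

// parseDatName mirrors the file-name parsing in loadExistingVolumes.
func parseDatName(name string) (collection, base string) {
	base = name[:len(name)-len(".dat")]
	if i := strings.Index(base, "_"); i > 0 {
		collection, base = base[:i], base[i+1:]
	}
	return
}

func main() {
	fmt.Println(parseDatName("123.dat"))      // -> "" "123"
	fmt.Println(parseDatName("pics_456.dat")) // -> "pics" "456"
}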
Example #3
// Join joins an existing cluster.
func (s *RaftServer) Join(peers []string) error {
	command := &raft.DefaultJoinCommand{
		Name:             s.raftServer.Name(),
		ConnectionString: "http://" + s.httpAddr,
	}

	var err error
	var b bytes.Buffer
	json.NewEncoder(&b).Encode(command)
	for _, m := range peers {
		if m == s.httpAddr {
			continue
		}
		target := fmt.Sprintf("http://%s/cluster/join", strings.TrimSpace(m))
		glog.V(0).Infoln("Attempting to connect to:", target)

		err = postFollowingOneRedirect(target, "application/json", &b)

		if err != nil {
			glog.V(0).Infoln("Post returned error: ", err.Error())
			if _, ok := err.(*url.Error); ok {
				// If we receive a network error try the next member
				continue
			}
		} else {
			return nil
		}
	}

	return errors.New("Could not connect to any cluster peers")
}
//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	var ret []operation.DeleteResult
	for _, fid := range r.Form["fid"] {
		vid, id_cookie, err := operation.ParseFileId(fid)
		if err != nil {
			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
			continue
		}
		n := new(storage.Needle)
		volumeId, _ := storage.NewVolumeId(vid)
		n.ParsePath(id_cookie)
		glog.V(4).Infoln("batch deleting", n)
		cookie := n.Cookie
		if _, err := vs.store.Read(volumeId, n); err != nil {
			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
			continue
		}
		if n.Cookie != cookie {
			ret = append(ret, operation.DeleteResult{Fid: fid, Error: "File Random Cookie does not match."})
			glog.V(0).Infoln("deleting", fid, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
			return
		}
		if size, err := vs.store.Delete(volumeId, n); err != nil {
			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
		} else {
			ret = append(ret, operation.DeleteResult{Fid: fid, Size: int(size)})
		}
	}

	w.WriteHeader(http.StatusAccepted)

	writeJsonQuiet(w, r, ret)
}
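A hedged client-side sketch for this handler: the file ids go into repeated "fid" form values, which is what r.Form["fid"] iterates over. The "/delete" route and the server address below are assumptions for illustration only.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// repeated "fid" parameters, as read by r.Form["fid"] in batchDeleteHandler
	form := url.Values{}
	form.Add("fid", "3,01637037d6")
	form.Add("fid", "3,02d66e45f3")

	// "/delete" is an assumed route for the batch delete handler
	resp, err := http.PostForm("http://127.0.0.1:8080/delete", form)
	if err != nil {
		fmt.Println("post error:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}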
func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
	go func() {
		for {
			// every 3 pulse intervals, re-collect dead nodes and full volumes
			if t.IsLeader() {
				freshThreshHold := time.Now().Unix() - 3*t.pulse //3 times of sleep interval
				t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit)
			}
			time.Sleep(time.Duration(float32(t.pulse*1e3)*(1+rand.Float32())) * time.Millisecond)
		}
	}()
	go func(garbageThreshold string) {
		c := time.Tick(15 * time.Minute)
		if t.IsLeader() {
			for _ = range c { // vacuum once every 15 minutes
				t.Vacuum(garbageThreshold)
			}
		}
	}(garbageThreshold)
	go func() {
		for {
			select {
			case v := <-t.chanFullVolumes:
				t.SetVolumeCapacityFull(v) // mark this volume as full
			case dn := <-t.chanRecoveredDataNodes: // a DataNode has come back alive
				t.RegisterRecoveredDataNode(dn)
				glog.V(0).Infoln("DataNode", dn, "is back alive!")
			case dn := <-t.chanDeadDataNodes:
				t.UnRegisterDataNode(dn)
				glog.V(0).Infoln("DataNode", dn, "is dead!")
			}
		}
	}()
}
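The first goroutine's sleep adds random jitter: each iteration waits between one and two pulse intervals. Below is a minimal sketch of just that delay computation, using t.pulse = 5 seconds as an illustrative value.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	pulse := int64(5) // illustrative pulse, in seconds
	// same formula as in StartRefreshWritableVolumes: 5000-10000 ms here
	d := time.Duration(float32(pulse*1e3)*(1+rand.Float32())) * time.Millisecond
	fmt.Println("next refresh in", d)
}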
Example #6
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
	fh, openErr := os.Open(fullPathFilename)
	if openErr != nil {
		glog.V(0).Info("Failed to open file: ", fullPathFilename)
		return ret, openErr
	}
	ret.Reader = fh

	if fi, fiErr := fh.Stat(); fiErr != nil {
		glog.V(0).Info("Failed to stat file:", fullPathFilename)
		return ret, fiErr
	} else {

		// record the file's modification time and size
		ret.ModTime = fi.ModTime().UTC().Unix()
		ret.FileSize = fi.Size()
	}
	ext := strings.ToLower(path.Ext(fullPathFilename))
	ret.IsGzipped = ext == ".gz"
	ret.FileName = fullPathFilename
	if ret.IsGzipped { // the file is already gzipped: strip the ".gz" suffix from the stored file name
		ret.FileName = fullPathFilename[0 : len(fullPathFilename)-3]
	}
	if ext != "" {
		ret.MimeType = mime.TypeByExtension(ext)
	}

	return ret, nil
}
Example #7
func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
	if v := s.findVolume(i); v != nil {
		if v.readOnly {
			err = fmt.Errorf("Volume %d is read only!", i)
			return

		} else {

			// write the needle directly
			if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) {
				size, err = v.write(n)
			} else {
				err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize())
			}

			// when the volume nears the size limit, report the new size back to the master via Join
			if s.volumeSizeLimit < v.ContentSize()+3*uint64(size) {
				glog.V(0).Infoln("volume", i, "size", v.ContentSize(), "will exceed limit", s.volumeSizeLimit)
				if _, e := s.Join(); e != nil {
					glog.V(0).Infoln("error when reporting size:", e)
				}
			}
		}
		return
	}
	glog.V(0).Infoln("volume", i, "not found!")
	err = fmt.Errorf("Volume %d not found!", i)
	return
}
Example #8
func (v *Volume) ensureConvertIdxToCdb(fileName string) (cdbCanRead bool) {
	var indexFile *os.File
	var e error
	_, cdbCanRead, cdbCanWrite, cdbModTime := checkFile(fileName + ".cdb")
	_, idxCanRead, _, idxModeTime := checkFile(fileName + ".idx")
	if cdbCanRead && cdbModTime.After(idxModeTime) {
		return true
	}
	if !cdbCanWrite {
		return false
	}
	if !idxCanRead {
		glog.V(0).Infoln("Can not read file", fileName+".idx!")
		return false
	}
	glog.V(2).Infoln("opening file", fileName+".idx")
	if indexFile, e = os.Open(fileName + ".idx"); e != nil {
		glog.V(0).Infoln("Failed to read file", fileName+".idx !")
		return false
	}
	defer indexFile.Close()
	glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
	if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
		glog.V(0).Infof("error converting %s.idx to %s.cdb: %v", fileName, fileName, e)
		return false
	}
	return true
}
func (s *RaftServer) redirectToLeader(w http.ResponseWriter, req *http.Request) {
	if leader, e := s.topo.Leader(); e == nil {
		// an http POST does not automatically follow an http.StatusMovedPermanently redirect (see postFollowingOneRedirect)
		glog.V(0).Infoln("Redirecting to", http.StatusMovedPermanently, "http://"+leader+req.URL.Path)
		http.Redirect(w, req, "http://"+leader+req.URL.Path, http.StatusMovedPermanently)
	} else {
		glog.V(0).Infoln("Error: Leader Unknown")
		http.Error(w, "Leader unknown", http.StatusInternalServerError)
	}
}
Example #10
func (v *Volume) Compact() error {
	glog.V(3).Infof("Compacting ...")
	//no need to lock for copy on write
	//v.accessLock.Lock()
	//defer v.accessLock.Unlock()
	//glog.V(3).Infof("Got Compaction lock...")

	filePath := v.FileName()
	glog.V(3).Infof("creating copies for volume %d ...", v.Id)
	return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx")
}
Example #11
func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) {
	var (
		dst, idx *os.File
	)
	if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
		return
	}
	defer dst.Close()

	if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
		return
	}
	defer idx.Close()

	nm := NewNeedleMap(idx)
	new_offset := int64(SuperBlockSize)

	now := uint64(time.Now().Unix())

	// copy needles from the old data file into the new one
	err = ScanVolumeFile(v.dir, v.Collection, v.Id,
		func(superBlock SuperBlock) error {
			_, err = dst.Write(superBlock.Bytes())
			return err
		},
		true,
		func(n *Needle, offset int64) error {

			// skip needles whose TTL has already expired
			if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) {
				return nil
			}

			nv, ok := v.nm.Get(n.Id)
			glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
			if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 {
				if _, err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil {
					return fmt.Errorf("cannot put needle: %s", err)
				}
				if _, err = n.Append(dst, v.Version()); err != nil {
					return fmt.Errorf("cannot append needle: %s", err)
				}
				new_offset += n.DiskSize()
				glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", new_offset, "data_size", n.Size)
			}
			return nil
		})

	return
}
Example #12
func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, jwt security.EncodedJwt) (*UploadResult, error) {
	body_buf := bytes.NewBufferString("")
	body_writer := multipart.NewWriter(body_buf)
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename)))
	if mtype == "" {
		mtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename)))
	}
	if mtype != "" {
		h.Set("Content-Type", mtype)
	}
	if isGzipped {
		h.Set("Content-Encoding", "gzip")
	}
	if jwt != "" {
		h.Set("Authorization", "BEARER "+string(jwt))
	}
	file_writer, cp_err := body_writer.CreatePart(h)
	if cp_err != nil {
		glog.V(0).Infoln("error creating form file", cp_err.Error())
		return nil, cp_err
	}
	if err := fillBufferFunction(file_writer); err != nil {
		glog.V(0).Infoln("error copying data", err)
		return nil, err
	}
	content_type := body_writer.FormDataContentType()
	if err := body_writer.Close(); err != nil {
		glog.V(0).Infoln("error closing body", err)
		return nil, err
	}
	resp, post_err := client.Post(uploadUrl, content_type, body_buf)
	if post_err != nil {
		glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error())
		return nil, post_err
	}
	defer resp.Body.Close()
	resp_body, ra_err := ioutil.ReadAll(resp.Body)
	if ra_err != nil {
		return nil, ra_err
	}
	var ret UploadResult
	unmarshal_err := json.Unmarshal(resp_body, &ret)
	if unmarshal_err != nil {
		glog.V(0).Infoln("failing to read upload resonse", uploadUrl, string(resp_body))
		return nil, unmarshal_err
	}
	if ret.Error != "" {
		return nil, errors.New(ret.Error)
	}
	return &ret, nil
}
Example #13
func GzipData(input []byte) ([]byte, error) {
	buf := new(bytes.Buffer)
	w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
	if _, err := w.Write(input); err != nil {
		glog.V(2).Infoln("error compressing data:", err)
		return nil, err
	}
	if err := w.Close(); err != nil {
		glog.V(2).Infoln("error closing compressed data:", err)
		return nil, err
	}
	return buf.Bytes(), nil
}
func LoadNewNeedleMap(file *os.File) CompactMap {
	m := NewCompactMap()
	bytes := make([]byte, 16*1024)
	count, e := file.Read(bytes)
	if count > 0 {
		fstat, _ := file.Stat()
		glog.V(0).Infoln("Loading index file", fstat.Name(), "size", fstat.Size())
	}

	for count > 0 && e == nil {
		for i := 0; i < count; i += 16 {
			key := util.BytesToUint64(bytes[i : i+8])
			offset := util.BytesToUint32(bytes[i+8 : i+12])
			size := util.BytesToUint32(bytes[i+12 : i+16])
			if offset > 0 {
				m.Set(Key(key), offset, size)
			} else {
				//delete(m, key)
			}
		}

		count, e = file.Read(bytes)
	}
	return m
}
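Each index record decoded above is 16 bytes: an 8-byte key, a 4-byte offset, and a 4-byte size. Below is a sketch of producing one such record with the standard encoding/binary package; the big-endian layout is assumed to match util.BytesToUint64/BytesToUint32 and should be verified against those helpers.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeIndexEntry packs key/offset/size into the 16-byte layout that
// LoadNewNeedleMap decodes. Hypothetical helper; endianness is assumed.
func encodeIndexEntry(key uint64, offset, size uint32) []byte {
	b := make([]byte, 16)
	binary.BigEndian.PutUint64(b[0:8], key)
	binary.BigEndian.PutUint32(b[8:12], offset)
	binary.BigEndian.PutUint32(b[12:16], size)
	return b
}

func main() {
	fmt.Printf("% x\n", encodeIndexEntry(42, 128, 1024))
}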
Example #15
func upload_file_id_list(fileUrl, filename string, fids []string) error {
	var buf bytes.Buffer
	buf.WriteString(strings.Join(fids, "\n"))
	glog.V(4).Info("Uploading final list ", filename, " to ", fileUrl, "...")
	_, e := Upload(fileUrl, filename, &buf, false, "text/plain")
	return e
}
Example #16
func (v *Volume) freeze() error {
	if v.readOnly {
		return nil
	}
	nm, ok := v.nm.(*NeedleMap)
	if !ok {
		return nil
	}
	v.accessLock.Lock()
	defer v.accessLock.Unlock()
	bn, _ := baseFilename(v.dataFile.Name())
	cdbFn := bn + ".cdb"
	glog.V(0).Infof("converting %s to %s", nm.indexFile.Name(), cdbFn)
	err := DumpNeedleMapToCdb(cdbFn, nm)
	if err != nil {
		return err
	}
	if v.nm, err = OpenCdbMap(cdbFn); err != nil {
		return err
	}
	nm.indexFile.Close()
	os.Remove(nm.indexFile.Name())
	v.readOnly = true
	return nil
}
Example #17
/*
# Basic Usage:
curl http://localhost:9333/dir/assign
{"count":1,"fid":"3,01637037d6","url":"127.0.0.1:8080",
"publicUrl":"localhost:8080"}

# To assign with a specific replication type:
curl "http://localhost:9333/dir/assign?replication=001"

# To specify how many file ids to reserve
curl "http://localhost:9333/dir/assign?count=5"

# To assign a specific data center
curl "http://localhost:9333/dir/assign?dataCenter=dc1"
*/
func Assign(server string, count int, replication string, collection string, ttl string) (*AssignResult, error) {
	values := make(url.Values)
	values.Add("count", strconv.Itoa(count))
	if replication != "" {
		values.Add("replication", replication)
	}
	if collection != "" {
		values.Add("collection", collection)
	}
	if ttl != "" {
		values.Add("ttl", ttl)
	}
	jsonBlob, err := util.Post("http://"+server+"/dir/assign", values)
	glog.V(2).Info("assign result :", string(jsonBlob))
	if err != nil {
		return nil, err
	}
	var ret AssignResult
	err = json.Unmarshal(jsonBlob, &ret)
	if err != nil {
		return nil, err
	}
	if ret.Count <= 0 {
		return nil, errors.New(ret.Error)
	}
	return &ret, nil
}
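A short usage sketch for Assign, assuming the surrounding package: the master address matches the curl examples above, while the Fid and Url field names on AssignResult are inferred from the JSON keys and should be checked against the struct definition.

func assignOneFileId() {
	ret, err := Assign("localhost:9333", 1, "001", "", "")
	if err != nil {
		glog.V(0).Infoln("assign failed:", err)
		return
	}
	glog.V(0).Infoln("assigned fid", ret.Fid, "on volume server", ret.Url) // assumed field names
}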
Example #18
func distributedOperation(masterNode string, store *storage.Store, volumeId storage.VolumeId, op func(location operation.Location) bool) bool {
	if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil {
		length := 0
		selfUrl := (store.Ip + ":" + strconv.Itoa(store.Port))
		results := make(chan bool)
		for _, location := range lookupResult.Locations {

			// skip this node itself
			if location.Url != selfUrl {
				length++

				// replicate asynchronously
				go func(location operation.Location, results chan bool) {
					results <- op(location)
				}(location, results)
			}
		}
		ret := true

		// wait for every operation to finish, whether it succeeded or not
		for i := 0; i < length; i++ {
			ret = ret && <-results
		}
		return ret
	} else {
		glog.V(0).Infoln("Failed to lookup for", volumeId, lookupErr.Error())
	}
	return false
}
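A hedged sketch of a caller passing the op closure; it assumes the surrounding package plus net/url, and the "/delete_replica" path with its "fid" parameter is purely illustrative, not an actual volume server route.

// replicate a delete to every other replica location of the volume
func replicateDelete(masterNode string, store *storage.Store, volumeId storage.VolumeId, fid string) bool {
	return distributedOperation(masterNode, store, volumeId, func(location operation.Location) bool {
		// "/delete_replica" is a hypothetical endpoint used only for illustration
		_, err := util.Post("http://"+location.Url+"/delete_replica", url.Values{"fid": {fid}})
		if err != nil {
			glog.V(0).Infoln("replica delete failed on", location.Url, err)
			return false
		}
		return true
	})
}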
Example #19
/*
If this node is the leader, handle the request directly; otherwise forward it to the leader through a reverse proxy.
*/
func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if ms.Topo.IsLeader() { // this Topology is the leader
			f(w, r)

			// not the leader itself, but a leader is currently known
		} else if ms.Topo.RaftServer != nil && ms.Topo.RaftServer.Leader() != "" {
			ms.bounedLeaderChan <- 1
			defer func() { <-ms.bounedLeaderChan }()

			// resolve the leader's URL
			targetUrl, err := url.Parse("http://" + ms.Topo.RaftServer.Leader())
			if err != nil {
				writeJsonQuiet(w, r, map[string]interface{}{"error": "Leader URL http://" + ms.Topo.RaftServer.Leader() + " Parse Error " + err.Error()})
				return
			}
			glog.V(4).Infoln("proxying to leader", ms.Topo.RaftServer.Leader())
			proxy := httputil.NewSingleHostReverseProxy(targetUrl)
			proxy.Transport = util.Transport
			proxy.ServeHTTP(w, r)
		} else {
			//drop it to the floor
			//writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader()))
		}
	}
}
Example #20
func (t *Topology) ProcessJoinMessage(joinMessage *operation.JoinMessage) {
	t.Sequence.SetMax(*joinMessage.MaxFileKey)
	dcName, rackName := t.configuration.Locate(*joinMessage.Ip, *joinMessage.DataCenter, *joinMessage.Rack)
	dc := t.GetOrCreateDataCenter(dcName)
	rack := dc.GetOrCreateRack(rackName)
	dn := rack.FindDataNode(*joinMessage.Ip, int(*joinMessage.Port))

	// if a data node that already joined is joining again, unregister the previous registration first
	if *joinMessage.IsInit && dn != nil {
		t.UnRegisterDataNode(dn)
	}
	dn = rack.GetOrCreateDataNode(*joinMessage.Ip, int(*joinMessage.Port), *joinMessage.PublicUrl, int(*joinMessage.MaxVolumeCount))
	var volumeInfos []storage.VolumeInfo
	for _, v := range joinMessage.Volumes {
		if vi, err := storage.NewVolumeInfo(v); err == nil {
			volumeInfos = append(volumeInfos, vi)
		} else {
			glog.V(0).Infoln("Fail to convert joined volume information:", err.Error())
		}
	}
	// remove all volumes no longer reported in volumeInfos and unregister them from the volume layout
	deletedVolumes := dn.UpdateVolumes(volumeInfos)
	for _, v := range volumeInfos {
		t.RegisterVolumeLayout(v, dn)
	}
	for _, v := range deletedVolumes {
		t.UnRegisterVolumeLayout(v, dn)
	}
}
Example #21
func (fl *FileListInLevelDb) ListFiles(dirId filer.DirectoryId, lastFileName string, limit int) (files []filer.FileEntry) {
	glog.V(4).Infoln("directory", dirId, "lastFileName", lastFileName, "limit", limit)
	dirKey := genKey(dirId, "")
	iter := fl.db.NewIterator(&util.Range{Start: genKey(dirId, lastFileName)}, nil)
	limitCounter := 0
	for iter.Next() {
		key := iter.Key()
		if !bytes.HasPrefix(key, dirKey) {
			break
		}
		fileName := string(key[len(dirKey):])
		if fileName == lastFileName {
			continue
		}
		limitCounter++
		if limit > 0 {
			if limitCounter > limit {
				break
			}
		}
		files = append(files, filer.FileEntry{Name: fileName, Id: filer.FileId(string(iter.Value()))})
	}
	iter.Release()
	return
}
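A hedged pagination sketch built on the lastFileName convention above; it assumes the surrounding package, and the page size of 100 is arbitrary.

// walk a directory page by page, feeding the last name of each page
// back in as lastFileName for the next call
func listAllFiles(fl *FileListInLevelDb, dirId filer.DirectoryId) (all []filer.FileEntry) {
	lastFileName := ""
	for {
		page := fl.ListFiles(dirId, lastFileName, 100)
		if len(page) == 0 {
			return
		}
		all = append(all, page...)
		lastFileName = page[len(page)-1].Name
	}
}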
Example #22
func TestCdbMap1Mem(t *testing.T) {
	var nm NeedleMapper
	i := 0
	visit := func(nv NeedleValue) error {
		i++
		return nil
	}

	a := getMemStats()
	t.Logf("opening %s.cdb", testIndexFilename)
	nm, err := OpenCdbMap(testIndexFilename + ".cdb")
	if err != nil {
		t.Fatalf("error opening cdb: %s", err)
	}
	b := getMemStats()
	glog.V(0).Infof("opening cdb consumed %d bytes", b-a)
	defer nm.Close()

	a = getMemStats()
	if err = nm.Visit(visit); err != nil {
		t.Fatalf("error visiting %s: %s", nm, err)
	}
	b = getMemStats()
	glog.V(0).Infof("visit cdb %d consumed %d bytes", i, b-a)
	nm.Close()

	indexFile, err := os.Open(testIndexFilename)
	if err != nil {
		t.Fatalf("error opening idx: %s", err)
	}
	a = getMemStats()
	nm, err = LoadNeedleMap(indexFile)
	if err != nil {
		t.Fatalf("error loading idx: %s", err)
	}
	defer nm.Close()
	b = getMemStats()
	glog.V(0).Infof("opening idx consumed %d bytes", b-a)

	i = 0
	a = getMemStats()
	if err = nm.Visit(visit); err != nil {
		t.Fatalf("error visiting %s: %s", nm, err)
	}
	b = getMemStats()
	glog.V(0).Infof("visit idx %d consumed %d bytes", i, b-a)
}
Example #23
func (v *Volume) Size() int64 {
	stat, e := v.dataFile.Stat()
	if e == nil {
		return stat.Size()
	}
	glog.V(0).Infof("Failed to read file size %s %v", v.dataFile.Name(), e)
	return -1
}
Example #24
// SetRaftServer sets the raft server and listens for leader change events
func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
	ms.Topo.RaftServer = raftServer.raftServer

	// subscribe to leader change events
	ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) {
		if ms.Topo.RaftServer.Leader() != "" {
			glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
		}
	})
	if ms.Topo.IsLeader() {
		glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
	} else {
		if ms.Topo.RaftServer.Leader() != "" {
			glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "is the leader.")
		}
	}
}
Example #25
func (v *Volume) write(n *Needle) (size uint32, err error) {
	glog.V(4).Infof("writing needle %s", NewFileIdFromNeedle(v.Id, n).String())
	if v.readOnly {
		err = fmt.Errorf("%s is read-only", v.dataFile.Name())
		return
	}
	v.accessLock.Lock()
	defer v.accessLock.Unlock()
	if v.isFileUnchanged(n) {
		size = n.DataSize
		glog.V(4).Infof("needle is unchanged!")
		return
	}
	var offset int64
	if offset, err = v.dataFile.Seek(0, 2); err != nil {
		glog.V(0).Infof("failed to seek the end of file: %v", err)
		return
	}

	// ensure the write starts at a position aligned to NeedlePaddingSize
	if offset%NeedlePaddingSize != 0 {
		offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
		if offset, err = v.dataFile.Seek(offset, 0); err != nil {
			glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err)
			return
		}
	}

	if size, err = n.Append(v.dataFile, v.Version()); err != nil {
		if e := v.dataFile.Truncate(offset); e != nil {
			err = fmt.Errorf("%s\ncannot truncate %s: %v", err, v.dataFile.Name(), e)
		}
		return
	}
	nv, ok := v.nm.Get(n.Id)
	if !ok || int64(nv.Offset)*NeedlePaddingSize < offset {
		if _, err = v.nm.Put(n.Id, uint32(offset/NeedlePaddingSize), n.Size); err != nil {
			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
		}
	}
	if v.lastModifiedTime < n.LastModified {
		v.lastModifiedTime = n.LastModified
	}
	return
}
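The alignment step in write rounds the offset up to the next multiple of NeedlePaddingSize before appending. Below is a tiny standalone example of that arithmetic; the padding size of 8 is illustrative, the real constant is defined elsewhere in the package.

package main

import "fmt"

func main() {
	const NeedlePaddingSize = 8 // illustrative value only
	offset := int64(13)
	if offset%NeedlePaddingSize != 0 {
		offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
	}
	fmt.Println(offset) // 16, the next aligned position after 13
}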
// vacuum (compaction) check handler
func (vs *VolumeServer) vacuumVolumeCheckHandler(w http.ResponseWriter, r *http.Request) {
	err, ret := vs.store.CheckCompactVolume(r.FormValue("volume"), r.FormValue("garbageThreshold"))
	if err == nil {
		writeJsonQuiet(w, r, map[string]interface{}{"error": "", "result": ret})
	} else {
		writeJsonQuiet(w, r, map[string]interface{}{"error": err.Error(), "result": false})
	}
	glog.V(2).Infoln("checked compacting volume =", r.FormValue("volume"), "garbageThreshold =", r.FormValue("garbageThreshold"), "vacuum =", ret)
}
Example #27
// raft command: raise the topology's max volume id to the commanded value
func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
	topo := server.Context().(*Topology)
	before := topo.GetMaxVolumeId()
	topo.UpAdjustMaxVolumeId(c.MaxVolumeId)

	glog.V(4).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())

	return nil, nil
}
func (vs *VolumeServer) vacuumVolumeCommitHandler(w http.ResponseWriter, r *http.Request) {
	err := vs.store.CommitCompactVolume(r.FormValue("volume"))
	if err == nil {
		writeJsonQuiet(w, r, map[string]interface{}{"error": ""})
	} else {
		writeJsonQuiet(w, r, map[string]string{"error": err.Error()})
	}
	glog.V(2).Infoln("commit compact volume =", r.FormValue("volume"), ", error =", err)
}
Example #29
func (t *Topology) loadConfiguration(configurationFile string) error {
	b, e := ioutil.ReadFile(configurationFile)
	if e == nil {
		t.configuration, e = NewConfiguration(b)
		return e
	} else {
		glog.V(0).Infoln("Using default configurations.")
	}
	return nil
}
Example #30
func UnGzipData(input []byte) ([]byte, error) {
	buf := bytes.NewBuffer(input)
	r, err := gzip.NewReader(buf)
	if err != nil {
		glog.V(2).Infoln("error creating gzip reader:", err)
		return nil, err
	}
	defer r.Close()
	output, err := ioutil.ReadAll(r)
	if err != nil {
		glog.V(2).Infoln("error uncompressing data:", err)
	}
	return output, err
}
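A small round-trip sketch combining UnGzipData with GzipData from Example #13, assuming both helpers live in the same package:

func gzipRoundTrip() {
	original := []byte("hello world")
	zipped, err := GzipData(original)
	if err != nil {
		glog.V(0).Infoln("gzip failed:", err)
		return
	}
	unzipped, err := UnGzipData(zipped)
	if err != nil {
		glog.V(0).Infoln("gunzip failed:", err)
		return
	}
	glog.V(0).Infoln("round trip ok:", string(unzipped) == string(original))
}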