// batchVacuumVolumeCompact asks every replica location of the volume to compact
// itself, and reports whether all replicas completed successfully within the timeout.
func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
    vl.removeFromWritable(vid)
    ch := make(chan bool, locationlist.Length())
    for index, dn := range locationlist.list {
        go func(index int, url string, vid storage.VolumeId) {
            glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
            if e := vacuumVolume_Compact(url, vid); e != nil {
                glog.V(0).Infoln(index, "Error when vacuuming", vid, "on", url, e)
                ch <- false
            } else {
                glog.V(0).Infoln(index, "Complete vacuuming", vid, "on", url)
                ch <- true
            }
        }(index, dn.Url(), vid)
    }
    isVacuumSuccess := true
    for range locationlist.list {
        select {
        case success := <-ch:
            // a single failed replica fails the whole batch
            isVacuumSuccess = isVacuumSuccess && success
        case <-time.After(30 * time.Minute):
            // note: a bare "break" here would only exit the select, not the loop;
            // treat the timeout as a failure and keep draining the remaining results
            isVacuumSuccess = false
        }
    }
    return isVacuumSuccess
}
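// checkJwt verifies the JWT carried by the request. Whitelisted requests and
// servers without a configured SecretKey are allowed through; otherwise a
// missing or invalid token is rejected.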
func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error {
    if g.checkWhiteList(w, r) == nil {
        return nil
    }
    if len(g.SecretKey) == 0 {
        return nil
    }
    tokenStr := GetJwt(r)
    if tokenStr == "" {
        return ErrUnauthorized
    }
    // Verify the token
    token, err := DecodeJwt(g.SecretKey, tokenStr)
    if err != nil {
        glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err)
        return ErrUnauthorized
    }
    if !token.Valid {
        glog.V(1).Infof("Token invalid from %s: %v", r.RemoteAddr, tokenStr)
        return ErrUnauthorized
    }
    glog.V(1).Infof("No permission from %s", r.RemoteAddr)
    return fmt.Errorf("no write permission from %s", r.RemoteAddr)
}
func (mn *MasterNodes) Reset() {
    glog.V(4).Infof("Resetting master nodes: %v", mn)
    if len(mn.nodes) > 1 && mn.lastNode >= 0 {
        glog.V(0).Infof("Reset master %s from: %v", mn.nodes[mn.lastNode], mn.nodes)
        // mark the last node index as negative so the next FindMaster re-discovers the masters
        mn.lastNode = -mn.lastNode - 1
    }
}
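// FindMaster returns a known master node, discovering the current masters from
// the configured seed nodes when none has been selected yet.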
func (mn *MasterNodes) FindMaster() (string, error) {
    if len(mn.nodes) == 0 {
        return "", errors.New("No master node found!")
    }
    if mn.lastNode < 0 {
        for _, m := range mn.nodes {
            glog.V(4).Infof("Listing masters on %s", m)
            if masters, e := operation.ListMasters(m); e == nil {
                if len(masters) == 0 {
                    continue
                }
                mn.nodes = append(masters, m)
                mn.lastNode = rand.Intn(len(mn.nodes))
                glog.V(2).Infof("current master nodes: %v", mn)
                break
            } else {
                glog.V(4).Infof("Failed listing masters on %s: %v", m, e)
            }
        }
    }
    if mn.lastNode < 0 {
        return "", errors.New("No master node available!")
    }
    return mn.nodes[mn.lastNode], nil
}
func (ms *MasterServer) dirJoinHandler(w http.ResponseWriter, r *http.Request) {
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        writeJsonError(w, r, http.StatusBadRequest, err)
        return
    }
    joinMessage := &operation.JoinMessage{}
    if err = proto.Unmarshal(body, joinMessage); err != nil {
        writeJsonError(w, r, http.StatusBadRequest, err)
        return
    }
    if *joinMessage.Ip == "" {
        *joinMessage.Ip = r.RemoteAddr[0:strings.LastIndex(r.RemoteAddr, ":")]
    }
    if glog.V(4) {
        if jsonData, jsonError := json.Marshal(joinMessage); jsonError != nil {
            glog.V(0).Infoln("json marshaling error: ", jsonError)
            writeJsonError(w, r, http.StatusBadRequest, jsonError)
            return
        } else {
            glog.V(4).Infoln("Proto size", len(body), "json size", len(jsonData), string(jsonData))
        }
    }
    ms.Topo.ProcessJoinMessage(joinMessage)
    writeJsonQuiet(w, r, http.StatusOK, operation.JoinResult{
        VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024,
        SecretKey:       string(ms.guard.SecretKey),
    })
}
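// getVolumeRawDataHandler streams a volume's raw .dat content, lz4-compressed,
// honoring at most one HTTP Range request; with origin=true the original .dat
// file is served directly.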
func (vs *VolumeServer) getVolumeRawDataHandler(w http.ResponseWriter, r *http.Request) {
    v, e := vs.getVolume("volume", r)
    if v == nil {
        http.Error(w, e.Error(), http.StatusBadRequest)
        return
    }
    if origin, err := strconv.ParseBool(r.FormValue("origin")); err == nil && origin {
        http.ServeFile(w, r, v.FileName()+".dat")
        return
    }
    cr, e := v.GetVolumeCleanReader()
    if e != nil {
        http.Error(w, fmt.Sprintf("Get volume clean reader: %v", e), http.StatusInternalServerError)
        return
    }
    totalSize, e := cr.Size()
    if e != nil {
        http.Error(w, fmt.Sprintf("Get volume size: %v", e), http.StatusInternalServerError)
        return
    }
    w.Header().Set("Accept-Ranges", "bytes")
    w.Header().Set("Content-Disposition", fmt.Sprintf(`filename="%d.dat.lz4"`, v.Id))

    rangeReq := r.Header.Get("Range")
    if rangeReq == "" {
        w.Header().Set("X-Content-Length", strconv.FormatInt(totalSize, 10))
        w.Header().Set("Content-Encoding", "lz4")
        lz4w := lz4.NewWriter(w)
        if _, e = io.Copy(lz4w, cr); e != nil {
            glog.V(4).Infoln("response write error:", e)
        }
        lz4w.Close()
        return
    }

    ranges, e := parseRange(rangeReq, totalSize)
    if e != nil {
        http.Error(w, e.Error(), http.StatusRequestedRangeNotSatisfiable)
        return
    }
    if len(ranges) != 1 {
        http.Error(w, "Only support one range", http.StatusNotImplemented)
        return
    }
    ra := ranges[0]
    if _, e := cr.Seek(ra.start, 0); e != nil {
        http.Error(w, e.Error(), http.StatusInternalServerError)
        return
    }
    w.Header().Set("X-Content-Length", strconv.FormatInt(ra.length, 10))
    w.Header().Set("Content-Range", ra.contentRange(totalSize))
    w.Header().Set("Content-Encoding", "lz4")
    w.WriteHeader(http.StatusPartialContent)
    lz4w := lz4.NewWriter(w)
    if _, e = io.CopyN(lz4w, cr, ra.length); e != nil {
        glog.V(2).Infoln("response write error:", e)
    }
    lz4w.Close()
}
// Join joins an existing cluster.
func (s *RaftServer) Join(peers []string) error {
    command := &raft.DefaultJoinCommand{
        Name:             s.raftServer.Name(),
        ConnectionString: "http://" + s.httpAddr,
    }

    var err error
    var b bytes.Buffer
    json.NewEncoder(&b).Encode(command)
    for _, m := range peers {
        if m == s.httpAddr {
            continue
        }
        target := fmt.Sprintf("http://%s/cluster/join", strings.TrimSpace(m))
        glog.V(0).Infoln("Attempting to connect to:", target)
        err = postFollowingOneRedirect(target, "application/json", &b)
        if err != nil {
            glog.V(0).Infoln("Post returned error: ", err.Error())
            if _, ok := err.(*url.Error); ok {
                // If we receive a network error, try the next member
                continue
            }
        } else {
            return nil
        }
    }
    return errors.New("Could not connect to any cluster peers")
}
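// Vacuum walks every collection and volume layout, and for each volume whose
// garbage ratio exceeds the threshold runs a check, compact, commit cycle on
// all of its replicas.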
func (t *Topology) Vacuum(garbageThreshold string) int {
    glog.V(0).Infoln("Start vacuum on demand")
    for item := range t.collectionMap.IterItems() {
        c := item.Value.(*Collection)
        gcThreshold := garbageThreshold
        if gcThreshold == "" {
            gcThreshold = t.CollectionSettings.GetGarbageThreshold(c.Name)
        }
        glog.V(1).Infoln("vacuum on collection:", c.Name)
        for item1 := range c.storageType2VolumeLayout.IterItems() {
            if item1.Value == nil {
                continue
            }
            volumeLayout := item1.Value.(*VolumeLayout)
            for _, vid := range volumeLayout.ListVolumeId() {
                locationList := volumeLayout.Lookup(vid)
                if locationList == nil {
                    continue
                }
                glog.V(1).Infoln("vacuum on collection:", c.Name, "volume", vid)
                if batchVacuumVolumeCheck(volumeLayout, vid, locationList, gcThreshold) {
                    if batchVacuumVolumeCompact(volumeLayout, vid, locationList) {
                        batchVacuumVolumeCommit(volumeLayout, vid, locationList)
                    }
                }
            }
        }
    }
    glog.V(0).Infoln("End vacuum.")
    return 0
}
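// RegisterVolume records that dn holds a replica of volume v, and updates the
// volume's writable state based on read-only replicas, replica count, and size.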
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
    vl.accessLock.Lock()
    defer vl.accessLock.Unlock()

    if _, ok := vl.vid2location[v.Id]; !ok {
        vl.vid2location[v.Id] = NewVolumeLocationList()
    }
    vl.vid2location[v.Id].Set(dn)
    glog.V(4).Infoln("volume", v.Id, "added to dn", dn.Id(), "len", vl.vid2location[v.Id].Length(), "copy", v.ReplicaPlacement.GetCopyCount())
    for _, dn := range vl.vid2location[v.Id].list {
        if v_info, err := dn.GetVolumesById(v.Id); err == nil {
            if v_info.ReadOnly {
                glog.V(3).Infof("vid %d removed from writable", v.Id)
                vl.removeFromWritable(v.Id)
                return
            }
        } else {
            glog.V(3).Infof("vid %d removed from writable", v.Id)
            vl.removeFromWritable(v.Id)
            return
        }
    }
    if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) {
        if _, ok := vl.oversizedVolumes[v.Id]; !ok {
            vl.addToWritable(v.Id)
        }
    } else {
        vl.rememberOversizedVolumne(v)
        vl.removeFromWritable(v.Id)
    }
}
func (vs *VolumeServer) getNeedleHandler(w http.ResponseWriter, r *http.Request) {
    fid, err := storage.NewFileIdFromNid(r.FormValue("volume"), r.FormValue("nid"))
    if err != nil {
        glog.V(2).Infoln("parsing fid error:", err, r.URL.Path)
        w.WriteHeader(http.StatusBadRequest)
        return
    }
    n, e := vs.store.ReadLocalNeedle(fid)
    if e != nil {
        // wrap the read error, not the (nil) fid parse error
        readErr := fmt.Errorf("read needle (%v) error: %v", fid, e)
        glog.V(2).Infoln(readErr)
        writeJsonError(w, r, http.StatusNotFound, readErr)
        return
    }
    w.Header().Set("Seaweed-Flags", strconv.FormatInt(int64(n.Flags), 16))
    w.Header().Set("Seaweed-Checksum", strconv.FormatInt(int64(n.Checksum), 16))
    if n.HasLastModifiedDate() {
        w.Header().Set("Seaweed-LastModified", strconv.FormatUint(n.LastModified, 16))
    }
    if n.HasName() && n.NameSize > 0 {
        w.Header().Set("Seaweed-Name", string(n.Name))
    }
    if n.HasMime() && n.MimeSize > 0 {
        w.Header().Set("Seaweed-Mime", string(n.Mime))
    }
    w.Write(n.Data)
}
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
    fh, openErr := os.Open(fullPathFilename)
    if openErr != nil {
        glog.V(0).Info("Failed to open file: ", fullPathFilename)
        return ret, openErr
    }
    ret.Reader = fh

    if fi, fiErr := fh.Stat(); fiErr != nil {
        glog.V(0).Info("Failed to stat file: ", fullPathFilename)
        return ret, fiErr
    } else {
        ret.ModTime = fi.ModTime().UTC().Unix()
        ret.FileSize = fi.Size()
    }
    ext := strings.ToLower(path.Ext(fullPathFilename))
    ret.IsGzipped = ext == ".gz"
    if ret.IsGzipped {
        // strip the ".gz" suffix so the stored name matches the original file;
        // the unconditional overwrite in the earlier version discarded this value
        ret.FileName = fullPathFilename[0 : len(fullPathFilename)-3]
    } else {
        ret.FileName = fullPathFilename
    }
    if ext != "" {
        ret.MimeType = mime.TypeByExtension(ext)
    }
    return ret, nil
}
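// distributedOperation looks up all other locations of the volume and runs op
// against each of them concurrently, returning true only if every replica
// succeeds and the replica count satisfies the volume's replica placement.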
func distributedOperation(masterNode string, store *storage.Store, volumeId storage.VolumeId, op func(location operation.Location) bool) bool {
    collection := ""
    if v := store.GetVolume(volumeId); v != nil {
        collection = v.Collection
    }
    if lookupResult, lookupErr := operation.LookupNoCache(masterNode, volumeId.String(), collection); lookupErr == nil {
        length := 0
        selfUrl := net.JoinHostPort(store.GetIP(), strconv.Itoa(store.Port))
        results := make(chan bool)
        for _, location := range lookupResult.Locations {
            if location.Url != selfUrl {
                length++
                go func(location operation.Location, results chan bool) {
                    results <- op(location)
                }(location, results)
            }
        }
        ret := true
        for i := 0; i < length; i++ {
            ret = ret && <-results
        }
        if rp := store.GetVolumeReplicaPlacement(volumeId); rp != nil {
            if length+1 < rp.GetCopyCount() {
                glog.V(0).Infof("replicating operations [%d] is less than volume's replication copy count [%d]", length+1, rp.GetCopyCount())
                ret = false
            }
        }
        return ret
    } else {
        glog.V(0).Infoln("Failed to lookup for", volumeId, lookupErr.Error())
    }
    return false
}
// WalkIndexFile walks through the index file and calls fn with each key, offset, size.
// It stops with the error returned by the fn function.
func WalkIndexFile(r *os.File, fn func(key uint64, offset, size uint32) error) error {
    var readerOffset int64
    bytes := make([]byte, 16*RowsToRead)
    count, e := r.ReadAt(bytes, readerOffset)
    glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
    readerOffset += int64(count)
    var (
        key          uint64
        offset, size uint32
        i            int
    )

    for (count > 0 && e == nil) || e == io.EOF {
        for i = 0; i+16 <= count; i += 16 {
            key, offset, size = idxFileEntry(bytes[i : i+16])
            if e = fn(key, offset, size); e != nil {
                return e
            }
        }
        if e == io.EOF {
            return nil
        }
        count, e = r.ReadAt(bytes, readerOffset)
        glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
        readerOffset += int64(count)
    }
    return e
}
func (ms *MasterServer) dirJoin2Handler(w http.ResponseWriter, r *http.Request) {
    joinResp := &weedpb.JoinResponse{}
    joinMsgV2 := &weedpb.JoinMessageV2{}
    if err := readObjRequest(r, joinMsgV2); err != nil {
        joinResp.Error = err.Error()
        writeObjResponse(w, r, http.StatusBadRequest, joinResp)
        return
    }
    if joinMsgV2.Ip == "" {
        if ip, _, e := net.SplitHostPort(r.RemoteAddr); e == nil {
            joinMsgV2.Ip = ip
        } else {
            glog.V(2).Infof("SplitHostPort (%s) error, %v", r.RemoteAddr, e)
            joinMsgV2.Ip = r.RemoteAddr
        }
    }
    if glog.V(4) {
        jsonData, _ := json.Marshal(joinMsgV2)
        glog.V(4).Infoln("join proto:", string(jsonData))
    }
    ms.Topo.ProcessJoinMessageV2(joinMsgV2)
    joinResp.JoinKey = ms.Topo.GetJoinKey()
    if joinMsgV2.JoinKey != joinResp.JoinKey {
        joinResp.JoinIp = joinMsgV2.Ip
        joinResp.VolumeSizeLimit = ms.Topo.GetVolumeSizeLimit()
        joinResp.SecretKey = string(ms.guard.GetSecretKey())
        joinResp.CollectionSettings = ms.Topo.CollectionSettings.ToPbMessage()
    }
    writeObjResponse(w, r, http.StatusOK, joinResp)
}
func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
    go func() {
        for {
            if t.IsLeader() {
                freshThreshHold := time.Now().Unix() - 3*t.pulse // 3 times the sleep interval
                t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit)
            }
            time.Sleep(time.Duration(float32(t.pulse*1e3)*(1+rand.Float32())) * time.Millisecond)
        }
    }()
    go func(garbageThreshold string) {
        c := time.Tick(15 * time.Minute)
        for range c {
            if t.IsLeader() {
                t.Vacuum(garbageThreshold)
            }
        }
    }(garbageThreshold)
    go func() {
        for {
            select {
            case v := <-t.chanFullVolumes:
                t.SetVolumeCapacityFull(v)
            case dn := <-t.chanRecoveredDataNodes:
                t.RegisterRecoveredDataNode(dn)
                glog.V(0).Infof("Recovered DataNode: %v", dn)
            case dn := <-t.chanDeadDataNodes:
                t.UnRegisterDataNode(dn)
                glog.V(0).Infof("Dead DataNode: %v", dn)
            }
        }
    }()
}
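// LoadNeedleMap rebuilds an in-memory needle map from an .idx file, tracking
// file and deletion counters; entries with offset 0 are treated as deletions.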
func LoadNeedleMap(file *os.File) (*NeedleMap, error) {
    nm := NewNeedleMap(file)
    e := WalkIndexFile(file, func(key uint64, offset, size uint32) error {
        if key > nm.maximumFileKey {
            nm.maximumFileKey = key
        }
        nm.fileCounter++
        nm.fileByteCounter = nm.fileByteCounter + uint64(size)
        if offset > 0 {
            oldSize := nm.m.Set(Key(key), offset, size)
            glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
            if oldSize > 0 {
                nm.deletionCounter++
                nm.deletionByteCounter = nm.deletionByteCounter + uint64(oldSize)
            }
        } else {
            oldSize := nm.m.Delete(Key(key))
            glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
            nm.deletionCounter++
            nm.deletionByteCounter = nm.deletionByteCounter + uint64(oldSize)
        }
        return nil
    })
    glog.V(1).Infoln("max file key:", nm.maximumFileKey)
    return nm, e
}
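// copyDataBasedOnIndexFile writes a compacted copy of the volume's data and
// index files, keeping only needles that are still live in the needle map and
// not expired by TTL.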
func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
    var (
        dst, idx, oldIndexFile *os.File
    )
    if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
        return
    }
    defer dst.Close()

    if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
        return
    }
    defer idx.Close()

    if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil {
        return
    }
    defer oldIndexFile.Close()

    nm := NewNeedleMap(idx)
    now := uint64(time.Now().Unix())

    v.SuperBlock.CompactRevision++
    dst.Write(v.SuperBlock.Bytes())
    new_offset := int64(SuperBlockSize)

    WalkIndexFile(oldIndexFile, func(key uint64, offset, size uint32) error {
        if size <= 0 {
            return nil
        }
        nv, ok := v.nm.Get(key)
        if !ok {
            return nil
        }
        n := new(Needle)
        n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version())
        defer n.ReleaseMemory()
        if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) {
            return nil
        }
        glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
        if nv.Offset == offset && nv.Size > 0 {
            if err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil {
                return fmt.Errorf("cannot put needle: %s", err)
            }
            if _, err = n.Append(dst, v.Version()); err != nil {
                return fmt.Errorf("cannot append needle: %s", err)
            }
            new_offset += n.DiskSize()
            glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", new_offset, "data_size", n.Size)
        }
        return nil
    })
    return
}
// a workaround because an http POST that follows a redirection loses its request body
func postFollowingOneRedirect(target string, contentType string, b *bytes.Buffer) error {
    backupReader := bytes.NewReader(b.Bytes())
    resp, err := http.Post(target, contentType, b)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    reply, _ := ioutil.ReadAll(resp.Body)
    statusCode := resp.StatusCode

    if statusCode == http.StatusMovedPermanently {
        var urlStr string
        if urlStr = resp.Header.Get("Location"); urlStr == "" {
            return fmt.Errorf("%d response missing Location header", resp.StatusCode)
        }
        glog.V(0).Infoln("Post redirected to ", urlStr)
        resp2, err2 := http.Post(urlStr, contentType, backupReader)
        if err2 != nil {
            return err2
        }
        defer resp2.Body.Close()
        reply, _ = ioutil.ReadAll(resp2.Body)
        statusCode = resp2.StatusCode
    }

    glog.V(0).Infoln("Post returned status: ", statusCode, string(reply))
    if statusCode != http.StatusOK {
        return errors.New(string(reply))
    }
    return nil
}
func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
    if !n.IsChunkedManifest() {
        return false
    }
    chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
    if e != nil {
        glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
        return false
    }
    if fileName == "" && chunkManifest.Name != "" {
        fileName = chunkManifest.Name
    }
    mType := ""
    if chunkManifest.Mime != "" {
        mt := chunkManifest.Mime
        if !strings.HasPrefix(mt, "application/octet-stream") {
            mType = mt
        }
    }
    w.Header().Set("X-File-Store", "chunked")

    chunkedFileReader := &operation.ChunkedFileReader{
        Manifest: chunkManifest,
        Master:   vs.GetMasterNode(),
    }
    defer chunkedFileReader.Close()
    if e := writeResponseContent(fileName, mType, chunkedFileReader, w, r); e != nil {
        glog.V(2).Infoln("response write error:", e)
    }
    return true
}
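// findMaster returns the currently selected master, or discovers and picks one
// from the configured nodes under the mutex if none is selected yet.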
func (mn *MasterNodes) findMaster() (string, error) {
    master := mn.GetMaster()
    if master != "" {
        return master, nil
    }
    mn.mutex.Lock()
    defer mn.mutex.Unlock()
    if len(mn.nodes) == 0 {
        return "", errors.New("No master node found!")
    }
    if mn.master == "" {
        for _, m := range mn.nodes {
            glog.V(4).Infof("Listing masters on %s", m)
            if masters, e := operation.ListMasters(m); e == nil {
                if len(masters) == 0 {
                    continue
                }
                mn.nodes = append(masters, m)
                mn.master = mn.nodes[rand.Intn(len(mn.nodes))]
                glog.V(2).Infof("current master nodes (nodes: %v, master: %s)", mn.nodes, mn.master)
                break
            } else {
                glog.V(4).Infof("Failed listing masters on %s: %v", m, e)
            }
        }
    }
    if mn.master == "" {
        return "", errors.New("No master node available!")
    }
    return mn.master, nil
}
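// ReadLocalNeedle reads a needle from a locally stored volume, serving it from
// the needle cache when possible and validating the request cookie.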
func (s *Store) ReadLocalNeedle(fid *FileId) (n *Needle, err error) {
    cacheKey := fid.String()
    if cn, cacheHit := s.needleCache.Get(cacheKey); cacheHit {
        glog.V(2).Infoln("Local needle cache hit:", fid)
        return cn.(*Needle), nil
    }
    glog.V(2).Infoln("Local needle cache miss:", fid)

    if v := s.findVolume(fid.VolumeId); v != nil {
        n = &Needle{
            Id: fid.Key,
        }
        if _, err := v.readNeedle(n); err != nil {
            return nil, err
        }
        if n.Cookie != fid.Cookie {
            return nil, fmt.Errorf("request (%v,%x) with mismatched cookie seen: %x expected: %x", fid.VolumeId, fid.Key, fid.Cookie, n.Cookie)
        }
    } else {
        return nil, fmt.Errorf("Volume %v not found!", fid.VolumeId)
    }
    s.needleCache.Add(cacheKey, n)
    return n, nil
}
func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
    if e := r.ParseForm(); e != nil {
        glog.V(0).Infoln("form parse error:", e)
        writeJsonError(w, r, http.StatusBadRequest, e)
        return
    }
    vid, _, _, _, _ := parseURLPath(r.URL.Path)
    volumeId, ve := storage.NewVolumeId(vid)
    if ve != nil {
        glog.V(0).Infoln("NewVolumeId error:", ve)
        writeJsonError(w, r, http.StatusBadRequest, ve)
        return
    }
    needle, ne := storage.NewNeedle(r, vs.FixJpgOrientation)
    if ne != nil {
        writeJsonError(w, r, http.StatusBadRequest, ne)
        return
    }
    ret := operation.UploadResult{}
    size, errorStatus := topology.ReplicatedWrite(vs.GetMasterNode(), vs.store, volumeId, needle, r)
    httpStatus := http.StatusCreated
    if errorStatus != "" {
        httpStatus = http.StatusInternalServerError
        ret.Error = errorStatus
    }
    if needle.HasName() {
        ret.Name = string(needle.Name)
    }
    ret.Size = size
    writeJsonQuiet(w, r, httpStatus, ret)
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
    if dirs, err := ioutil.ReadDir(l.Directory); err == nil {
        for _, dir := range dirs {
            name := dir.Name()
            if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
                collection := ""
                base := name[:len(name)-len(".dat")]
                i := strings.LastIndex(base, "_")
                if i > 0 {
                    collection, base = base[0:i], base[i+1:]
                }
                if vid, err := NewVolumeId(base); err == nil {
                    if l.volumes[vid] == nil {
                        if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil {
                            l.volumes[vid] = v
                            glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String())
                        } else {
                            glog.V(0).Infof("new volume %s error %s", name, e)
                        }
                    }
                }
            }
        }
    }
    glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount)
}
// trySynchronizing syncs with the remote volume server incrementally by
// making up the delta between the local and remote index entries.
func (v *Volume) trySynchronizing(volumeServer string, masterMap CompactMap, compactRevision uint16) error {
    slaveIdxFile, err := os.Open(v.nm.IndexFileName())
    if err != nil {
        return fmt.Errorf("Open volume %d index file: %v", v.Id, err)
    }
    defer slaveIdxFile.Close()
    slaveMap, err := LoadNeedleMap(slaveIdxFile)
    if err != nil {
        return fmt.Errorf("Load volume %d index file: %v", v.Id, err)
    }
    var delta []NeedleValue
    if err := masterMap.Visit(func(needleValue NeedleValue) error {
        if needleValue.Key == 0 {
            return nil
        }
        if _, ok := slaveMap.Get(uint64(needleValue.Key)); ok {
            return nil // skip intersection
        }
        delta = append(delta, needleValue)
        return nil
    }); err != nil {
        return fmt.Errorf("Add master entry: %v", err)
    }
    if err := slaveMap.m.Visit(func(needleValue NeedleValue) error {
        if needleValue.Key == 0 {
            return nil
        }
        if _, ok := masterMap.Get(needleValue.Key); ok {
            return nil // skip intersection
        }
        needleValue.Size = 0
        delta = append(delta, needleValue)
        return nil
    }); err != nil {
        return fmt.Errorf("Remove local entry: %v", err)
    }

    // simulate the same ordering as the remote .dat file's needle entries
    sort.Sort(ByOffset(delta))

    // make up the delta
    fetchCount := 0
    volumeDataContentHandlerUrl := "http://" + volumeServer + "/admin/sync/data"
    for _, needleValue := range delta {
        if needleValue.Size == 0 {
            // remove file entry from local
            v.removeNeedle(needleValue.Key)
            continue
        }
        // add master file entry to local data file
        if err := v.fetchNeedle(volumeDataContentHandlerUrl, needleValue, compactRevision); err != nil {
            glog.V(0).Infof("Fetch needle %v from %s: %v", needleValue, volumeServer, err)
            return err
        }
        fetchCount++
    }
    glog.V(1).Infof("Fetched %d needles from %s", fetchCount, volumeServer)
    return nil
}
func ScanVolumeFile(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType,
    visitSuperBlock func(SuperBlock) error,
    readNeedleBody bool,
    visitNeedle func(n *Needle, offset int64) error) (err error) {
    var v *Volume
    if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
        return fmt.Errorf("Failed to load volume %d: %v", id, err)
    }
    if err = visitSuperBlock(v.SuperBlock); err != nil {
        return fmt.Errorf("Failed to process volume %d super block: %v", id, err)
    }

    version := v.Version()

    offset := int64(SuperBlockSize)
    n, rest, e := ReadNeedleHeader(v.dataFile, version, offset)
    if e != nil {
        err = fmt.Errorf("cannot read needle header: %v", e)
        return
    }
    for n != nil {
        if readNeedleBody {
            if err = n.ReadNeedleBody(v.dataFile, version, offset+int64(NeedleHeaderSize), rest); err != nil {
                glog.V(0).Infof("cannot read needle body: %v", err)
                //err = fmt.Errorf("cannot read needle body: %v", err)
                //return
            }
            if n.DataSize >= n.Size {
                // this should come from a bug reported on #87 and #93
                // fixed in v0.69
                // remove this whole "if" clause later, long after 0.69
                oldRest, oldSize := rest, n.Size
                padding := NeedlePaddingSize - ((n.Size + NeedleHeaderSize + NeedleChecksumSize) % NeedlePaddingSize)
                n.Size = 0
                rest = n.Size + NeedleChecksumSize + padding
                if rest%NeedlePaddingSize != 0 {
                    rest += (NeedlePaddingSize - rest%NeedlePaddingSize)
                }
                glog.V(4).Infof("Adjusting n.Size %d=>0 rest:%d=>%d %+v", oldSize, oldRest, rest, n)
            }
        }
        if err = visitNeedle(n, offset); err != nil {
            glog.V(0).Infof("visit needle error: %v", err)
        }
        offset += int64(NeedleHeaderSize) + int64(rest)
        glog.V(4).Infof("==> new entry offset %d", offset)
        if n, rest, err = ReadNeedleHeader(v.dataFile, version, offset); err != nil {
            if err == io.EOF {
                return nil
            }
            return fmt.Errorf("cannot read needle header: %v", err)
        }
        glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
    }
    return
}
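// GetOrHeadHandler resolves the requested path to a file id via the filer,
// then either redirects the client to the volume server or proxies the
// volume server's response back to the client.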
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
    if strings.HasSuffix(r.URL.Path, "/") {
        if fs.disableDirListing {
            w.WriteHeader(http.StatusMethodNotAllowed)
            return
        }
        fs.listDirectoryHandler(w, r)
        return
    }

    fileId, err := fs.filer.FindFile(r.URL.Path)
    if err == leveldb.ErrNotFound {
        glog.V(3).Infoln("Not found in db", r.URL.Path)
        w.WriteHeader(http.StatusNotFound)
        return
    }

    query := r.URL.Query()
    collection := query.Get("collection")
    if collection == "" {
        collection = fs.collection
    }

    urlString, err := operation.LookupFileId(fs.master, fileId, collection, true)
    if err != nil {
        glog.V(1).Infof("operation LookupFileId %s failed, err is %s", fileId, err.Error())
        w.WriteHeader(http.StatusNotFound)
        return
    }
    if fs.redirectOnRead {
        http.Redirect(w, r, urlString, http.StatusFound)
        return
    }

    u, _ := url.Parse(urlString)
    request := &http.Request{
        Method:        r.Method,
        URL:           u,
        Proto:         r.Proto,
        ProtoMajor:    r.ProtoMajor,
        ProtoMinor:    r.ProtoMinor,
        Header:        r.Header,
        Body:          r.Body,
        Host:          r.Host,
        ContentLength: r.ContentLength,
    }
    glog.V(3).Infoln("retrieving from", u)
    resp, do_err := util.HttpDo(request)
    if do_err != nil {
        glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
        writeJsonError(w, r, http.StatusInternalServerError, do_err)
        return
    }
    defer resp.Body.Close()
    for k, v := range resp.Header {
        w.Header()[k] = v
    }
    w.WriteHeader(resp.StatusCode)
    io.Copy(w, resp.Body)
}
// listDirectoryHandler lists files and directories under a directory.
// Files are sorted by name and paginated via "lastFileName" and "limit".
// Sub directories are listed on the first page, when "lastFileName" is empty.
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
    if !strings.HasSuffix(r.URL.Path, "/") {
        return
    }
    limit, limit_err := strconv.Atoi(r.FormValue("limit"))
    if limit_err != nil {
        limit = 100
    }

    lastFileName := r.FormValue("lastFileName")
    files, err := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)

    if err == leveldb.ErrNotFound {
        glog.V(0).Infof("Error %s", err)
        w.WriteHeader(http.StatusNotFound)
        return
    }

    directories, err2 := fs.filer.ListDirectories(r.URL.Path)
    if err2 == leveldb.ErrNotFound {
        glog.V(0).Infof("Error %s", err2)
        w.WriteHeader(http.StatusNotFound)
        return
    }

    shouldDisplayLoadMore := len(files) > 0

    lastFileName = ""
    if len(files) > 0 {
        lastFileName = files[len(files)-1].Name

        files2, err3 := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)
        if err3 == leveldb.ErrNotFound {
            glog.V(0).Infof("Error %s", err3)
            w.WriteHeader(http.StatusNotFound)
            return
        }
        shouldDisplayLoadMore = len(files2) > 0
    }

    args := struct {
        Path                  string
        Files                 interface{}
        Directories           interface{}
        Limit                 int
        LastFileName          string
        ShouldDisplayLoadMore bool
    }{
        r.URL.Path,
        files,
        directories,
        limit,
        lastFileName,
        shouldDisplayLoadMore,
    }
    ui.StatusTpl.Execute(w, args)
}
func (mn *MasterNodes) Reset() {
    glog.V(4).Infof("Resetting master nodes: %v", mn)
    mn.mutex.Lock()
    defer mn.mutex.Unlock()
    if len(mn.nodes) > 1 && mn.master != "" {
        glog.V(0).Infof("Reset master %s from: %v", mn.master, mn.nodes)
        mn.master = ""
    }
}
func (s *RaftServer) redirectToLeader(w http.ResponseWriter, req *http.Request) {
    if leader, e := s.topo.Leader(); e == nil {
        // http.StatusMovedPermanently does not cause an http POST to follow the redirection
        glog.V(0).Infoln("Redirecting to", http.StatusMovedPermanently, "http://"+leader+req.URL.Path)
        http.Redirect(w, req, "http://"+leader+req.URL.Path, http.StatusMovedPermanently)
    } else {
        glog.V(0).Infoln("Error: Leader Unknown")
        http.Error(w, "Leader unknown", http.StatusInternalServerError)
    }
}
func (vs *VolumeServer) FaviconHandler(w http.ResponseWriter, r *http.Request) {
    data, err := images.Asset("favicon/favicon.ico")
    if err != nil {
        glog.V(2).Infoln("favicon read error:", err)
        return
    }
    if e := writeResponseContent("favicon.ico", "image/x-icon", bytes.NewReader(data), w, r); e != nil {
        glog.V(2).Infoln("response write error:", e)
    }
}