func (t *ReplicaTask) Run() error { ch := make(chan error) go func() { idxUrl := util.MkUrl(t.SrcDataNode, "/admin/sync/index", url.Values{"volume": {t.VID.String()}}) e := util.DownloadToFile(idxUrl, t.FileName()+".repx") if e != nil { e = fmt.Errorf("Replicat error: %s, %v", idxUrl, e) } ch <- e }() go func() { datUrl := util.MkUrl(t.SrcDataNode, "/admin/sync/vol_data", url.Values{"volume": {t.VID.String()}}) e := util.DownloadToFile(datUrl, t.FileName()+".repd") if e != nil { e = fmt.Errorf("Replicat error: %s, %v", datUrl, e) } ch <- e }() errs := make([]error, 0) for i := 0; i < 2; i++ { if e := <-ch; e != nil { errs = append(errs, e) } } if len(errs) == 0 { return nil } else { return fmt.Errorf("%v", errs) } }
func ReplicatedWrite(masterNode string, s *storage.Store, volumeId storage.VolumeId, needle *storage.Needle, r *http.Request) (size uint32, errorStatus string) { //check JWT jwt := security.GetJwt(r) defer func() { if errorStatus == "" { return } ReplicatedDelete(masterNode, s, volumeId, needle, r) }() ret, err := s.Write(volumeId, needle) if err != nil { errorStatus = "Failed to write to local disk (" + err.Error() + ")" } else if ret <= 0 { errorStatus = "Failed to write to local disk" } //send to other replica locations if r.FormValue("type") != "replicate" { repWrite := func(location operation.Location) bool { args := url.Values{ "type": {"replicate"}, } if needle.LastModified > 0 { args.Set("ts", strconv.FormatUint(needle.LastModified, 10)) } if needle.IsChunkedManifest() { args.Set("cm", "true") } u := util.MkUrl(location.Url, r.URL.Path, args) glog.V(4).Infoln("write replication to", u) _, err := operation.Upload(u, string(needle.Name), bytes.NewReader(needle.Data), needle.IsGzipped(), string(needle.Mime), jwt) if err != nil { glog.V(0).Infof("write replication to %s err, %v", u, err) } return err == nil } if !distributedOperation(masterNode, s, volumeId, repWrite) { ret = 0 errorStatus = "Failed to write to replicas for volume " + volumeId.String() } } size = ret return }
func LookupFileId(server, fileId, collection string, readonly bool) (fullUrl string, err error) { parts := strings.Split(fileId, ",") if len(parts) != 2 { return "", errors.New("Invalid fileId " + fileId) } lookup, lookupError := Lookup(server, parts[0], collection) if lookupError != nil { return "", lookupError } if len(lookup.Locations) == 0 { return "", errors.New("File Not Found") } var u string if readonly { u = lookup.Locations.PickForRead().Url } else { u = lookup.Locations.Head().Url } return util.MkUrl(u, "/"+fileId, nil), nil }
func BatchOperation(locationList *VolumeLocationList, path string, values url.Values) (isSuccess bool) { ch := make(chan bool, locationList.Length()) for _, dn := range locationList.AllDataNode() { go func(url string, path string, values url.Values) { _, e := util.RemoteApiCall(url, path, values) if e != nil { glog.V(0).Infoln("RemoteApiCall:", util.MkUrl(url, path, values), "error =", e) } ch <- e == nil }(dn.Url(), path, values) } isSuccess = true for range locationList.AllDataNode() { select { case canVacuum := <-ch: isSuccess = isSuccess && canVacuum case <-time.After(30 * time.Minute): isSuccess = false break } } return isSuccess }
// SendHeartbeatToMaster reports this store's state (capacity, per-volume
// statistics, highest file key) to the master via /dir/join2 and applies any
// settings returned.
//
// Expired volumes are left out of the report; volumes expired longer than
// MAX_TTL_VOLUME_REMOVAL_DELAY are deleted locally. On a transport error or
// a master-reported error the cached master node is reset so the next call
// re-discovers a master. When the response's join key differs from the
// store's, the returned settings (IP, volume size limit, collection
// settings) are applied and callback, if non-nil, is invoked.
func (s *Store) SendHeartbeatToMaster(callback SettingChanged) error {
	masterNode, err := s.masterNodes.findMaster()
	if err != nil {
		return err
	}
	var volumeMessages []*weedpb.VolumeInformationMessage
	maxVolumeCount := 0
	var maxFileKey uint64
	for _, location := range s.Locations {
		maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
		volumeToDelete := []VolumeId{}
		location.WalkVolume(func(v *Volume) (e error) {
			// Track the highest needle key seen across all volumes so the
			// master can avoid handing out colliding keys.
			if maxFileKey < v.nm.MaxFileKey() {
				maxFileKey = v.nm.MaxFileKey()
			}
			if !v.expired(s.GetVolumeSizeLimit()) {
				volumeMessage := &weedpb.VolumeInformationMessage{
					Id:               uint32(v.Id),
					Size:             uint64(v.Size()),
					Collection:       v.Collection,
					FileCount:        uint64(v.nm.FileCount()),
					DeleteCount:      uint64(v.nm.DeletedCount()),
					DeletedByteCount: v.nm.DeletedSize(),
					ReadOnly:         v.IsReadOnly(),
					Version:          uint32(v.Version()),
					Ttl:              v.Ttl.ToUint32(),
				}
				volumeMessages = append(volumeMessages, volumeMessage)
			} else {
				// Expired volumes are excluded from the heartbeat; they are
				// only deleted after an extra grace period.
				if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
					volumeToDelete = append(volumeToDelete, v.Id)
					glog.V(0).Infoln("volume", v.Id, "is deleted.")
				} else {
					glog.V(0).Infoln("volume", v.Id, "is expired.")
				}
			}
			return nil
		})
		// Delete after the walk completes to avoid mutating the location's
		// volume set while iterating it.
		for _, vid := range volumeToDelete {
			location.DeleteVolume(vid)
		}
	}
	joinMsgV2 := &weedpb.JoinMessageV2{
		JoinKey:        s.GetJoinKey(),
		Ip:             s.GetIP(),
		Port:           uint32(s.Port),
		PublicUrl:      s.PublicUrl,
		MaxVolumeCount: uint32(maxVolumeCount),
		MaxFileKey:     maxFileKey,
		DataCenter:     s.dataCenter,
		Rack:           s.rack,
		Volumes:        volumeMessages,
	}
	ret := &weedpb.JoinResponse{}
	joinUrl := util.MkUrl(masterNode, "/dir/join2", nil)
	glog.V(4).Infof("Sending heartbeat to %s ...", joinUrl)
	if err = util.PostPbMsg(joinUrl, joinMsgV2, ret); err != nil {
		// Transport failure: forget this master so the next heartbeat
		// re-runs master discovery.
		s.masterNodes.Reset()
		return err
	}
	if ret.Error != "" {
		s.masterNodes.Reset()
		return errors.New(ret.Error)
	}
	// A changed join key signals that the master carries updated settings
	// this store should adopt.
	if ret.JoinKey != s.GetJoinKey() {
		if glog.V(4) {
			jsonData, _ := json.Marshal(ret)
			glog.V(4).Infof("dir join sync settings: %v", string(jsonData))
		}
		s.SetJoinKey(ret.JoinKey)
		if ret.JoinIp != "" {
			s.SetIP(ret.JoinIp)
		}
		if ret.VolumeSizeLimit != 0 {
			s.SetVolumeSizeLimit(ret.VolumeSizeLimit)
		}
		if callback != nil {
			callback(ret)
		}
		if len(ret.CollectionSettings) > 0 {
			cs := NewCollectionSettingsFromPbMessage(ret.CollectionSettings)
			s.SetCollectionSettings(cs)
		}
	}
	return nil
}