func runCopy(cmd *Command, args []string) bool { copy.secret = security.Secret(*copy.secretKey) if len(args) <= 1 { return false } filerDestination := args[len(args)-1] fileOrDirs := args[0 : len(args)-1] filerUrl, err := url.Parse(filerDestination) if err != nil { fmt.Printf("The last argument should be a URL on filer: %v\n", err) return false } path := filerUrl.Path if !strings.HasSuffix(path, "/") { path = path + "/" } for _, fileOrDir := range fileOrDirs { if !doEachCopy(fileOrDir, filerUrl.Host, path) { return false } } return true }
func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { defer wait.Done() delayedDeleteChan := make(chan *delayedFile, 100) var waitForDeletions sync.WaitGroup secret := security.Secret(*b.secretKey) for i := 0; i < 7; i++ { waitForDeletions.Add(1) go func() { defer waitForDeletions.Done() for df := range delayedDeleteChan { if df.enterTime.After(time.Now()) { time.Sleep(df.enterTime.Sub(time.Now())) } if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, security.GenJwt(secret, df.fp.Fid)); e == nil { s.completed++ } else { s.failed++ } } }() } for id := range idChan { start := time.Now() fileSize := int64(*b.fileSize + rand.Intn(64)) fp := &operation.FilePart{Reader: &FakeReader{id: uint64(id), size: fileSize}, FileSize: fileSize} ar := &operation.VolumeAssignRequest{ Count: 1, Collection: *b.collection, } if assignResult, err := operation.Assign(*b.server, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection if _, err := fp.Upload(0, *b.server, secret); err == nil { if rand.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} } else { fileIdLineChan <- fp.Fid } s.completed++ s.transferred += fileSize } else { s.failed++ fmt.Printf("Failed to write with error:%v\n", err) } writeStats.addSample(time.Now().Sub(start)) if *cmdBenchmark.IsDebug { fmt.Printf("writing %d file %s\n", id, fp.Fid) } } else { s.failed++ println("writing file error:", err.Error()) } } close(delayedDeleteChan) waitForDeletions.Wait() }
func runUpload(cmd *Command, args []string) bool { secret := security.Secret(*upload.secretKey) if len(cmdUpload.Flag.Args()) == 0 { if *upload.dir == "" { return false } filepath.Walk(*upload.dir, func(path string, info os.FileInfo, err error) error { if err == nil { if !info.IsDir() { if *upload.include != "" { if ok, _ := filepath.Match(*upload.include, filepath.Base(path)); !ok { return nil } } parts, e := operation.NewFileParts([]string{path}) if e != nil { return e } results, e := operation.SubmitFiles(*upload.master, parts, *upload.replication, *upload.collection, *upload.ttl, *upload.maxMB, secret) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { return e } } } else { fmt.Println(err) } return err }) } else { parts, e := operation.NewFileParts(args) if e != nil { fmt.Println(e.Error()) } results, _ := operation.SubmitFiles(*upload.master, parts, *upload.replication, *upload.collection, *upload.ttl, *upload.maxMB, secret) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } return true }
// SendHeartbeatToMaster locates the current master, builds a JoinMessage
// describing this store's volumes, and POSTs it to the master's /dir/join
// endpoint. On success it records the master-assigned volume size limit and
// secret key and marks the store connected. On transport/response errors it
// resets the cached master list so the next heartbeat re-elects a master.
// Expired TTL volumes are skipped from the report and, once expired long
// enough, deleted locally as a side effect.
func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.Secret, e error) {
	masterNode, e = s.masterNodes.FindMaster()
	if e != nil {
		return
	}
	var volumeMessages []*operation.VolumeInformationMessage
	maxVolumeCount := 0
	var maxFileKey uint64
	// Aggregate capacity, the highest file key, and per-volume info across
	// all storage locations.
	for _, location := range s.Locations {
		maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
		for k, v := range location.volumes {
			if maxFileKey < v.nm.MaxFileKey() {
				maxFileKey = v.nm.MaxFileKey()
			}
			if !v.expired(s.volumeSizeLimit) {
				volumeMessage := &operation.VolumeInformationMessage{
					Id:               proto.Uint32(uint32(k)),
					Size:             proto.Uint64(uint64(v.Size())),
					Collection:       proto.String(v.Collection),
					FileCount:        proto.Uint64(uint64(v.nm.FileCount())),
					DeleteCount:      proto.Uint64(uint64(v.nm.DeletedCount())),
					DeletedByteCount: proto.Uint64(v.nm.DeletedSize()),
					ReadOnly:         proto.Bool(v.readOnly),
					ReplicaPlacement: proto.Uint32(uint32(v.ReplicaPlacement.Byte())),
					Version:          proto.Uint32(uint32(v.Version())),
					Ttl:              proto.Uint32(v.Ttl.ToUint32()),
				}
				volumeMessages = append(volumeMessages, volumeMessage)
			} else {
				// Expired volume: delete it only after the removal grace
				// period, otherwise just log that it is expired.
				// NOTE(review): "exiredLongEnough" looks like a typo for
				// "expiredLongEnough" — the method is declared elsewhere, so
				// renaming must happen at its definition site.
				if v.exiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
					location.deleteVolumeById(v.Id)
					glog.V(0).Infoln("volume", v.Id, "is deleted.")
				} else {
					glog.V(0).Infoln("volume", v.Id, "is expired.")
				}
			}
		}
	}
	joinMessage := &operation.JoinMessage{
		IsInit:         proto.Bool(!s.connected),
		Ip:             proto.String(s.Ip),
		Port:           proto.Uint32(uint32(s.Port)),
		PublicUrl:      proto.String(s.PublicUrl),
		MaxVolumeCount: proto.Uint32(uint32(maxVolumeCount)),
		MaxFileKey:     proto.Uint64(maxFileKey),
		DataCenter:     proto.String(s.dataCenter),
		Rack:           proto.String(s.rack),
		Volumes:        volumeMessages,
	}
	data, err := proto.Marshal(joinMessage)
	if err != nil {
		return "", "", err
	}
	joinUrl := "http://" + masterNode + "/dir/join"
	glog.V(4).Infof("Connecting to %s ...", joinUrl)
	jsonBlob, err := util.PostBytes(joinUrl, data)
	if err != nil {
		// Could not reach this master; force re-discovery next time.
		s.masterNodes.Reset()
		return "", "", err
	}
	var ret operation.JoinResult
	if err := json.Unmarshal(jsonBlob, &ret); err != nil {
		glog.V(0).Infof("Failed to join %s with response: %s", joinUrl, string(jsonBlob))
		s.masterNodes.Reset()
		return masterNode, "", err
	}
	if ret.Error != "" {
		// Master answered but rejected the join.
		s.masterNodes.Reset()
		return masterNode, "", errors.New(ret.Error)
	}
	// Adopt master-provided settings and mark the store connected.
	s.volumeSizeLimit = ret.VolumeSizeLimit
	secretKey = security.Secret(ret.SecretKey)
	s.connected = true
	return
}