Example #1
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*cpus)
	w := new(writer)
	r := new(reader)
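	// Each worker hashes everything it copies from the shared reader;
	// io.Copy runs until the reader is exhausted or the process exits.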
	worker := func(wr *writer) {
		hasher := sha1.New()
		dst := io.MultiWriter(wr, hasher)
		// Wrap in a closure so the digest is computed when the worker
		// returns, rather than being evaluated at defer time.
		defer func() { log.Printf("%x", hasher.Sum(nil)) }()
		io.Copy(dst, r)
	}
	for index := 0; index < *workers; index++ {
		go worker(w)
	}
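	// Let the workers copy for the configured duration; the writer must
	// synchronize internally, since they are still running when sum is read.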
	time.Sleep(*duration)
	bytes := bytefmt.ByteSize(w.sum)
	speed := uint64(float64(w.sum) / duration.Seconds())
	log.Println(duration, bytes, "total;", bytefmt.ByteSize(speed), "per second")
	log.Println(processed, "is roughly", bytefmt.ByteSize(hath.GetRoughCacheSize(processed)))
}
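The package-level flags and the writer/reader types used by Example #1 are elided. A minimal sketch of what they might look like, with all names and defaults inferred from the usage above rather than taken from the source:

// Hypothetical package-level declarations assumed by Example #1.
var (
	cpus     = flag.Int("cpus", runtime.NumCPU(), "GOMAXPROCS value")
	workers  = flag.Int("workers", 4, "number of hashing workers")
	duration = flag.Duration("duration", 10*time.Second, "benchmark length")
)

// writer counts the bytes written to it.
type writer struct {
	mu  sync.Mutex
	sum uint64
}

func (w *writer) Write(p []byte) (int, error) {
	w.mu.Lock()
	w.sum += uint64(len(p))
	w.mu.Unlock()
	return len(p), nil
}

// reader yields an endless stream of zero bytes for io.Copy to consume.
type reader struct{}

func (r *reader) Read(p []byte) (int, error) { return len(p), nil }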
Example #2
func main() {
	flag.Parse()
	g := hath.FileGenerator{
		SizeMax:       sizeMax,
		SizeMin:       sizeMin,
		ResolutionMax: resMax,
		ResolutionMin: resMin,
		Dir:           dir,
	}
	files := make(chan hath.File)
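	// Workers generate files forever and feed the shared channel; this
	// assumes g.New is safe for concurrent use.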
	worker := func(work chan hath.File) {
		log.Println("starting worker")
		for {
			f, err := g.New()
			if err != nil {
				log.Fatal(err)
			}
			work <- f
		}
	}
	for i := 0; i < workers; i++ {
		go worker(files)
	}
	fmt.Printf("%+v\n", g)
	start := time.Now()
	var total int64
	for i := int64(0); i < count; i++ {
		f := <-files
		total += f.Size
		fmt.Println(f)
	}
	end := time.Now()
	duration := end.Sub(start)
	totalWrote := bytefmt.ByteSize(uint64(total))
	perSecond := float64(total) / duration.Seconds()
	rate := bytefmt.ByteSize(uint64(perSecond))
	fmt.Printf("OK for %v\n", duration)
	fmt.Printf("%s at rate %s/s\n", totalWrote, rate)
	log.Println(count, "is roughly", bytefmt.ByteSize(hath.GetRoughCacheSize(count)))
}
Example #3
func main() {
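	// Serve the default mux on localhost:6060 so profiling endpoints are
	// reachable (assumes net/http/pprof is imported for its side effects).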
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	flag.Parse()
	runtime.GOMAXPROCS(cpus)
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}
	g := hath.FileGenerator{
		SizeMax:       sizeMax,
		SizeMin:       sizeMin,
		ResolutionMax: resMax,
		ResolutionMin: resMin,
		TimeDelta:     20,
	}
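	// onlyMemory: benchmark generation, iteration, and (un)marshalling of
	// fake records entirely in memory, then via a flat file dump.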
	if onlyMemory {
		log.Println("allocating")
		var files = make([]hath.File, count)
		log.Println("generating")
		for i := range files {
			files[i] = g.NewFake()
		}
		log.Println("generated", len(files))
		fmt.Scanln()
		start := time.Now()
		log.Println("iterating...")
		// staticCount tallies Static files; the package-level count above
		// still holds the number of generated records.
		var staticCount int

		// Every fake record marshals to the same size, so its length doubles
		// as the read-buffer size when the dump is re-read below.
		fileBytes := len(g.NewFake().Bytes())
		for _, f := range files {
			if f.Static {
				staticCount++
			}
		}
		end := time.Now()
		duration := end.Sub(start)
		rate := float64(len(files)) / duration.Seconds()
		fmt.Println("static", count)
		fmt.Printf("OK for %v at rate %f per second\n", duration, rate)
		log.Println("iterated")

		log.Println("Writing dump")
		start = time.Now()
		d, err := os.Create("dump.mem")
		if err != nil {
			log.Fatal(err)
		}
		var totalSize int
		for _, f := range files {
			n, err := d.Write(f.Bytes())
			if err != nil {
				log.Fatal(err)
			}
			totalSize += n
		}
		d.Close()
		end = time.Now()
		duration = end.Sub(start)
		rate = float64(len(files)) / duration.Seconds()
		fmt.Println("static", count)
		fmt.Printf("OK for %v at rate %f per second\n", duration, rate)
		log.Println("written", totalSize)
		log.Println("reading dump")
		d, err = os.Open("dump.mem")
		if err != nil {
			log.Fatal(err)
		}
		defer d.Close()
		var buffer = make([]byte, fileBytes)
		var f hath.File
		start = time.Now()
		for {
			// io.ReadFull guarantees a whole record per iteration; a bare
			// Read may return short.
			_, err := io.ReadFull(d, buffer)
			if err == io.EOF {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			if err = hath.UnmarshalFileTo(buffer, &f); err != nil {
				log.Fatal(err)
			}
		}
		end = time.Now()
		duration = end.Sub(start)
		rate = float64(len(files)) / duration.Seconds()
		fmt.Println("static", count)
		fmt.Printf("OK for %v at rate %f per second\n", duration, rate)
		log.Println("read")
		os.Exit(0)
	}
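	// onlyArray: hold just the fixed-size hashes to gauge their footprint.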
	if onlyArray {
		log.Println("allocating")
		var ids = make([][hath.HashSize]byte, count)
		log.Println("allocated")
		var f hath.File
		for i := range ids {
			f = g.NewFake()
			ids[i] = f.Hash
		}
	}
	db, err := hath.NewDB(dbpath)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	d, err := g.NewFake().Marshal()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("average file info is", len(d), "bytes")
	log.Printf("%x", d)
	if onlyOpen {
		log.Println("only open. Waiting for 10s")
		count = int64(db.Count())
		fmt.Scanln()
		start := time.Now()
		n, err := db.GetOldFilesCount(time.Now().Add(time.Second * -10))
		if err != nil {
			log.Fatal(err)
		}
		log.Println("got", n)
		end := time.Now()
		duration := end.Sub(start)
		rate := float64(count) / duration.Seconds()
		log.Printf("Scanned %d for %v at rate %f per second\n", count, duration, rate)
		fmt.Scanln()
	}
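	// generate: pre-build count fake files and time their insertion.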
	if generate {
		log.Println("generating", count, "files")

		fmt.Printf("%+v\n", g)
		var i int64
		files := make([]hath.File, count)
		for i = 0; i < count; i++ {
			files[i] = g.NewFake()
		}
		start := time.Now()
		if count < bulkSize {
			log.Println("writing")
			if err := db.AddBatch(files); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Println("writing in bulks")
			for i = 0; i+bulkSize < count; i += bulkSize {
				bulkstart := time.Now()

				if err := db.AddBatch(files[i : i+bulkSize]); err != nil {
					log.Fatal(err)
				}

				log.Println("from", i, "to", i+bulkSize, time.Now().Sub(bulkstart))
			}
			log.Println("from", i+bulkSize, "to", count)
			if err := db.AddBatch(files[i:]); err != nil {
				log.Fatal(err)
			}
		}
		end := time.Now()
		duration := end.Sub(start)
		rate := float64(count) / duration.Seconds()
		fmt.Printf("OK for %v at rate %f per second\n", duration, rate)
	}
	// if collect {
	// 	log.Println("collecting")
	// 	start := time.Now()
	// 	n, err := db.Collect(time.Now().Add(-time.Second))
	// 	if err != nil {
	// 		log.Fatal(err)
	// 	}
	//
	// 	end := time.Now()
	// 	duration := end.Sub(start)
	// 	rate := float64(n) / duration.Seconds()
	// 	fmt.Printf("Removed %d for %v at rate %f per second\n", n, duration, rate)
	// }
	log.Println(count, "is roughly", bytefmt.ByteSize(hath.GetRoughCacheSize(count)))
	log.Println("OK")
}