Example #1
func GetHash(a string) (hash.Hash, error) {
	var h hash.Hash
	switch a {
	case "adler32":
		h = adler32.New()
	case "crc32", "crc32ieee":
		h = crc32.New(crc32.MakeTable(crc32.IEEE))
	case "crc32castagnoli":
		h = crc32.New(crc32.MakeTable(crc32.Castagnoli))
	case "crc32koopman":
		h = crc32.New(crc32.MakeTable(crc32.Koopman))
	case "crc64", "crc64iso":
		h = crc64.New(crc64.MakeTable(crc64.ISO))
	case "crc64ecma":
		h = crc64.New(crc64.MakeTable(crc64.ECMA))
	case "fnv", "fnv32":
		h = fnv.New32()
	case "fnv32a":
		h = fnv.New32a()
	case "fnv64":
		h = fnv.New64()
	case "fnv64a":
		h = fnv.New64a()
	case "hmac", "hmacsha256":
		h = hmac.New(sha256.New, []byte(key))
	case "hmacmd5":
		h = hmac.New(md5.New, []byte(key))
	case "hmacsha1":
		h = hmac.New(sha1.New, []byte(key))
	case "hmacsha512":
		h = hmac.New(sha512.New, []byte(key))
	case "md4":
		h = md4.New()
	case "md5":
		h = md5.New()
	case "ripemd160":
		h = ripemd160.New()
	case "sha1":
		h = sha1.New()
	case "sha224":
		h = sha256.New224()
	case "sha256":
		h = sha256.New()
	case "sha384":
		h = sha512.New384()
	case "sha512":
		h = sha512.New()
	default:
		return nil, errors.New("Invalid algorithm")
	}
	return h, nil
}
func TestDeepObjectPointer(t *testing.T) {
	// Arrange
	wheel1 := wheel{radius: 17}
	wheel2 := wheel{radius: 22}
	wheel3 := wheel{radius: 17}

	myUni1 := unicycle{licencePlateID: "blah", primaryWheel: &wheel1, tags: map[string]string{"color": "blue", "name": "john"}}
	myUni2 := unicycle{licencePlateID: "blah", primaryWheel: &wheel2, tags: map[string]string{"color": "blue", "name": "john"}}
	myUni3 := unicycle{licencePlateID: "blah", primaryWheel: &wheel3, tags: map[string]string{"color": "blue", "name": "john"}}

	// Run it more than once to verify determinism of hasher.
	for i := 0; i < 100; i++ {
		hasher1 := adler32.New()
		hasher2 := adler32.New()
		hasher3 := adler32.New()
		// Act
		DeepHashObject(hasher1, myUni1)
		hash1 := hasher1.Sum32()
		DeepHashObject(hasher2, myUni2)
		hash2 := hasher2.Sum32()
		DeepHashObject(hasher3, myUni3)
		hash3 := hasher3.Sum32()

		// Assert
		if hash1 == hash2 {
			t.Errorf("hash1 (%d) and hash2(%d) must be different because they have different values for wheel size", hash1, hash2)
		}

		if hash1 != hash3 {
			t.Errorf("hash1 (%d) and hash3(%d) must be the same because although they point to different objects, they have the same values for wheel size", hash1, hash3)
		}
	}
}
func TestDeepObjectPointer(t *testing.T) {
	// Arrange
	wheel1 := wheel{radius: 17}
	wheel2 := wheel{radius: 22}
	wheel3 := wheel{radius: 17}

	myUni1 := unicycle{licencePlateID: "blah", primaryWheel: &wheel1}
	myUni2 := unicycle{licencePlateID: "blah", primaryWheel: &wheel2}
	myUni3 := unicycle{licencePlateID: "blah", primaryWheel: &wheel3}

	hasher1 := adler32.New()
	hasher2 := adler32.New()
	hasher3 := adler32.New()

	// Act
	DeepHashObject(hasher1, myUni1)
	hash1 := hasher1.Sum32()
	DeepHashObject(hasher2, myUni2)
	hash2 := hasher2.Sum32()
	DeepHashObject(hasher3, myUni3)
	hash3 := hasher3.Sum32()

	// Assert
	if hash1 == hash2 {
		t.Errorf("hash1 (%d) and hash2(%d) must be different because they have different values for wheel size", hash1, hash2)
	}

	if hash1 != hash3 {
		t.Errorf("hash1 (%d) and hash3(%d) must be the same because although they point to different objects, they have the same values for wheel size", hash1, hash3)
	}
}
func (pm *PrinterManager) syncPrinters(ignorePrivet bool) error {
	glog.Info("Synchronizing printers, stand by")

	// Get current snapshot of CUPS printers.
	cupsPrinters, err := pm.cups.GetPrinters()
	if err != nil {
		return fmt.Errorf("Sync failed while calling GetPrinters(): %s", err)
	}
	if pm.ignoreRawPrinters {
		cupsPrinters, _ = lib.FilterRawPrinters(cupsPrinters)
	}

	// Augment CUPS printers with extra information from SNMP.
	if pm.snmp != nil {
		err = pm.snmp.AugmentPrinters(cupsPrinters)
		if err != nil {
			glog.Warningf("Failed to augment printers with SNMP data: %s", err)
		}
	}

	// Set CapsHash on all printers.
	for i := range cupsPrinters {
		h := adler32.New()
		lib.DeepHash(cupsPrinters[i].Tags, h)
		cupsPrinters[i].Tags["tagshash"] = fmt.Sprintf("%x", h.Sum(nil))

		h = adler32.New()
		lib.DeepHash(cupsPrinters[i].Description, h)
		cupsPrinters[i].CapsHash = fmt.Sprintf("%x", h.Sum(nil))
	}

	// Compare the snapshot to what we know currently.
	diffs := lib.DiffPrinters(cupsPrinters, pm.printers.GetAll())
	if diffs == nil {
		glog.Infof("Printers are already in sync; there are %d", len(cupsPrinters))
		return nil
	}

	// Update GCP.
	ch := make(chan lib.Printer, len(diffs))
	for i := range diffs {
		go pm.applyDiff(&diffs[i], ch, ignorePrivet)
	}
	currentPrinters := make([]lib.Printer, 0, len(diffs))
	for range diffs {
		p := <-ch
		if p.Name != "" {
			currentPrinters = append(currentPrinters, p)
		}
	}

	// Update what we know.
	pm.printers.Refresh(currentPrinters)
	glog.Infof("Finished synchronizing %d printers", len(currentPrinters))

	return nil
}
Example #5
func TestDeepHashObject(t *testing.T) {
	successCases := []func() interface{}{
		func() interface{} { return 8675309 },
		func() interface{} { return "Jenny, I got your number" },
		func() interface{} { return []string{"eight", "six", "seven"} },
		func() interface{} { return [...]int{5, 3, 0, 9} },
		func() interface{} { return map[int]string{8: "8", 6: "6", 7: "7"} },
		func() interface{} { return map[string]int{"5": 5, "3": 3, "0": 0, "9": 9} },
		func() interface{} { return A{867, "5309"} },
		func() interface{} { return &A{867, "5309"} },
		func() interface{} {
			return B{[]int{8, 6, 7}, map[string]bool{"5": true, "3": true, "0": true, "9": true}}
		},
		func() interface{} { return map[A]bool{A{8675309, "Jenny"}: true, A{9765683, "!Jenny"}: false} },
		func() interface{} { return map[C]bool{C{8675309, "Jenny"}: true, C{9765683, "!Jenny"}: false} },
		func() interface{} { return map[*A]bool{&A{8675309, "Jenny"}: true, &A{9765683, "!Jenny"}: false} },
		func() interface{} { return map[*C]bool{&C{8675309, "Jenny"}: true, &C{9765683, "!Jenny"}: false} },
	}

	for _, tc := range successCases {
		hasher1 := adler32.New()
		DeepHashObject(hasher1, tc())
		hash1 := hasher1.Sum32()
		DeepHashObject(hasher1, tc())
		hash2 := hasher1.Sum32()
		if hash1 != hash2 {
			t.Fatalf("hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash1, hash2)
		}
		for i := 0; i < 100; i++ {
			hasher2 := adler32.New()

			DeepHashObject(hasher1, tc())
			hash1a := hasher1.Sum32()
			DeepHashObject(hasher2, tc())
			hash2a := hasher2.Sum32()

			if hash1a != hash1 {
				t.Errorf("repeated hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash1, hash1a)
			}
			if hash2a != hash2 {
				t.Errorf("repeated hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash2, hash2a)
			}
			if hash1a != hash2a {
				t.Errorf("hash of the same object produced (%q) different results: %d vs %d", toString(tc()), hash1a, hash2a)
			}
		}
	}
}
Example #6
File: pic.go Project: afajl/am
func picHash(picf io.Reader) string {
	hash := adler32.New()
	if _, err := io.Copy(hash, picf); err != nil {
		log.Panic(err)
	}
	return fmt.Sprintf("%x", hash.Sum32())
}
Example #7
func (c *controller) computeChecksum(fn string) (string, error) {
	checksumType := strings.ToLower(c.conf.GetDirectives().Data.Simple.Checksum)
	var hash hash.Hash
	switch checksumType {
	case "md5":
		hash = md5.New()
	case "adler32":
		hash = adler32.New()
	case "sha1":
		hash = sha1.New()
	case "sha256":
		hash = sha256.New()
	default:
		return "", errors.New("provided checksum not implemented")
	}
	fd, err := os.Open(fn)
	if err != nil {
		return "", err
	}
	defer fd.Close()
	if _, err := io.Copy(hash, fd); err != nil {
		return "", err
	}
	checksum := fmt.Sprintf("%x", hash.Sum([]byte{}))
	return checksumType + ":" + checksum, nil
}
Example #8
// NewDeflaterLevel creates a new io.WriteCloser that satisfies writes by compressing data written to w.
// It is the caller's responsibility to call Close on the WriteCloser when done.
// level is the compression level, which can be DefaultCompression, NoCompression,
// or any integer value between BestSpeed and BestCompression (inclusive).
func NewDeflaterLevel(w io.Writer, level int) (io.WriteCloser, os.Error) {
	z := new(writer)
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is unused, in this implementation.
	// The final five FCHECK bits form a mod-31 checksum.
	switch level {
	case 0, 1:
		z.scratch[1] = 0x01
	case 2, 3, 4, 5:
		z.scratch[1] = 0x5e
	case 6, -1:
		z.scratch[1] = 0x9c
	case 7, 8, 9:
		z.scratch[1] = 0xda
	default:
		return nil, os.NewError("level out of range")
	}
	_, err := w.Write(z.scratch[0:2])
	if err != nil {
		return nil, err
	}
	z.w = w
	z.deflater = flate.NewDeflater(w, level)
	z.digest = adler32.New()
	return z, nil
}
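The four canned FLG bytes above (0x01, 0x5e, 0x9c and 0xda) are exactly the values whose FCHECK bits make the two-byte header a multiple of 31, as the comments describe. A minimal, self-contained check of that property:

package main

import "fmt"

func main() {
	// CMF is always 0x78 here (CINFO=7, CM=8); each compression-level class
	// gets an FLG byte whose FCHECK bits make the 16-bit header divisible by 31.
	for _, flg := range []byte{0x01, 0x5e, 0x9c, 0xda} {
		header := uint16(0x78)<<8 | uint16(flg)
		fmt.Printf("0x78%02x %% 31 = %d\n", flg, header%31) // prints 0 for each
	}
}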
Example #9
// Helper function to take the key string and determine which bucket
// the item should be placed in. This is the simplest hash function
// I found in the core libraries. Not really sure how appropriate it is.
func getIndex(key string, max int) int {
	hash := adler32.New()
	hash.Write([]byte(key))
	digest := hash.Sum32()

	return int(digest) % max
}
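A small usage sketch for the helper above, spreading a few hypothetical keys over a fixed number of buckets; getIndex is copied (slightly condensed) from the example. Note that int(digest) % max can turn negative on platforms where int is 32 bits, which may be behind the author's reservation:

package main

import (
	"fmt"
	"hash/adler32"
)

// getIndex mirrors the helper above.
func getIndex(key string, max int) int {
	hash := adler32.New()
	hash.Write([]byte(key))
	return int(hash.Sum32()) % max
}

func main() {
	buckets := make([][]string, 8)
	for _, key := range []string{"alpha", "beta", "gamma", "delta", "epsilon"} {
		i := getIndex(key, len(buckets))
		buckets[i] = append(buckets[i], key)
	}
	for i, b := range buckets {
		if len(b) > 0 {
			fmt.Printf("bucket %d: %v\n", i, b)
		}
	}
}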
// Returns an RC that matches the intent of the given deployment.
// It creates a new RC if required.
func (d *DeploymentController) getDesiredRC(deployment *experimental.Deployment) (*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// Find if the required RC exists already.
	rcList, err := d.client.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, rc := range rcList.Items {
		if api.Semantic.DeepEqual(rc.Spec.Template, deployment.Spec.Template) {
			// This is the desired RC.
			return &rc, nil
		}
	}
	// desired RC does not exist, create a new one.
	podTemplateSpecHasher := adler32.New()
	util.DeepHashObject(podTemplateSpecHasher, deployment.Spec.Template)
	podTemplateSpecHash := podTemplateSpecHasher.Sum32()
	rcName := fmt.Sprintf("deploymentrc-%d", podTemplateSpecHash)
	desiredRC := api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:      rcName,
			Namespace: namespace,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: 0,
			Template: deployment.Spec.Template,
		},
	}
	createdRC, err := d.client.ReplicationControllers(namespace).Create(&desiredRC)
	if err != nil {
		return nil, fmt.Errorf("error creating replication controller: %v", err)
	}
	return createdRC, nil
}
Example #11
func (e *Engine) adler32() error {
	data, err := computeHash(adler32.New(), e.stack.Pop())
	if err == nil {
		e.stack.Push(data)
	}
	return err
}
Example #12
func (self *FileService) AddFile(file File) error {

	writer := adler32.New()
	job := NewJob(file, writer, 2048, func(job *Job) error {
		var data []byte
		var err error

		switch {
		case file.Type() == FILE_DEFAULT:
			// Unchecked conversion. May cause problems
			defaultFile, _ := file.(DefaultFile)
			defaultFile.SetId(fmt.Sprintf("%x", writer.Sum32()))
			log.Printf("Adding file %s", defaultFile.Id())
			data, err = json.Marshal(defaultFile.Data())
			if err != nil {
				return err
			}
		}

		err = self.ctx.Database.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket(filesBucketName)
			return b.Put([]byte(file.Id()), data)
		})
		return err
	})
	self.ctx.JobService.Channel() <- job
	return nil
}
Example #13
func TestGolden(t *testing.T) {
	for _, g := range golden {
		in := g.in

		// We test the vanilla implementation
		p := []byte(g.in)
		vanilla := adler32.New()
		vanilla.Write(p)
		if got := vanilla.Sum32(); got != g.out {
			t.Errorf("vanilla implentation: for %q, expected 0x%x, got 0x%x", in, g.out, got)
			continue
		}

		// We test the rolling implementation by prefixing the slice by a
		// space, writing it to our rolling hash, and then rolling once
		q := []byte(" ")
		q = append(q, p...)
		rolling := rollsum.New()
		rolling.Write(q[:len(q)-1])
		rolling.Roll(q[len(q)-1])
		if got := rolling.Sum32(); got != g.out {
			t.Errorf("rolling implentation: for %q, expected 0x%x, got 0x%x", in, g.out, got)
			continue
		}
	}
}
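The comment above spells out the rolling procedure: hash everything but the last byte, then roll the last byte in. The identity behind Roll can be sketched directly against the standard library, assuming a fixed-length window; roll below is an illustrative reimplementation, not the rollsum package's own code:

package main

import (
	"fmt"
	"hash/adler32"
)

const mod = 65521 // the Adler-32 modulus (largest prime below 2^16)

// roll updates an Adler-32 sum over a window of fixed length n when the
// oldest byte out leaves the window and in enters it.
func roll(sum uint32, n int, out, in byte) uint32 {
	a := sum & 0xffff
	b := sum >> 16
	// a' = (a - out + in) mod m
	a = (a + mod + uint32(in) - uint32(out)) % mod
	// b' = (b - n*out + a' - 1) mod m; add multiples of mod to stay non-negative.
	b = (b + a + 2*mod - 1 - uint32(n)*uint32(out)%mod) % mod
	return b<<16 | a
}

func main() {
	s := []byte(" Wikipedia") // space prefix, as in the test above
	n := len(s) - 1           // window length 9

	sum := adler32.Checksum(s[:n])     // " Wikipedi"
	rolled := roll(sum, n, s[0], s[n]) // roll out ' ', roll in 'a'
	want := adler32.Checksum(s[1:])    // "Wikipedia" == 0x11e60398

	fmt.Printf("rolled=0x%08x want=0x%08x\n", rolled, want)
}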
Example #14
// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, os.Error) {
	z := new(reader)
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}
	_, err := io.ReadFull(z.r, z.scratch[0:2])
	if err != nil {
		return nil, err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		return nil, HeaderError
	}
	if z.scratch[1]&0x20 != 0 {
		_, err = io.ReadFull(z.r, z.scratch[0:4])
		if err != nil {
			return nil, err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			return nil, DictionaryError
		}
		z.decompressor = flate.NewReaderDict(z.r, dict)
	} else {
		z.decompressor = flate.NewReader(z.r)
	}
	z.digest = adler32.New()
	return z, nil
}
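The dictionary handling shown here is what compress/zlib in the current standard library still does: the header stores adler32.Checksum(dict) and the reader rejects a mismatched dictionary. A small round trip against today's stdlib API (rather than the old os.Error-era API above):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"log"
)

func main() {
	dict := []byte("hello world")
	var buf bytes.Buffer

	zw, err := zlib.NewWriterLevelDict(&buf, zlib.DefaultCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello world, hello zlib")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// The reader must be handed the same dictionary; it verifies the
	// Adler-32 dictionary checksum stored in the stream header.
	zr, err := zlib.NewReaderDict(&buf, dict)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	out, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}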
Example #15
func extractFromFile(filename string) (api.BoundPod, error) {
	var pod api.BoundPod

	glog.V(3).Infof("Reading config file %q", filename)
	file, err := os.Open(filename)
	if err != nil {
		return pod, err
	}
	defer file.Close()

	data, err := ioutil.ReadAll(file)
	if err != nil {
		return pod, err
	}

	manifest := &api.ContainerManifest{}
	// TODO: use api.Scheme.DecodeInto
	if err := yaml.Unmarshal(data, manifest); err != nil {
		return pod, fmt.Errorf("can't unmarshal file %q: %v", filename, err)
	}

	if err := api.Scheme.Convert(manifest, &pod); err != nil {
		return pod, fmt.Errorf("can't convert pod from file %q: %v", filename, err)
	}

	hostname, err := os.Hostname() //TODO: kubelet name would be better
	if err != nil {
		return pod, err
	}

	if len(pod.UID) == 0 {
		hasher := md5.New()
		fmt.Fprintf(hasher, "host:%s", hostname)
		fmt.Fprintf(hasher, "file:%s", filename)
		util.DeepHashObject(hasher, pod)
		pod.UID = hex.EncodeToString(hasher.Sum(nil)[0:])
		glog.V(5).Infof("Generated UID %q for pod %q from file %s", pod.UID, pod.Name, filename)
	}
	if len(pod.Namespace) == 0 {
		hasher := adler32.New()
		fmt.Fprint(hasher, filename)
		// TODO: file-<sum>.hostname would be better, if DNS subdomains
		// are allowed for namespace (some places only allow DNS
		// labels).
		pod.Namespace = fmt.Sprintf("file-%08x-%s", hasher.Sum32(), hostname)
		glog.V(5).Infof("Generated namespace %q for pod %q from file %s", pod.Namespace, pod.Name, filename)
	}
	// TODO(dchen1107): BoundPod is not a runtime.Object type. Once the kubelet can talk
	// about Pods directly, we can use the SelfLinker defined in package latest.
	// For now, simply follow the same format as resthandler.go.
	pod.ObjectMeta.SelfLink = fmt.Sprintf("/api/v1beta2/pods/%s?namespace=%s",
		pod.Name, pod.Namespace)

	if glog.V(4) {
		glog.Infof("Got pod from file %q: %#v", filename, pod)
	} else {
		glog.V(1).Infof("Got pod from file %q: %s.%s (%s)", filename, pod.Namespace, pod.Name, pod.UID)
	}
	return pod, nil
}
Example #16
func (z *Writer) init(w io.Writer, level int) {
	z.compressor = nil
	z.ModTime = time.Now()
	z.level = level
	z.adler32 = adler32.New()
	z.crc32 = crc32.NewIEEE()
	z.w = io.MultiWriter(w, z.adler32, z.crc32)
}
func (pm *PrinterManager) syncPrinters(ignorePrivet bool) error {
	log.Info("Synchronizing printers, stand by")

	// Get current snapshot of native printers.
	nativePrinters, err := pm.native.GetPrinters()
	if err != nil {
		return fmt.Errorf("Sync failed while calling GetPrinters(): %s", err)
	}

	// Set CapsHash on all printers.
	for i := range nativePrinters {
		h := adler32.New()
		lib.DeepHash(nativePrinters[i].Tags, h)
		nativePrinters[i].Tags["tagshash"] = fmt.Sprintf("%x", h.Sum(nil))

		h = adler32.New()
		lib.DeepHash(nativePrinters[i].Description, h)
		nativePrinters[i].CapsHash = fmt.Sprintf("%x", h.Sum(nil))
	}

	// Compare the snapshot to what we know currently.
	diffs := lib.DiffPrinters(nativePrinters, pm.printers.GetAll())
	if diffs == nil {
		log.Infof("Printers are already in sync; there are %d", len(nativePrinters))
		return nil
	}

	// Update GCP.
	ch := make(chan lib.Printer, len(diffs))
	for i := range diffs {
		go pm.applyDiff(&diffs[i], ch, ignorePrivet)
	}
	currentPrinters := make([]lib.Printer, 0, len(diffs))
	for range diffs {
		p := <-ch
		if p.Name != "" {
			currentPrinters = append(currentPrinters, p)
		}
	}

	// Update what we know.
	pm.printers.Refresh(currentPrinters)
	log.Infof("Finished synchronizing %d printers", len(currentPrinters))

	return nil
}
Example #18
// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
// Since we match objects by namespace and Labels/Selector, two objects with the same namespace
// and labels will have the same key.
func keyFunc(obj objectWithMeta) uint64 {
	hash := adler32.New()
	hashutil.DeepHashObject(hash, &equivalenceLabelObj{
		namespace: obj.GetNamespace(),
		labels:    obj.GetLabels(),
	})
	return uint64(hash.Sum32())
}
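A self-contained sketch of the idea: only namespace and labels feed the hash, so two objects that agree on both get the same key. The stand-in below dumps the struct with %#v instead of hashutil.DeepHashObject purely to stay dependency-free; note that %#v of a multi-entry map is not order-stable, which is one reason the real code uses DeepHashObject:

package main

import (
	"fmt"
	"hash/adler32"
)

// labelObj stands in for equivalenceLabelObj: only namespace and labels
// participate in the key.
type labelObj struct {
	namespace string
	labels    map[string]string
}

// key mimics keyFunc above, with a plain %#v dump in place of
// hashutil.DeepHashObject.
func key(o labelObj) uint64 {
	h := adler32.New()
	fmt.Fprintf(h, "%#v", o)
	return uint64(h.Sum32())
}

func main() {
	a := labelObj{namespace: "default", labels: map[string]string{"app": "web"}}
	b := labelObj{namespace: "default", labels: map[string]string{"app": "web"}}
	c := labelObj{namespace: "kube-system", labels: map[string]string{"app": "web"}}

	fmt.Println(key(a) == key(b)) // true: same namespace and labels
	fmt.Println(key(a) == key(c)) // false: different namespace
}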
Example #19
func hashSliceFactory(length int) []Hash32 {
	fmt.Println(length)
	hashSlice := []Hash32{}
	for j := 0; j <= 10; j++ {
		hashSlice = append(hashSlice, adler32.New())
	}
	return hashSlice
}
Example #20
// NewReader creates a new Reader reading the given reader.
func NewReader(r io.Reader) (*Reader, error) {
	z := new(Reader)
	z.adler32 = adler32.New()
	z.crc32 = crc32.NewIEEE()
	z.r = io.TeeReader(r, io.MultiWriter(z.adler32, z.crc32))
	if err := z.readHeader(); err != nil {
		return nil, err
	}
	return z, nil
}
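The Reader above tees every byte it reads into both checksums via io.TeeReader and io.MultiWriter. The same pattern in isolation, checksumming a stream while it is consumed:

package main

import (
	"fmt"
	"hash/adler32"
	"hash/crc32"
	"io"
	"strings"
)

func main() {
	a := adler32.New()
	c := crc32.NewIEEE()

	// Every byte read from r is also written to both hashes.
	r := io.TeeReader(strings.NewReader("hello world"), io.MultiWriter(a, c))

	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	fmt.Printf("adler32=%08x crc32=%08x\n", a.Sum32(), c.Sum32())
}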
Example #21
func (h *Hasher) initAlgorithm() {
	switch h.Algorithm {
	case "SHA1":
		h.Hash = sha1.New()
	case "MD5":
		h.Hash = md5.New()
	case "ADLER32":
		h.Hash = adler32.New()
	}
}
Example #22
func main() {
	var s string = "hello world"
	fmt.Printf("s=\"%s\"\n", s)
	adler := adler32.New()
	adler.Write([]byte(s))
	fmt.Printf("adler32(s)=%x\n", adler.Sum32())
	md := md5.New()
	md.Write([]byte(s))
	fmt.Printf("md5(s)=%x\n", md.Sum(nil))
}
Example #23
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
	container := &api.Container{Name: containerName}
	hasher := adler32.New()
	util.DeepHashObject(hasher, *container)
	computedHash := uint64(hasher.Sum32())
	podFullName := fmt.Sprintf("%s.%s", podName, podNamespace)
	name := BuildDockerName(podUID, podFullName, container)
	returnedPodFullName, returnedUID, returnedContainerName, hash := ParseDockerName(name)
	if podFullName != returnedPodFullName || podUID != returnedUID || containerName != returnedContainerName || computedHash != hash {
		t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returnedPodFullName, returnedUID, returnedContainerName, hash)
	}
}
func ZlibCompress(options *Options, in []byte, out io.Writer) error {
	var counter countingWriter
	if options.Verbose {
		counter = newCountingWriter(out)
		out = &counter
	}

	const cmf = 120 /* CM 8, CINFO 7. See zlib spec.*/
	const flevel = 0
	const fdict = 0
	var cmfflg uint16 = 256*cmf + fdict*32 + flevel*64
	fcheck := 31 - cmfflg%31
	cmfflg += fcheck
	flagBytes := []byte{
		byte(cmfflg >> 8),
		byte(cmfflg),
	}
	_, flagErr := out.Write(flagBytes)
	if flagErr != nil {
		return flagErr
	}

	z := NewDeflator(out, options)
	writeErr := z.Deflate(true, in)
	if writeErr != nil {
		return writeErr
	}

	checksum := adler32.New()
	checksum.Write(in)
	final := checksum.Sum32()
	checksumBytes := []byte{
		byte(final >> 24),
		byte(final >> 16),
		byte(final >> 8),
		byte(final),
	}
	_, checksumErr := out.Write(checksumBytes)
	if checksumErr != nil {
		return checksumErr
	}

	if options.Verbose {
		inSize := len(in)
		outSize := counter.written
		fmt.Fprintf(os.Stderr,
			"Original Size: %d, Zlib: %d, Compression: %f%% Removed\n",
			inSize, outSize,
			100*float64(inSize-outSize)/float64(inSize))
	}
	return nil
}
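The four trailer bytes assembled by hand above are just the Adler-32 checksum in big-endian order (RFC 1950); encoding/binary produces the same bytes and makes that intent explicit. A quick equivalence check:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/adler32"
)

func main() {
	in := []byte("some uncompressed input")
	final := adler32.Checksum(in)

	// Manual big-endian packing, as in ZlibCompress above.
	manual := []byte{
		byte(final >> 24),
		byte(final >> 16),
		byte(final >> 8),
		byte(final),
	}

	// The same trailer via encoding/binary.
	var trailer [4]byte
	binary.BigEndian.PutUint32(trailer[:], final)

	fmt.Println(bytes.Equal(manual, trailer[:])) // true
}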
Example #25
// Write (via the embedded io.Writer interface) adds more data to the
// running hash. It never returns an error.
func (d *digest) Write(p []byte) (int, error) {
	// Copy the window
	d.window = make([]byte, len(p))
	copy(d.window, p)

	// Piggy-back on the core implementation
	h := vanilla.New()
	h.Write(p)
	s := h.Sum32()
	d.a, d.b = s&0xffff, s>>16
	d.n = uint32(len(p)) % mod
	return len(d.window), nil
}
Example #26
func verifyPackUnpack(t *testing.T, podNamespace, podName, containerName string) {
	container := &api.Container{Name: containerName}
	hasher := adler32.New()
	data := fmt.Sprintf("%#v", *container)
	hasher.Write([]byte(data))
	computedHash := uint64(hasher.Sum32())
	podFullName := fmt.Sprintf("%s.%s", podName, podNamespace)
	name := BuildDockerName("", podFullName, container)
	returnedPodFullName, _, returnedContainerName, hash := ParseDockerName(name)
	if podFullName != returnedPodFullName || containerName != returnedContainerName || computedHash != hash {
		t.Errorf("For (%s, %s, %d), unpacked (%s, %s, %d)", podFullName, containerName, computedHash, returnedPodFullName, returnedContainerName, hash)
	}
}
Example #27
func makeHash(name string) hash.Hash {
	switch strings.ToLower(name) {
	case "ripemd160":
		return ripemd160.New()
	case "md4":
		return md4.New()
	case "md5":
		return md5.New()
	case "sha1":
		return sha1.New()
	case "sha256":
		return sha256.New()
	case "sha384":
		return sha512.New384()
	case "sha3-224":
		return sha3.New224()
	case "sha3-256":
		return sha3.New256()
	case "sha3-384":
		return sha3.New384()
	case "sha3-512":
		return sha3.New512()
	case "sha512":
		return sha512.New()
	case "sha512-224":
		return sha512.New512_224()
	case "sha512-256":
		return sha512.New512_256()
	case "crc32-ieee":
		return crc32.NewIEEE()
	case "crc64-iso":
		return crc64.New(crc64.MakeTable(crc64.ISO))
	case "crc64-ecma":
		return crc64.New(crc64.MakeTable(crc64.ECMA))
	case "adler32":
		return adler32.New()
	case "fnv32":
		return fnv.New32()
	case "fnv32a":
		return fnv.New32a()
	case "fnv64":
		return fnv.New64()
	case "fnv64a":
		return fnv.New64a()
	case "xor8":
		return new(xor8)
	case "fletch16":
		return &fletch16{}
	}
	return nil
}
Example #28
// writeHeader writes the ZLIB header.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5
	}
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(z.dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}
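Working the header comments through by hand for the default level (FLEVEL=2, no dictionary) reproduces the familiar 0x78 0x9c pair that the canned tables in the earlier examples hard-code:

package main

import "fmt"

func main() {
	cmf := byte(0x78)   // CINFO=7, CM=8
	flg := byte(2 << 6) // FLEVEL=2 (default), FDICT=0, FCHECK still zero
	h := uint16(cmf)<<8 + uint16(flg)
	flg += byte(31 - h%31)                // FCHECK: make the 16-bit header a multiple of 31
	fmt.Printf("%#02x %#02x\n", cmf, flg) // 0x78 0x9c
}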
Example #29
func uploadFile(client *Client, ccmBaseUrl string, filepath string, contentType string) (string, int64, int64, error) {
	uuid := generateUUID()
	file, err := os.Open(filepath)
	if err != nil {
		return "", -1, -1, err
	}
	hash := adler32.New()
	_, err = io.Copy(hash, file)

	if err != nil {
		return "", -1, -1, err
	}
	file.Close()
	sum := hash.Sum(nil)
	sumInt := int64(sum[0])<<(8*3) | int64(sum[1])<<(8*2) | int64(sum[2])<<(8*1) | int64(sum[3])

	file, err = os.Open(filepath)
	if err != nil {
		return "", -1, -1, err
	}
	defer file.Close()

	uploadFileServiceUrl := path.Join(ccmBaseUrl, "/team/service/com.ibm.team.repository.common.transport.IDirectWritingContentService", uuid, strconv.FormatInt(sumInt, 10))
	uploadFileServiceUrl = strings.Replace(uploadFileServiceUrl, ":/", "://", 1)
	request, err := http.NewRequest("PUT", uploadFileServiceUrl, file)
	if err != nil {
		return "", -1, -1, err
	}
	request.Header.Add("Content-Type", contentType)

	s, err := os.Stat(file.Name())
	if err != nil {
		return "", -1, -1, err
	}

	request.ContentLength = s.Size()

	response, err := client.Do(request)
	if err != nil {
		return "", -1, -1, err
	}
	defer response.Body.Close()

	if response.StatusCode != 200 {
		return "", -1, -1, errorFromResponse(response)
	}

	return uuid, s.Size(), sumInt, nil
}
Example #30
func (z *reader) Reset(r io.Reader, dict []byte) error {
	*z = reader{decompressor: z.decompressor}
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}

	// Read the header (RFC 1950 section 2.2.).
	_, z.err = io.ReadFull(z.r, z.scratch[0:2])
	if z.err != nil {
		if z.err == io.EOF {
			z.err = io.ErrUnexpectedEOF
		}
		return z.err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		z.err = ErrHeader
		return z.err
	}
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, z.err = io.ReadFull(z.r, z.scratch[0:4])
		if z.err != nil {
			if z.err == io.EOF {
				z.err = io.ErrUnexpectedEOF
			}
			return z.err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			z.err = ErrDictionary
			return z.err
		}
	}

	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}
	z.digest = adler32.New()
	return nil
}