Example #1
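// Test_Shadow_ToStringFull checks that Manifest.String renders the full
// BAR:MANIFEST text, including the chunk section, for a single-chunk manifest.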
func Test_Shadow_ToStringFull(t *testing.T) {
	id := proto.ID("ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c")
	expect := fixtures.FixStream(`BAR:MANIFEST

		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234


		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234
		offset 0
		`)

	sh := &proto.Manifest{
		proto.Data{id, 1234},
		[]proto.Chunk{
			{proto.Data{id, 1234}, 0},
		},
	}

	assert.Equal(t, expect, sh.String())
}
Example #2
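// Test_Assembler_StoreChunk stores a chunk through the Assembler and verifies
// that the file written under a.Where matches the original data.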
func Test_Assembler_StoreChunk(t *testing.T) {
	wd, _ := os.Getwd()
	wd = filepath.Join(wd, "testdata", "assembler-StoreChunk")
	m, err := model.New(wd, false, proto.CHUNK_SIZE, 128)
	assert.NoError(t, err)

	data := []byte("mama myla ramu")
	hasher := sha3.New256()
	_, err = hasher.Write(data)
	assert.NoError(t, err)
	id := proto.ID(hex.EncodeToString(hasher.Sum(nil)))

	a, err := model.NewAssembler(m)
	assert.NoError(t, err)
	defer a.Close()

	err = a.StoreChunk(bytes.NewReader(data), id)
	assert.NoError(t, err)

	// check stored chunk
	f, err := os.Open(filepath.Join(a.Where, id.String()))
	assert.NoError(t, err)
	defer f.Close()
	defer os.Remove(filepath.Join(a.Where, id.String()))

	r2, err := ioutil.ReadAll(f)
	assert.NoError(t, err)

	assert.Equal(t, data, r2)
}
Example #3
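// Test_Proto_Spec1 builds a spec from two identical manifests and checks that
// its ID matches a hand-made SHA3-256 fixture over the sorted names and manifest IDs.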
func Test_Proto_Spec1(t *testing.T) {
	m1, err := proto.NewFromManifest(fixtures.CleanInput(`BAR:MANIFEST

		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234


		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234
		offset 0
	`))
	assert.NoError(t, err)
	m2, err := proto.NewFromManifest(fixtures.CleanInput(`BAR:MANIFEST

		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234


		id ac934d9a88b42aa3b40ef7c81a9dee1aad5a2cddccb00ae6abab9c38095fc15c
		size 1234
		offset 0
	`))
	assert.NoError(t, err)

	spec, err := proto.NewSpec(time.Now().UnixNano(), map[string]proto.ID{
		"file/1": m1.ID,
		"file/2": m2.ID,
	}, []string{})
	assert.NoError(t, err)

	// hand-made fixture
	var sorted sort.StringSlice
	sorted = append(sorted, "file/2")
	sorted = append(sorted, "file/1")
	sorted.Sort()

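	// expected spec ID: SHA3-256 over each sorted name followed by the decoded manifest ID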
	hasher := sha3.New256()
	// decode the manifest ID into raw bytes (assuming Decode fills the provided buffer)
	id := make([]byte, hex.DecodedLen(len(m1.ID)))

	err = m1.ID.Decode(id)
	assert.NoError(t, err)

	_, _ = hasher.Write([]byte(sorted[0]))
	_, _ = hasher.Write(id)

	_, _ = hasher.Write([]byte(sorted[1]))
	_, _ = hasher.Write(id)

	assert.Equal(t, spec.ID, proto.ID(hex.EncodeToString(hasher.Sum(nil))))
}
Example #4
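// UploadChunk stores one chunk of an upload session: it writes the reader into
// a file named by the chunk ID under the session directory and finalizes it with Accept.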
func (s *BlockStorage) UploadChunk(uploadID uuid.UUID, chunkID proto.ID, r io.Reader) (err error) {
	lock, err := s.FDLocks.Take()
	if err != nil {
		return
	}
	defer lock.Release()
	hexid := proto.ID(hex.EncodeToString(uploadID[:]))

	n := filepath.Join(s.idPath(upload_ns, hexid), chunkID.String())
	w, err := s.getCAFile(n)
	if err != nil {
		return
	}
	defer w.Close()

	if _, err = io.Copy(w, r); err != nil {
		return
	}

	err = w.Accept()
	return
}
Example #5
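// HandleSpec renders the HTML page for a stored spec, answering 404 for an
// unknown spec ID and 500 on storage errors.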
func (h *Handlers) HandleSpec(w http.ResponseWriter, r *http.Request) {
	id := proto.ID(strings.TrimPrefix(r.URL.Path, "/v1/spec/"))

	logx.Debugf("serving spec %s", id)

	ok, err := h.Storage.IsSpecExists(id)
	if err != nil {
		logx.Error(err)
		w.WriteHeader(500)
		return
	}
	if !ok {
		logx.Errorf("bad spec id %s", id)
		w.WriteHeader(404)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	h.handleTpl(w, "spec", map[string]interface{}{
		"Info":    h.options.Info,
		"ID":      id,
		"ShortID": id[:12],
	})
}
Example #6
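// FinishUploadSession assembles the blobs described by the session manifests
// from their uploaded chunks, moves the manifests into permanent storage and
// removes the upload session directory.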
func (s *BlockStorage) FinishUploadSession(uploadID uuid.UUID) (err error) {
	hexid := proto.ID(hex.EncodeToString(uploadID[:]))
	base := s.idPath(upload_ns, hexid)
	defer os.RemoveAll(base)

	// load manifests
	manifests_base := filepath.Join(base, manifests_ns)
	var manifests []proto.Manifest
	if err = func() (err error) {
		lock, err := s.FDLocks.Take()
		if err != nil {
			return
		}
		defer lock.Release()

		err = filepath.Walk(manifests_base, func(path string, info os.FileInfo, ferr error) (err error) {
			// propagate walk errors instead of silently ignoring them
			if ferr != nil {
				return ferr
			}
			if strings.HasSuffix(path, "-manifest.json") {
				var man proto.Manifest
				if man, err = s.readManifest(path); err != nil {
					return
				}
				manifests = append(manifests, man)
			}
			return
		})
		return
	}(); err != nil {
		return
	}

	// collect all manifests
	var req, res []interface{}
	for _, v := range manifests {
		req = append(req, v)
	}

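	// assemble blobs concurrently via the batch pool; AllowErrors presumably
	// lets individual manifests fail without aborting the whole batch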
	err = s.BatchPool.Do(
		func(ctx context.Context, in interface{}) (out interface{}, err error) {
			lock, err := s.FDLocks.Take()
			if err != nil {
				return
			}
			defer lock.Release()

			m := in.(proto.Manifest)
			target := s.idPath(blob_ns, m.ID)

			f, fErr := s.getCAFile(target)
			if os.IsExist(fErr) {
				return
			} else if fErr != nil {
				err = fErr
				return
			}
			defer f.Close()
			logx.Debugf("assembling %s", m.ID)

			for _, chunk := range m.Chunks {
				if err = func(chunk proto.Chunk) (err error) {
					lock, err := s.FDLocks.Take()
					if err != nil {
						return
					}
					defer lock.Release()

					r, err := os.Open(filepath.Join(base, chunk.ID.String()))
					if err != nil {
						return
					}
					defer r.Close()

					_, err = io.Copy(f, r)
					return
				}(chunk); err != nil {
					return
				}
			}
			if err = f.Accept(); err != nil {
				return
			}

			// move manifest to its permanent location
			manTarget := s.idPath(manifests_ns, m.ID) + ".json"
			if err = os.MkdirAll(filepath.Dir(manTarget), 0755); err != nil {
				return
			}
			err = os.Rename(filepath.Join(manifests_base, m.ID.String()+"-manifest.json"), manTarget)
			return
		}, &req, &res, concurrency.DefaultBatchOptions().AllowErrors(),
	)
	return
}
Example #7
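// CreateUploadSession prepares an upload session: it records manifests for
// blobs not yet present in storage, writes the session expiry timestamp and
// returns the chunk IDs the client still needs to upload.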
func (s *BlockStorage) CreateUploadSession(uploadID uuid.UUID, in []proto.Manifest, ttl time.Duration) (missing []proto.ID, err error) {
	hexid := proto.ID(hex.EncodeToString(uploadID[:]))

	// take lock
	lock, err := s.FDLocks.Take()
	if err != nil {
		return
	}
	defer lock.Release()

	// Create directories and write support data
	base := filepath.Join(s.idPath(upload_ns, hexid), manifests_ns)
	if err = os.MkdirAll(base, 0755); err != nil {
		return
	}

	var missingBlobs []proto.Manifest

	for _, m := range in {
		if err = func(m proto.Manifest) (err error) {
			var statErr error
			_, statErr = os.Stat(s.idPath(manifests_ns, m.ID))
			if os.IsNotExist(statErr) {
				missingBlobs = append(missingBlobs, m)
			} else if statErr != nil {
				err = statErr
				return
			} else {
				// exists - ok
				return
			}

			w, err := os.OpenFile(filepath.Join(base, m.ID.String()+"-manifest.json"),
				os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
			if err != nil {
				return
			}
			defer w.Close()
			err = json.NewEncoder(w).Encode(&m)
			return
		}(m); err != nil {
			return
		}
	}

	missing = proto.ManifestSlice(missingBlobs).GetChunkSlice()

	w, err := os.OpenFile(filepath.Join(s.idPath(upload_ns, hexid), "expires.timestamp"),
		os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
	if err != nil {
		return
	}
	defer w.Close()

	if _, err = w.Write([]byte(fmt.Sprintf("%d", time.Now().Add(ttl).UnixNano()))); err != nil {
		return
	}
	logx.Debugf("upload session %s created succefully", hexid)
	return
}
Example #8
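// Run imports a spec: it reads the spec either as raw JSON from stdin or by ID
// from the server, resolves its manifests, optionally squashes the referenced
// blobs locally and prints the file names from the spec.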
func (c *SpecImportCmd) Run(args ...string) (err error) {
	var spec proto.Spec

	mod, err := model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}
	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)

	if c.Raw {
		if err = json.NewDecoder(c.Stdin).Decode(&spec); err != nil {
			return
		}
	} else {
		// spec ID is passed as the first argument and fetched from the server
		id := proto.ID(args[0])

		if spec, err = trans.GetSpec(id); err != nil {
			logx.Debug(spec, err)
			return
		}
	}

	idm := lists.IDMap{}
	for n, id := range spec.BLOBs {
		idm[id] = append(idm[id], n)
	}

	// request manifests for the referenced blob IDs and build the blob feed
	mans, err := trans.GetManifests(idm.IDs())
	if err != nil {
		return
	}
	feed := idm.ToBlobMap(mans)
	names := feed.Names()

	if len(names) == 0 {
		logx.Fatalf("no manifests on server %s", names)
	}

	logx.Debugf("importing %s", names)

	if c.UseGit {
		// If git is used - check names for attrs
		byAttr, err := mod.Git.FilterByAttr("bar", names...)
		if err != nil {
			return err
		}

		diff := []string{}
		attrs := map[string]struct{}{}
		for _, x := range byAttr {
			attrs[x] = struct{}{}
		}

		for _, x := range names {
			if _, ok := attrs[x]; !ok {
				diff = append(diff, x)
			}
		}
		if len(diff) > 0 {
			return fmt.Errorf("some spec blobs is not under bar control %s", diff)
		}
	}

	// get stored links, ignore errors
	stored, _ := mod.FeedManifests(true, true, false, names...)

	logx.Debugf("already stored %s", stored.Names())

	// collect blobs that are not already stored with the same ID; only these need squashing
	toSquash := lists.BlobMap{}
	for n, m := range feed {
		m1, ok := stored[filepath.FromSlash(n)]
		if !ok || m.ID != m1.ID {
			toSquash[n] = feed[n]
		}
	}

	if c.Squash {
		if err = mod.SquashBlobs(toSquash); err != nil {
			return
		}
	}
	for k := range feed {
		fmt.Fprintf(c.Stdout, "%s ", filepath.FromSlash(k))
	}
	return
}