Example #1
func main() {
	// TODO(tsileo): host flag
	// and display a usage func
	// API key as an env var
	// and support an optional namespace as arg
	// Store the latest uploaded blob as feature? log the uploaded? with an optional comment (import vkv)
	// and write a README
	// sharing feature (with a blob app to register?)
	apiKey := os.Getenv("BLOB_API_KEY")
	if len(os.Args) < 2 {
		fmt.Println("Missing hash, use \"-\" to upload from stdin")
		os.Exit(1)
	}
	blobstore := client.NewBlobStore("")
	blobstore.SetAPIKey(apiKey)
	if os.Args[1] == "-" {
		stdin, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			panic(err)
		}
		hash := fmt.Sprintf("%x", blake2b.Sum256(stdin))
		if err := blobstore.Put(hash, stdin); err != nil {
			panic(err)
		}
		fmt.Printf("%s", hash)
		os.Exit(0)
	}
	blob, err := blobstore.Get(os.Args[1])
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", blob)
	os.Exit(0)
}
Example #2
func (b *EncryptBackend) Put(hash string, rawData []byte) (err error) {
	// #blobstash/secretbox\n
	// data hash\n
	// data
	var nonce [24]byte
	//out := make([]byte, len(data) + secretbox.Overhead + 24 + headerSize)
	if err := GenerateNonce(&nonce); err != nil {
		return err
	}
	// First we compress the data with snappy
	data := snappy.Encode(nil, rawData)

	var out bytes.Buffer
	out.WriteString("#blobstash/secretbox\n")
	out.WriteString(fmt.Sprintf("%v\n", hash))
	encData := make([]byte, len(data)+secretbox.Overhead)
	secretbox.Seal(encData[0:0], data, &nonce, b.key)
	out.Write(nonce[:])
	out.Write(encData)
	encHash := fmt.Sprintf("%x", blake2b.Sum256(out.Bytes()))
	if err := b.dest.Put(encHash, out.Bytes()); err != nil {
		return err
	}
	b.Lock()
	b.index[hash] = encHash
	b.Unlock()
	blobsUploaded.Add(b.dest.String(), 1)
	bytesUploaded.Add(b.dest.String(), int64(len(out.Bytes())))
	return
}
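The blob layout written above is a "#blobstash/secretbox" header line, the plain-text hash on its own line, a 24-byte nonce, then the secretbox-sealed snappy payload. Below is a minimal sketch of reading that layout back; the function name, signature and error handling are illustrative assumptions (using bytes, io, strings, snappy and nacl/secretbox), not the project's actual Get implementation.

func decodeSecretbox(raw []byte, key *[32]byte) (hash string, data []byte, err error) {
	buf := bytes.NewBuffer(raw)
	// Skip the "#blobstash/secretbox\n" header line
	if _, err = buf.ReadString('\n'); err != nil {
		return
	}
	// The plain-text hash of the original (uncompressed) data
	line, err := buf.ReadString('\n')
	if err != nil {
		return
	}
	hash = strings.TrimSuffix(line, "\n")
	// 24-byte nonce, then the sealed snappy-compressed payload
	var nonce [24]byte
	if _, err = io.ReadFull(buf, nonce[:]); err != nil {
		return
	}
	compressed, ok := secretbox.Open(nil, buf.Bytes(), &nonce, key)
	if !ok {
		return hash, nil, fmt.Errorf("secretbox: failed to open blob")
	}
	data, err = snappy.Decode(nil, compressed)
	return hash, data, err
}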
Example #3
func TestBlobsIndex(t *testing.T) {
	index, err := NewIndex("tmp_test_index")
	check(err)
	defer index.Close()
	defer os.RemoveAll("tmp_test_index")

	bp := &BlobPos{n: 1, offset: 5, size: 10}
	h := fmt.Sprintf("%x", blake2b.Sum256([]byte("fakehash")))
	err = index.SetPos(h, bp)
	check(err)
	bp3, err := index.GetPos(h)
	check(err)
	if bp.n != bp3.n || bp.offset != bp3.offset || bp.size != bp3.size {
		t.Errorf("index.GetPos error, expected:%+v, got:%+v", bp, bp3)
	}

	err = index.SetN(5)
	check(err)
	n2, err := index.GetN()
	check(err)
	if n2 != 5 {
		t.Errorf("Error GetN, got %v, expected 5", n2)
	}
	err = index.SetN(100)
	check(err)
	n2, err = index.GetN()
	check(err)
	if n2 != 100 {
		t.Errorf("Error GetN, got %v, expected 100", n2)
	}
}
Example #4
func NewRandomName() string {
	c := 12
	b := make([]byte, c)
	n, err := io.ReadFull(rand.Reader, b)
	if n != len(b) || err != nil {
		panic(err)
	}
	return fmt.Sprintf("%x", blake2b.Sum256(b))
}
Example #5
func (docstore *DocStoreExt) docHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			panic("missing collection query arg")
		}
		sid := vars["_id"]
		if sid == "" {
			panic("missing _id query arg")
		}
		var _id *id.ID
		var err error
		srw := httputil.NewSnappyResponseWriter(w, r)
		defer srw.Close()
		switch r.Method {
		case "GET":
			js := []byte{}
			if _id, err = docstore.fetchDoc(collection, sid, &js); err != nil {
				panic(err)
			}
			w.Header().Set("Content-Type", "application/json")
			srw.Write(addID(js, sid))
		case "POST":
			doc := map[string]interface{}{}
			if _id, err = docstore.fetchDoc(collection, sid, &doc); err != nil {
				panic(err)
			}
			var update map[string]interface{}
			decoder := json.NewDecoder(r.Body)
			if err := decoder.Decode(&update); err != nil {
				panic(err)
			}
			docstore.logger.Debug("Update", "_id", sid, "update_query", update)
			newDoc, err := updateDoc(doc, update)
			if err != nil {
				panic(err)
			}
			blob, err := json.Marshal(newDoc)
			if err != nil {
				panic(err)
			}
			hash := fmt.Sprintf("%x", blake2b.Sum256(blob))
			if err := docstore.blobStore.Put(hash, blob); err != nil {
				panic(err)
			}
			bash := make([]byte, len(hash)+1)
			// FIXME(tsileo) fetch previous flag and set the same
			bash[0] = FlagIndexed
			copy(bash[1:], []byte(hash)[:])
			if _, err := docstore.kvStore.Put(fmt.Sprintf(KeyFmt, collection, _id.String()), string(bash), -1); err != nil {
				panic(err)
			}
			httputil.WriteJSON(srw, newDoc)
		}
		w.Header().Set("BlobStash-DocStore-Doc-Id", sid)
		w.Header().Set("BlobStash-DocStore-Doc-Hash", _id.Hash())
		w.Header().Set("BlobStash-DocStore-Doc-CreatedAt", strconv.Itoa(_id.Ts()))
	}
}
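The kv value built above ("bash") is a one-byte flag followed by the hex hash of the JSON blob. A minimal sketch of reading such a pointer back; the helper name is an assumption for illustration:

// parseDocPointer splits a stored value into its flag byte and the hex
// hash of the document blob it points to.
func parseDocPointer(val string) (flag byte, hash string, err error) {
	if len(val) < 2 {
		return 0, "", fmt.Errorf("invalid doc pointer: %q", val)
	}
	return val[0], val[1:], nil
}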
Example #6
// RandomBlob generates a random BlobTest.
func RandomBlob(content []byte) *BlobTest {
	var data []byte
	if content == nil {
		data = make([]byte, 512)
		if _, err := rand.Read(data); err != nil {
			panic(err)
		}
	} else {
		data = content
	}
	hash := fmt.Sprintf("%x", blake2b.Sum256(data))
	return &BlobTest{hash, data}
}
Example #7
func (stc *SyncTableClient) saveBlob(hash string, blob []byte) error {
	req := &router.Request{
		Type:      router.Write,
		Namespace: stc.namespace,
	}
	// Ensures the blob isn't corrupted
	chash := fmt.Sprintf("%x", blake2b.Sum256(blob))
	if hash != chash {
		return fmt.Errorf("Blob %s is corrupted", hash)
	}
	stc.blobs <- &router.Blob{Hash: hash, Req: req, Blob: blob}
	return nil
}
Example #8
// UploadBlob uploads a blob to BlobDB and returns its hash
func UploadBlob(blob []byte) (blobHash string, err error) {
	blobHash = fmt.Sprintf("%x", blake2b.Sum256(blob))
	exists, err := bs.Stat(blobHash)
	if err != nil {
		return blobHash, err
	}
	if !exists {
		if err := bs.Put(blobHash, blob); err != nil {
			return blobHash, err
		}
	}
	return blobHash, nil
}
Example #9
// NewHandshake creates a new handshake state.
func NewHandshake(addr string, conn io.Writer, conf *PeerConf) *Handshake {
	state := Handshake{
		addr:     addr,
		conn:     conn,
		LastPing: time.Now(),
		Conf:     conf,
	}
	state.dsaPubH = new([ed25519.PublicKeySize]byte)
	copy(state.dsaPubH[:], state.Conf.Verifier.Pub[:])
	hashed := blake2b.Sum256(state.dsaPubH[:])
	state.dsaPubH = &hashed
	return &state
}
Example #10
func (lua *LuaExt) RegisterHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		// POST registers the uploaded Lua script as a new app.
		case "POST":
			appID := r.URL.Query().Get("appID")
			if appID == "" {
				panic("Missing \"appID\"")
			}
			public, _ := strconv.ParseBool(r.URL.Query().Get("public"))
			inMem, _ := strconv.ParseBool(r.URL.Query().Get("in_memory"))
			// TODO(tsileo) Handle ACL like "authorized_methods" GET,POST...
			// FIXME Find a better way to setup the environment
			//parse the multipart form in the request
			mr, err := r.MultipartReader()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			part, err := mr.NextPart()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			filename := part.FormName()
			var buf bytes.Buffer
			if _, err := buf.ReadFrom(part); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			blob := buf.Bytes()
			chash := fmt.Sprintf("%x", blake2b.Sum256(blob))
			app := &LuaApp{
				Name:   filename,
				Public: public,
				AppID:  appID,
				Script: blob,
				Hash:   chash,
				InMem:  inMem,
				Stats:  NewAppStats(),
				APIKey: uuid.NewV4().String(),
			}
			lua.appMutex.Lock()
			lua.registeredApps[appID] = app
			lua.appMutex.Unlock()
			lua.logger.Info("Registered new app", "appID", appID, "app", app.String())

		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	}
}
Example #11
func (s *Server) processBlobs() {
	s.wg.Add(1)
	defer s.wg.Done()
	for {
		select {
		case blob := <-s.blobs:
			log.Printf("processBlobs: %+v", blob)
			backend := s.Router.Route(blob.Req)
			exists, err := backend.Exists(blob.Hash)
			if err != nil {
				panic(fmt.Errorf("processBlobs error: %v", err))
			}
			if !exists {
				if err := backend.Put(blob.Hash, blob.Blob); err != nil {
					panic(fmt.Errorf("processBlobs error: %v", err))
				}
			}
			if blob.Req.MetaBlob {
				if err := s.NsDB.ApplyMeta(blob.Hash); err != nil {
					panic(err)
				}
			}
			if blob.Req.Namespace != "" {
				nsBlobBody := meta.CreateNsBlob(blob.Hash, blob.Req.Namespace)
				nsHash := fmt.Sprintf("%x", blake2b.Sum256(nsBlobBody))
				s.blobs <- &router.Blob{
					Blob: nsBlobBody,
					Hash: nsHash,
					Req: &router.Request{
						Type:   router.Write,
						NsBlob: true,
					},
				}
				if err := s.NsDB.AddNs(blob.Hash, blob.Req.Namespace); err != nil {
					panic(err)
				}
			}
		case <-s.stop:
			return
		}
	}
}
Example #12
func blobUploadHandler(blobs chan<- *router.Blob) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		// POST reads the uploaded blob(s) and queues them for storage.
		case "POST":
			//parse the multipart form in the request
			mr, err := r.MultipartReader()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}

			for {
				part, err := mr.NextPart()
				if err == io.EOF {
					break
				}
				if err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				hash := part.FormName()
				var buf bytes.Buffer
				if _, err := buf.ReadFrom(part); err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				blob := buf.Bytes()
				chash := fmt.Sprintf("%x", blake2b.Sum256(blob))
				if hash != chash {
					http.Error(w, "blob corrupted, hash does not match", http.StatusInternalServerError)
					return
				}
				req := &router.Request{
					Type:      router.Write,
					Namespace: r.URL.Query().Get("ns"),
				}
				blobs <- &router.Blob{Hash: hash, Req: req, Blob: blob}
			}
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	}
}
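A hedged sketch of a matching client for the handler above: it sends each blob as a multipart part whose field name is the hex BLAKE2b-256 hash of the content, which is exactly what the handler recomputes and verifies. The endpoint handling and helper name are assumptions (using bytes, mime/multipart, net/http and net/url):

// uploadBlob posts a single blob, using its hash as the multipart field name.
func uploadBlob(endpoint, ns string, blob []byte) error {
	hash := fmt.Sprintf("%x", blake2b.Sum256(blob))
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	part, err := mw.CreateFormField(hash)
	if err != nil {
		return err
	}
	if _, err := part.Write(blob); err != nil {
		return err
	}
	if err := mw.Close(); err != nil {
		return err
	}
	resp, err := http.Post(endpoint+"?ns="+url.QueryEscape(ns), mw.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}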
Example #13
func (docstore *DocStoreExt) Insert(collection string, idoc interface{}) (*id.ID, error) {
	switch doc := idoc.(type) {
	case *map[string]interface{}:
		// fdoc := *doc
		docFlag := FlagNoIndex
		// docFlag = FlagIndexed
		blob, err := json.Marshal(doc)
		if err != nil {
			return nil, err
		}

		// FIXME(tsileo): What to do if there's an empty ID?

		// Store the payload in a blob
		hash := fmt.Sprintf("%x", blake2b.Sum256(blob))
		if err := docstore.blobStore.Put(hash, blob); err != nil {
			return nil, err
		}
		// Create a pointer in the key-value store
		now := time.Now().UTC().Unix()
		_id, err := id.New(int(now))
		if err != nil {
			return nil, err
		}
		bash := make([]byte, len(hash)+1)
		bash[0] = docFlag
		copy(bash[1:], []byte(hash)[:])
		if _, err := docstore.kvStore.Put(fmt.Sprintf(KeyFmt, collection, _id.String()), string(bash), -1); err != nil {
			return nil, err
		}
		// Returns the doc along with its new ID
		// if err := docstore.index.Index(_id.String(), doc); err != nil {
		// return err
		// }
		_id.SetHash(hash)
		return _id, nil
	}
	return nil, fmt.Errorf("shouldn't happen")
}
Example #14
func (mh *MetaHandler) processKvUpdate(wg *sync.WaitGroup, blobs chan<- *router.Blob, kvUpdate <-chan *vkv.KeyValue, vkvhub *hub.Hub) {
	wg.Add(1)
	defer wg.Done()
	for kv := range kvUpdate {
		mh.log.Debug(fmt.Sprintf("kvupdate: %+v", kv))
		go vkvhub.Pub(kv.Key, fmt.Sprintf("%d:%s", kv.Version, kv.Value))
		blob := CreateMetaBlob(kv)
		req := &router.Request{
			MetaBlob:  true,
			Type:      router.Write,
			Namespace: kv.Namespace(),
		}
		hash := fmt.Sprintf("%x", blake2b.Sum256(blob))
		if err := kv.SetMetaBlob(hash); err != nil {
			panic(err)
		}
		select {
		case blobs <- &router.Blob{Req: req, Hash: hash, Blob: blob}:
		case <-mh.stop:
			mh.log.Info("Stopping...")
			return
		}
	}
}
Example #15
// reindex scans all BlobsFile and reconstruct the index from scratch.
func (backend *BlobsFileBackend) reindex() error {
	backend.log.Info("re-indexing BlobsFiles...")
	if backend.writeOnly {
		panic("can't re-index in write-only mode")
	}
	if backend.Len() != 0 {
		panic("can't re-index, an non-empty backend already exists")
	}
	n := 0
	for {
		err := backend.ropen(n)
		if os.IsNotExist(err) {
			break
		}
		if err != nil {
			return err
		}
		offset := 6
		blobsfile := backend.files[n]
		blobsIndexed := 0
		for {
			// SCAN
			blobHash := make([]byte, hashSize)
			blobSizeEncoded := make([]byte, 4)
			flags := make([]byte, 1)
			if _, err := blobsfile.Read(blobHash); err == io.EOF {
				break
			}
			if _, err := blobsfile.Read(flags); err != nil {
				return err
			}
			if _, err := blobsfile.Read(blobSizeEncoded); err != nil {
				return err
			}
			blobSize := binary.LittleEndian.Uint32(blobSizeEncoded)
			rawBlob := make([]byte, int(blobSize))
			read, err := blobsfile.Read(rawBlob)
			if err != nil || read != int(blobSize) {
				return fmt.Errorf("error while reading raw blob: %v", err)
			}
			if flags[0] == Deleted {
				backend.log.Debug("blob deleted, continue indexing")
				offset += Overhead + int(blobSize)
				continue
			}
			blobPos := &BlobPos{n: n, offset: offset, size: int(blobSize)}
			offset += Overhead + int(blobSize)
			var blob []byte
			if backend.snappyCompression {
				blobDecoded, err := snappy.Decode(nil, rawBlob)
				if err != nil {
					return fmt.Errorf("failed to decode blob: %v %v %v", err, blobSize, flags)
				}
				blob = blobDecoded
			} else {
				blob = rawBlob
			}
			hash := fmt.Sprintf("%x", blake2b.Sum256(blob))
			if fmt.Sprintf("%x", blobHash) != hash {
				return fmt.Errorf("hash doesn't match %v/%v", fmt.Sprintf("%x", blobHash), hash)
			}
			if err := backend.index.SetPos(hash, blobPos); err != nil {
				return err
			}
			blobsIndexed++
		}
		log.Printf("BlobsFileBackend: %v re-indexed (%v blobs)", backend.filename(n), blobsIndexed)
		n++
	}
	if n == 0 {
		backend.log.Debug("no BlobsFiles found for re-indexing")
		return nil
	}
	if err := backend.saveN(); err != nil {
		return err
	}
	return nil
}
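For reference, a sketch of the on-disk record layout the scan above expects: the raw hash bytes, a one-byte flag, the blob size as a little-endian uint32, then the (optionally snappy-compressed) blob bytes. The helper name is an assumption, and the hash length must match the backend's hashSize:

// encodeRecord lays out one BlobsFile record as parsed by reindex:
// hash | flag (1 byte) | size (uint32, little-endian) | blob bytes.
func encodeRecord(hash []byte, flag byte, blob []byte) []byte {
	rec := make([]byte, 0, len(hash)+1+4+len(blob))
	rec = append(rec, hash...)
	rec = append(rec, flag)
	var size [4]byte
	binary.LittleEndian.PutUint32(size[:], uint32(len(blob)))
	rec = append(rec, size[:]...)
	rec = append(rec, blob...)
	return rec
}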
Example #16
func setCustomGlobals(L *luamod.LState) {
	// Return the server unix timestamp
	L.SetGlobal("unix", L.NewFunction(func(L *luamod.LState) int {
		L.Push(luamod.LNumber(time.Now().Unix()))
		return 1
	}))

	// Generate a random hexadecimal ID with the current timestamp as the first 4 bytes;
	// this means keys will be sorted by creation date when sorted lexicographically
	L.SetGlobal("hexid", L.NewFunction(func(L *luamod.LState) int {
		id, err := hexid.New(int(time.Now().UTC().Unix()))
		if err != nil {
			panic(err)
		}
		L.Push(luamod.LString(id.String()))
		return 1
	}))

	// Compute the Blake2B hash for the given string
	L.SetGlobal("blake2b", L.NewFunction(func(L *luamod.LState) int {
		hash := fmt.Sprintf("%x", blake2b.Sum256([]byte(L.ToString(1))))
		L.Push(luamod.LString(hash))
		return 1
	}))

	// Sleep for the given number of seconds
	L.SetGlobal("sleep", L.NewFunction(func(L *luamod.LState) int {
		time.Sleep(time.Duration(float64(L.ToNumber(1)) * float64(1e9)))
		return 0
	}))

	// Convert the given Markdown to HTML
	L.SetGlobal("markdownify", L.NewFunction(func(L *luamod.LState) int {
		output := blackfriday.MarkdownCommon([]byte(L.ToString(1)))
		L.Push(luamod.LString(string(output)))
		return 1
	}))

	// render executes a Go HTML template; data must be a table with string keys
	L.SetGlobal("render", L.NewFunction(func(L *luamod.LState) int {
		tplString := L.ToString(1)
		data := luautil.TableToMap(L.ToTable(2))
		tpl, err := template.New("tpl").Parse(tplString)
		if err != nil {
			L.Push(luamod.LString(err.Error()))
			return 1
		}
		// TODO(tsileo) add some templateFuncs/template filters
		out := &bytes.Buffer{}
		if err := tpl.Execute(out, data); err != nil {
			L.Push(luamod.LString(err.Error()))
			return 1
		}
		L.Push(luamod.LString(out.String()))
		return 1
	}))

	// TODO(tsileo) a urljoin?

	// Return an absolute URL for the given path
	L.SetGlobal("url", L.NewFunction(func(L *luamod.LState) int {
		// FIXME(tsileo) take the host from the req?
		L.Push(luamod.LString("http://localhost:8050" + L.ToString(1)))
		return 1
	}))
}
Example #17
func TestBlobStore(t *testing.T) {
	dir, err := ioutil.TempDir("", "blobfs")
	if err != nil {
		panic(err)
	}
	t.Logf("tmp dir=%+v\n", dir)

	defer os.RemoveAll(dir) // clean up

	bs, err := New(dir, "testblobfs")
	if err != nil {
		panic(err)
	}
	defer func() {
		bs.Close()
		bs.Destroy()
	}()
	blobs := []*Blob{}
	blobsIndex := map[string]struct{}{}

	// Create fixtures
	for i := 0; i < 10; i++ {
		blob := make([]byte, 1024*8)
		if _, err := rand.Read(blob); err != nil {
			panic(err)
		}
		hash := fmt.Sprintf("%x", blake2b.Sum256(blob))

		blobs = append(blobs, &Blob{hash, blob})
		blobsIndex[hash] = struct{}{}

		// Put the just created blob
		if err := bs.Put(hash, blob); err != nil {
			panic(err)
		}
	}
	for _, blob := range blobs {
		ok, err := bs.Stat(blob.Hash)
		if !ok {
			t.Errorf("failed to stat blob %s", blob.Hash)
		}
		if err != nil {
			panic(err)
		}
		blob2, err := bs.Get(blob.Hash)
		if err != nil {
			panic(err)
		}

		if !bytes.Equal(blob.Data, blob2) {
			t.Errorf("failed to fetch blob %s", blob.Hash)
		}

	}

	// Delete a blob
	blob1 := blobs[0]
	blobs = blobs[1:]

	if err := bs.Remove(blob1.Hash); err != nil {
		panic(err)
	}

	// Ensure Stat now returns false
	ok, err := bs.Stat(blob1.Hash)
	if err != nil {
		panic(err)
	}
	if ok {
		t.Errorf("blob %s should be removed", blob1.Hash)
	}

	if _, err := bs.Get(blob1.Hash); err != ErrBlobNotFound {
		t.Errorf("Blob %s should not be present", blob1.Hash)
	}

	cnt := 0
	walkFunc := func(path, hash string, err error) error {
		cnt++
		if _, ok := blobsIndex[hash]; !ok {
			t.Errorf("blob %s not in index", hash)
		}
		return nil
	}

	if err := bs.Iter(walkFunc); err != nil {
		panic(err)
	}

	if cnt != 9 {
		t.Errorf("9 blobs expected, got %d", cnt)
	}
}
Example #18
func dhKeyGen(priv, pub *[32]byte) *[32]byte {
	key := new([32]byte)
	curve25519.ScalarMult(key, priv, pub)
	hashed := blake2b.Sum256(key[:])
	return &hashed
}
Example #19
File: hash.go Project: mm3/Sia
// HashBytes takes a byte slice and returns its BLAKE2b-256 hash.
func HashBytes(data []byte) Hash {
	return Hash(blake2b.Sum256(data))
}
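Nearly every example above relies on the same pattern: hash a byte slice with BLAKE2b-256 and hex-encode the digest for use as a content address. A minimal, self-contained sketch of that pattern, assuming the golang.org/x/crypto/blake2b package:

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

// hashHex returns the lowercase hex BLAKE2b-256 digest of data.
func hashHex(data []byte) string {
	sum := blake2b.Sum256(data) // Sum256 returns a [32]byte digest
	return fmt.Sprintf("%x", sum[:])
}

func main() {
	fmt.Println(hashHex([]byte("hello world")))
}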