Example #1
// This is the simple 1MB chunk version. The rolling checksum version is below.
func WriteFileMap(bs blobserver.StatReceiver, fileMap map[string]interface{}, r io.Reader) (*blobref.BlobRef, os.Error) {
	parts, size := []BytesPart{}, int64(0)

	buf := new(bytes.Buffer)
	for {
		buf.Reset()

		n, err := io.Copy(buf, io.LimitReader(r, 1<<20))
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}

		// Hash the chunk to derive its content-addressed blobref.
		hash := crypto.SHA1.New()
		hash.Write(buf.Bytes())
		br := blobref.FromHash("sha1", hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, buf)
			if err != nil {
				return nil, err
			}
			if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
				return nil, fmt.Errorf("schema/filewriter: wrote %s, got %s ack'd", expect, sb)
			}
		}

		size += n
		parts = append(parts, BytesPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // offset into BlobRef to read from (not the offset in the destination)
		})
	}

	err := PopulateParts(fileMap, size, parts)
	if err != nil {
		return nil, err
	}

	json, err := MapToCamliJson(fileMap)
	if err != nil {
		return nil, err
	}
	br := blobref.Sha1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return nil, err
	}
	if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
		return nil, fmt.Errorf("schema/filewriter: wrote %s, got %s ack'd", expect, sb)
	}

	return br, nil
}
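
A minimal caller sketch, for context. The schema.NewFileMap helper and the package qualifiers are assumptions for illustration, not taken from the example above:

// Sketch: chunk-upload a local file and return its "file" schema blobref.
func uploadFile(bs blobserver.StatReceiver, path string) (*blobref.BlobRef, os.Error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	fileMap := schema.NewFileMap(path) // assumed helper building the base "file" map
	return schema.WriteFileMap(bs, fileMap, f)
}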
Example #2
// Note: must not touch data after calling this.
func NewUploadHandleFromString(data string) *UploadHandle {
	s1 := sha1.New()
	s1.Write([]byte(data))
	bref := blobref.FromHash("sha1", s1)
	buf := bytes.NewBufferString(data)
	return &UploadHandle{BlobRef: bref, Size: int64(len(data)), Contents: buf}
}
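
Typical use, as a sketch; cl and the PutResult fields referenced here are assumptions (an assumed *client.Client with an Upload method):

h := NewUploadHandleFromString(`{"camliVersion": 1}`)
pr, err := cl.Upload(h) // cl: assumed *client.Client
if err != nil {
	log.Fatalf("upload failed: %v", err)
}
log.Printf("uploaded %s (%d bytes)", pr.BlobRef, h.Size)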
Example #3
func (mi *Indexer) populateFile(client *mysql.Client, blobRef *blobref.BlobRef, ss *schema.Superset) (err os.Error) {
	if ss.Fragment {
		return nil
	}
	seekFetcher, err := blobref.SeekerFromStreamingFetcher(mi.BlobSource)
	if err != nil {
		return err
	}

	sha1 := sha1.New()
	fr := ss.NewFileReader(seekFetcher)
	mime, reader := magic.MimeTypeFromReader(fr)
	n, err := io.Copy(sha1, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("mysqlindex: error indexing file %s: %v", blobRef, err)
		return nil
	}

	attrs := []string{}
	if ss.UnixPermission != "" {
		attrs = append(attrs, "perm")
	}
	if ss.UnixOwnerId != 0 || ss.UnixOwner != "" || ss.UnixGroupId != 0 || ss.UnixGroup != "" {
		attrs = append(attrs, "owner")
	}
	if ss.UnixMtime != "" || ss.UnixCtime != "" || ss.UnixAtime != "" {
		attrs = append(attrs, "time")
	}

	log.Printf("file %s blobref is %s, size %d", blobRef, blobref.FromHash("sha1", sha1), n)
	err = execSQL(client,
		"INSERT IGNORE INTO files (fileschemaref, bytesref, size, filename, mime, setattrs) VALUES (?, ?, ?, ?, ?, ?)",
		blobRef.String(),
		blobref.FromHash("sha1", sha1).String(),
		n,
		ss.FileNameString(),
		mime,
		strings.Join(attrs, ","))
	return
}
Example #4
func blobDetails(contents io.ReadSeeker) (bref *blobref.BlobRef, size int64, err os.Error) {
	s1 := sha1.New()
	if _, err = contents.Seek(0, 0); err != nil {
		return
	}
	size, err = io.Copy(s1, contents)
	if err == nil {
		bref = blobref.FromHash("sha1", s1)
	}
	return
}
Example #5
func (d *defaultStatHasher) Hash(fileName string) (*blobref.BlobRef, os.Error) {
	s1 := sha1.New()
	file, err := os.Open(fileName, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	_, err = io.Copy(s1, file)
	if err != nil {
		return nil, err
	}
	return blobref.FromHash("sha1", s1), nil
}
Example #6
func (mi *Indexer) populateFile(blobRef *blobref.BlobRef, ss *schema.Superset) (err os.Error) {
	seekFetcher, err := blobref.SeekerFromStreamingFetcher(mi.BlobSource)
	if err != nil {
		return err
	}

	sha1 := sha1.New()
	fr, err := ss.NewFileReader(seekFetcher)
	if err != nil {
		log.Printf("mysqlindex: error indexing file %s: %v", blobRef, err)
		return nil
	}
	mime, reader := magic.MimeTypeFromReader(fr)
	n, err := io.Copy(sha1, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("mysqlindex: error indexing file %s: %v", blobRef, err)
		return nil
	}

	log.Printf("file %s blobref is %s, size %d", blobRef, blobref.FromHash("sha1", sha1), n)
	err = mi.db.Execute(
		"INSERT IGNORE INTO bytesfiles (schemaref, camlitype, wholedigest, size, filename, mime) VALUES (?, ?, ?, ?, ?, ?)",
		blobRef.String(),
		"file",
		blobref.FromHash("sha1", sha1).String(),
		n,
		ss.FileNameString(),
		mime,
	)
	return
}
Example #7
func (up *Uploader) UploadFileBlob(filename string) (*client.PutResult, os.Error) {
	var (
		err  os.Error
		size int64
		ref  *blobref.BlobRef
		body io.Reader
	)
	if filename == "-" {
		buf := new(bytes.Buffer)
		size, err = io.Copy(buf, os.Stdin)
		if err != nil {
			return nil, err
		}
		// TODO(bradfitz,mpl): limit this buffer size?
		file := buf.Bytes()
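		// Note: file still aliases buf's underlying array, so draining buf
		// into the hash below does not invalidate those bytes.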
		s1 := sha1.New()
		size, err = io.Copy(s1, buf)
		if err != nil {
			return nil, err
		}
		ref = blobref.FromHash("sha1", s1)
		body = io.LimitReader(bytes.NewBuffer(file), size)
	} else {
		fi, err := os.Stat(filename)
		if err != nil {
			return nil, err
		}
		if !fi.IsRegular() {
			return nil, fmt.Errorf("%q is not a regular file", filename)
		}
		file, err := os.Open(filename)
		if err != nil {
			return nil, err
		}
		ref, size, err = blobDetails(file)
		if err != nil {
			return nil, err
		}
		file.Seek(0, 0)
		body = io.LimitReader(file, size)
	}

	handle := &client.UploadHandle{ref, size, body}
	return up.Upload(handle)
}
Example #8
// WriteFileFromReader creates and uploads a "file" JSON schema
// composed of chunks of r, also uploading the chunks.  The returned
// BlobRef is of the JSON file schema blob.
func WriteFileFromReader(bs blobserver.Storage, filename string, r io.Reader) (*blobref.BlobRef, os.Error) {
	// Naive for now.  Just in 1MB chunks.
	// TODO: rolling hash and hash trees.

	parts, size := []ContentPart{}, int64(0)

	buf := new(bytes.Buffer)
	for {
		buf.Reset()

		n, err := io.Copy(buf, io.LimitReader(r, 1<<20))
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}

		hash := crypto.SHA1.New()
		hash.Write(buf.Bytes())
		br := blobref.FromHash("sha1", hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, buf)
			if err != nil {
				return nil, err
			}
			if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
				return nil, fmt.Errorf("schema/filewriter: wrote %s, got %s ack'd", expect, sb)
			}
		}

		size += n
		parts = append(parts, ContentPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // offset into BlobRef to read from (not the offset in the destination)
		})
	}

	m := NewCommonFilenameMap(filename)
	err := PopulateRegularFileMap(m, size, parts)
	if err != nil {
		return nil, err
	}

	json, err := MapToCamliJson(m)
	if err != nil {
		return nil, err
	}
	br := blobref.Sha1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return nil, err
	}
	if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
		return nil, fmt.Errorf("schema/filewriter: wrote %s, got %s ack'd", expect, sb)
	}

	return br, nil
}
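
A quick usage sketch; storage is an assumed blobserver.Storage implementation:

br, err := schema.WriteFileFromReader(storage, "notes.txt",
	strings.NewReader("hello, world\n"))
if err != nil {
	log.Fatalf("write failed: %v", err)
}
log.Printf("file schema blob: %s", br)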
Example #9
func (tb *testBlob) BlobRef() *blobref.BlobRef {
	h := sha1.New()
	h.Write([]byte(tb.val))
	return blobref.FromHash("sha1", h)
}
Example #10
func doInit() {
	blobDir := path.Join(osutil.CamliConfigDir(), "keyblobs")
	// Errors ignored: the directories may already exist.
	os.Mkdir(osutil.CamliConfigDir(), 0700)
	os.Mkdir(blobDir, 0700)

	keyId := *flagGpgKey
	if keyId == "" {
		keyId = os.Getenv("GPGKEY")
	}
	if keyId == "" {
		// TODO: run and parse gpg --list-secret-keys and see if there's just one and suggest that?  Or show
		// a list of them?
		log.Fatalf("Initialization requires your public GPG key.  Set --gpgkey=<pubid> or set $GPGKEY in your environment.  Run gpg --list-secret-keys to find their key IDs.")
	}

	if os.Getenv("GPG_AGENT_INFO") == "" {
		log.Printf("No GPG_AGENT_INFO found in environment; you should setup gnupg-agent.  camput will be annoying otherwise.")
	}

	// TODO: use same command-line flag as the jsonsign package.
	// unify them into a shared package just for gpg-related stuff?
	keyBytes, err := exec.Command("gpg", "--export", "--armor", keyId).Output()
	if err != nil {
		log.Fatalf("Error running gpg to export public key: %v", err)
	}

	hash := sha1.New()
	hash.Write(keyBytes)
	bref := blobref.FromHash("sha1", hash)

	keyBlobPath := path.Join(blobDir, bref.String()+".camli")
	if err = ioutil.WriteFile(keyBlobPath, keyBytes, 0644); err != nil {
		log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
	}

	if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
		log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
	}

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	_, err = os.Stat(client.ConfigFilePath())
	if err == nil {
		log.Fatalf("Config file %q already exists; quitting without touching it.", client.ConfigFilePath())
	}

	if f, err := os.OpenFile(client.ConfigFilePath(), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
		defer f.Close()
		m := make(map[string]interface{})
		m["publicKeyBlobref"] = bref.String()

		blobPut := make([]map[string]string, 1)
		blobPut[0] = map[string]string{
			"alias":    "local",
			"host":     "http://localhost:3179/",
			"password": "******",
		}
		m["blobPut"] = blobPut

		blobGet := make([]map[string]string, 2)
		blobGet[0] = map[string]string{
			"alias": "keyblobs",
			"path":  "$HOME/.camli/keyblobs",
		}
		blobGet[1] = map[string]string{
			"alias":    "local",
			"host":     "http://localhost:3179/",
			"password": "******",
		}
		m["blobGet"] = blobGet

		jsonBytes, err := json.MarshalIndent(m, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}
		_, err = f.Write(jsonBytes)
		if err != nil {
			log.Fatalf("Error writing to %q: %v", client.ConfigFilePath(), err)
		}
		log.Printf("Wrote %q; modify as necessary.", client.ConfigFilePath())
	}
}
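
For reference, the config file written above comes out roughly like the following; the publicKeyBlobref value is a placeholder, and key order may differ since the map is marshaled with json.MarshalIndent:

{
  "blobGet": [
    {
      "alias": "keyblobs",
      "path": "$HOME/.camli/keyblobs"
    },
    {
      "alias": "local",
      "host": "http://localhost:3179/",
      "password": "******"
    }
  ],
  "blobPut": [
    {
      "alias": "local",
      "host": "http://localhost:3179/",
      "password": "******"
    }
  ],
  "publicKeyBlobref": "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
}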
Example #11
func (tb *Blob) BlobRef() *blobref.BlobRef {
	h := sha1.New()
	h.Write([]byte(tb.Contents))
	return blobref.FromHash("sha1", h)
}