Example #1
func (mi *Indexer) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (retsb blobref.SizedBlobRef, err os.Error) {
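	// Hash and MIME-sniff the incoming bytes in a single pass via io.MultiWriter.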
	sniffer := new(blobSniffer)
	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	log.Printf("mysqlindexer: hashed+sniffed %d bytes; err %v", written, err)
	if err != nil {
		return
	}

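	// Reject the blob if the bytes don't match the digest encoded in the blobref.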
	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	sniffer.Parse()
	mimeType := sniffer.MimeType()
	log.Printf("mysqlindexer: type=%v; truncated=%v", mimeType, sniffer.IsTruncated())

	var client *mysql.Client
	if client, err = mi.getConnection(); err != nil {
		return
	}
	defer mi.releaseConnection(client)

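	// Record the blob's ref, size, and sniffed MIME type in the blobs table.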
	var stmt *mysql.Statement
	if stmt, err = client.Prepare("INSERT IGNORE INTO blobs (blobref, size, type) VALUES (?, ?, ?)"); err != nil {
		log.Printf("mysqlindexer: prepare error: %v", err)
		return
	}
	if err = stmt.BindParams(blobRef.String(), written, mimeType); err != nil {
		log.Printf("mysqlindexer: bind error: %v", err)
		return
	}
	if err = stmt.Execute(); err != nil {
		log.Printf("mysqlindexer: execute error: %v", err)
		return
	}

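	// For recognized schema (camli) blobs, also populate the type-specific index tables.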
	if camli := sniffer.camli; camli != nil {
		switch camli.Type {
		case "claim":
			if err = mi.populateClaim(client, blobRef, camli, sniffer); err != nil {
				return
			}
		case "permanode":
			if err = mi.populatePermanode(client, blobRef, camli); err != nil {
				return
			}
		case "file":
			if err = mi.populateFile(client, blobRef, camli); err != nil {
				return
			}
		}
	}

	retsb = blobref.SizedBlobRef{BlobRef: blobRef, Size: written}
	return
}
Example #2
func (sto *appengineStorage) ReceiveBlob(br *blobref.BlobRef, in io.Reader) (sb blobref.SizedBlobRef, err os.Error) {
	if sto.ctx == nil {
		err = errNoContext
		return
	}

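	// Buffer the blob in memory while hashing it, so it can be verified before upload.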
	var b bytes.Buffer
	hash := br.Hash()
	written, err := io.Copy(io.MultiWriter(hash, &b), in)
	if err != nil {
		return
	}

	if !br.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}
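	// Stream the verified bytes into App Engine's blobstore.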
	mimeType := "application/octet-stream"
	bw, err := blobstore.Create(sto.ctx, mimeType)
	if err != nil {
		return
	}
	written, err = io.Copy(bw, &b)
	if err != nil {
		// TODO(bradfitz): try to clean up; close it, see if we can find the key, delete it.
		return
	}
	err = bw.Close()
	if err != nil {
		// TODO(bradfitz): try to clean up; see if we can find the key, delete it.
		return
	}
	bkey, err := bw.Key()
	if err != nil {
		return
	}

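	// Record a datastore entity mapping the blobref to its blobstore key and size.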
	var ent blobEnt
	ent.BlobRefStr = br.String()
	ent.Size = written
	ent.BlobKey = bkey

	dkey := datastore.NewKey(sto.ctx, blobKind, br.String(), 0, nil)
	_, err = datastore.Put(sto.ctx, dkey, &ent)
	if err != nil {
		blobstore.Delete(sto.ctx, bkey) // TODO: insert into task queue on error to try later?
		return
	}

	return blobref.SizedBlobRef{BlobRef: br, Size: written}, nil
}
Example #3
func (mi *Indexer) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (retsb blobref.SizedBlobRef, err os.Error) {
	sniffer := new(blobSniffer)
	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	log.Printf("mysqlindexer: hashed+sniffed %d bytes; err %v", written, err)
	if err != nil {
		return
	}

	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	sniffer.Parse()
	mimeType := sniffer.MimeType()
	log.Printf("mysqlindexer: type=%v; truncated=%v", mimeType, sniffer.IsTruncated())

	if camli := sniffer.camli; camli != nil {
		switch camli.Type {
		case "claim":
			if err = mi.populateClaim(blobRef, camli, sniffer); err != nil {
				return
			}
		case "permanode":
			if err = mi.populatePermanode(blobRef, camli); err != nil {
				return
			}
		case "file":
			if err = mi.populateFile(blobRef, camli); err != nil {
				return
			}
		}
	}

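	// Record the blob's ref, size, and sniffed MIME type in the blobs table.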
	if err = mi.db.Execute("INSERT IGNORE INTO blobs (blobref, size, type) VALUES (?, ?, ?)",
		blobRef.String(), written, mimeType); err != nil {
		log.Printf("mysqlindexer: insert into blobs: %v", err)
		return
	}

	retsb = blobref.SizedBlobRef{BlobRef: blobRef, Size: written}
	return
}
Example #4
func (ds *DiskStorage) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (blobGot blobref.SizedBlobRef, err os.Error) {
	pname := ds.partition
	if pname != "" {
		err = fmt.Errorf("refusing upload directly to queue partition %q", pname)
		return
	}
	hashedDirectory := ds.blobDirectory(pname, blobRef)
	err = os.MkdirAll(hashedDirectory, 0700)
	if err != nil {
		return
	}

	tempFile, err := ioutil.TempFile(hashedDirectory, BlobFileBaseName(blobRef)+".tmp")
	if err != nil {
		return
	}

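	// Make sure the temp file is removed if anything below fails before the rename.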
	success := false // set true later
	defer func() {
		if !success {
			log.Println("Removing temp file: ", tempFile.Name())
			os.Remove(tempFile.Name())
		}
	}()

	hash := blobRef.Hash()
	written, err := io.Copy(io.MultiWriter(hash, tempFile), source)
	if err != nil {
		return
	}
	if err = tempFile.Sync(); err != nil {
		return
	}
	if err = tempFile.Close(); err != nil {
		return
	}

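	// Reject the blob if the bytes written don't match the digest encoded in the blobref.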
	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	fileName := ds.blobPath("", blobRef)
	if err = os.Rename(tempFile.Name(), fileName); err != nil {
		return
	}

	stat, err := os.Lstat(fileName)
	if err != nil {
		return
	}
	if !stat.IsRegular() || stat.Size != written {
		err = os.NewError("Written size didn't match.")
		return
	}

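	// Hard-link the new blob into each mirror partition's directory.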
	for _, mirror := range ds.mirrorPartitions {
		pname := mirror.partition
		if pname == "" {
			panic("expected partition name")
		}
		partitionDir := ds.blobDirectory(pname, blobRef)
		if err = os.MkdirAll(partitionDir, 0700); err != nil {
			return
		}
		partitionFileName := ds.blobPath(pname, blobRef)
		pfi, err := os.Stat(partitionFileName)
		if err == nil && pfi.IsRegular() {
			log.Printf("Skipped dup on partition %q", pname)
		} else {
			if err = os.Link(fileName, partitionFileName); err != nil && !linkAlreadyExists(err) {
				log.Fatalf("got link error %T %#v", err, err)
				return
			}
			log.Printf("Mirrored to partition %q", pname)
		}
	}

	blobGot = blobref.SizedBlobRef{BlobRef: blobRef, Size: stat.Size}
	success = true

	if os.Getenv("CAMLI_HACK_OPEN_IMAGES") == "1" {
		exec.Command("eog", fileName).Run()
	}

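	// Notify this storage's blob hub and each mirror partition's hub about the new blob.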
	hub := ds.GetBlobHub()
	hub.NotifyBlobReceived(blobRef)
	for _, mirror := range ds.mirrorPartitions {
		mirror.GetBlobHub().NotifyBlobReceived(blobRef)
	}
	return
}
Example #5
func receiveBlob(blobRef *blobref.BlobRef, source io.Reader) (blobGot *receivedBlob, err os.Error) {
	hashedDirectory := BlobDirectoryName(blobRef)
	err = os.MkdirAll(hashedDirectory, 0700)
	if err != nil {
		return
	}

	var tempFile *os.File
	tempFile, err = ioutil.TempFile(hashedDirectory, BlobFileBaseName(blobRef)+".tmp")
	if err != nil {
		return
	}

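	// Make sure the temp file is removed if anything below fails before the rename.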
	success := false // set true later
	defer func() {
		if !success {
			log.Println("Removing temp file: ", tempFile.Name())
			os.Remove(tempFile.Name())
		}
	}()

	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, tempFile), source)
	if err != nil {
		return
	}
	// TODO: fsync before close.
	if err = tempFile.Close(); err != nil {
		return
	}

	if !blobRef.HashMatches(hash) {
		err = CorruptBlobError
		return
	}

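	// Move the verified temp file into its final blob path.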
	fileName := BlobFileName(blobRef)
	if err = os.Rename(tempFile.Name(), fileName); err != nil {
		return
	}

	stat, err := os.Lstat(fileName)
	if err != nil {
		return
	}
	if !stat.IsRegular() || stat.Size != written {
		err = os.NewError("Written size didn't match.")
		return
	}

	blobGot = &receivedBlob{blobRef: blobRef, size: stat.Size}
	success = true

	if *flagOpenImages {
		exec.Run("/usr/bin/eog",
			[]string{"/usr/bin/eog", fileName},
			os.Environ(),
			"/",
			exec.DevNull,
			exec.DevNull,
			exec.MergeWithStdout)
	}

	return
}