Example #1
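// newFromConfig builds a "remote" blobserver.Storage from the JSON config:
// it creates a client for the configured url and auth string and, unless
// skipStartupCheck is set, verifies the credentials with a one-blob enumerate.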
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	url := config.RequiredString("url")
	auth := config.RequiredString("auth")
	skipStartupCheck := config.OptionalBool("skipStartupCheck", false)
	if err := config.Validate(); err != nil {
		return nil, err
	}

	client := client.New(url)
	if err = client.SetupAuthFromString(auth); err != nil {
		return nil, err
	}
	client.SetLogger(log.New(os.Stderr, "remote", log.LstdFlags))
	sto := &remoteStorage{
		client: client,
	}
	if !skipStartupCheck {
		// Do a quick dummy operation to check that our credentials are
		// correct.
		// TODO(bradfitz,mpl): skip this operation smartly if it turns out this is annoying/slow for whatever reason.
		c := make(chan blob.SizedRef, 1)
		err = sto.EnumerateBlobs(context.TODO(), c, "", 1)
		if err != nil {
			return nil, err
		}
	}
	return sto, nil
}
Example #2
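// TestIsolation verifies that two namespace storages backed by the same
// master each see only the blobs uploaded through them, that the master sees
// everything, and that a blob in one namespace is not fetchable via the other.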
func TestIsolation(t *testing.T) {
	ld := test.NewLoader()
	master, _ := ld.GetStorage("/good-storage/")

	ns1 := newNamespace(t, ld)
	ns2 := newNamespace(t, ld)
	stoMap := map[string]blobserver.Storage{
		"ns1":    ns1,
		"ns2":    ns2,
		"master": master,
	}

	want := func(src string, want ...blob.Ref) {
		if _, ok := stoMap[src]; !ok {
			t.Fatalf("undefined storage %q", src)
		}
		sort.Sort(blob.ByRef(want))
		var got []blob.Ref
		if err := blobserver.EnumerateAll(context.TODO(), stoMap[src], func(sb blob.SizedRef) error {
			got = append(got, sb.Ref)
			return nil
		}); err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("server %q = %q; want %q", src, got, want)
		}
	}

	b1 := &test.Blob{Contents: "Blob 1"}
	b1r := b1.BlobRef()
	b2 := &test.Blob{Contents: "Blob 2"}
	b2r := b2.BlobRef()
	b3 := &test.Blob{Contents: "Shared Blob"}
	b3r := b3.BlobRef()

	b1.MustUpload(t, ns1)
	want("ns1", b1r)
	want("ns2")
	want("master", b1r)

	b2.MustUpload(t, ns2)
	want("ns1", b1r)
	want("ns2", b2r)
	want("master", b1r, b2r)

	b3.MustUpload(t, ns2)
	want("ns1", b1r)
	want("ns2", b2r, b3r)
	want("master", b1r, b2r, b3r)

	b3.MustUpload(t, ns1)
	want("ns1", b1r, b3r)
	want("ns2", b2r, b3r)
	want("master", b1r, b2r, b3r)

	if _, _, err := ns2.FetchStreaming(b1r); err == nil {
		t.Errorf("b1 shouldn't be accessible via ns2")
	}
}
Example #3
// Reindex rewrites the index files of the diskpacked .pack files.
func Reindex(root string, overwrite bool, indexConf jsonconfig.Obj) (err error) {
	// Use a bare storage here: newStorage exists, but it may open a file for writing.
	var s = &storage{root: root}
	index, err := newIndex(root, indexConf)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := index.Close()
		// just returning the first error - if the index or disk is corrupt
		// and can't close, it's very likely these two errors are related and
		// have the same root cause.
		if err == nil {
			err = closeErr
		}
	}()

	ctx := context.TODO() // TODO(tgulacsi): get the verbosity from context
	for i := 0; i >= 0; i++ {
		fh, err := os.Open(s.filename(i))
		if err != nil {
			if os.IsNotExist(err) {
				break
			}
			return err
		}
		err = s.reindexOne(ctx, index, overwrite, i)
		fh.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
Example #4
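// TestParseExpression runs parseExpression over each input in
// parseExpressionTests and compares the resulting SearchQuery (or error)
// against the expected value for that test case.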
func TestParseExpression(t *testing.T) {
	qj := func(sq *SearchQuery) []byte {
		v, err := json.MarshalIndent(sq, "", "  ")
		if err != nil {
			panic(err)
		}
		return v
	}
	for _, tt := range parseExpressionTests {
		ins := tt.inList
		if len(ins) == 0 {
			ins = []string{tt.in}
		}
		for _, in := range ins {
			got, err := parseExpression(context.TODO(), in)
			if err != nil {
				if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) {
					continue
				}
				t.Errorf("%s: parseExpression(%q) error: %v", tt.name, in, err)
				continue
			}
			if tt.errContains != "" {
				t.Errorf("%s: parseExpression(%q) succeeded; want error containing %q", tt.name, in, tt.errContains)
				continue
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("%s: parseExpression(%q) got:\n%s\n\nwant:%s\n", tt.name, in, qj(got), qj(tt.want))
			}
		}
	}
}
Example #5
// serveImporterAcctCallback serves the OAuth callback at http://host/importer/TYPE/callback.
func (h *Host) serveImporterAcctCallback(w http.ResponseWriter, r *http.Request, imp *importer) {
	if r.Method != "GET" {
		http.Error(w, "invalid method", 400)
		return
	}
	acctRef, err := imp.impl.CallbackRequestAccount(r)
	if err != nil {
		httputil.ServeError(w, r, err)
		return
	}
	if !acctRef.Valid() {
		httputil.ServeError(w, r, errors.New("No valid blobref returned from CallbackRequestAccount(r)"))
		return
	}
	ia, err := imp.account(acctRef)
	if err != nil {
		http.Error(w, "invalid 'acct' param: "+err.Error(), 400)
		return
	}
	imp.impl.ServeCallback(w, r, &SetupContext{
		Context:     context.TODO(),
		Host:        h,
		AccountNode: ia.acct,
		ia:          ia,
	})
}
Example #6
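// setup serves the importer account's setup page by handing the request to
// the importer implementation's ServeSetup with a SetupContext for this account.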
func (ia *importerAcct) setup(w http.ResponseWriter, r *http.Request) {
	ia.im.impl.ServeSetup(w, r, &SetupContext{
		Context:     context.TODO(),
		Host:        ia.im.host,
		AccountNode: ia.acct,
		ia:          ia,
	})
}
Example #7
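// TestParseOperand exercises parser.parseOperand over the parseOperandTests
// table and delegates result verification to doChecking.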
func TestParseOperand(t *testing.T) {
	for _, tt := range parseOperandTests {
		p := newParser(tt.in)

		got, err := p.parseOperand(context.TODO())

		doChecking("parseOperand", t, tt, got, err, p)
	}
}
Example #8
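// TestParseConjuction exercises parser.parseAndRHS over the parseAndRHSTests
// table and delegates result verification to doSticherChecking.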
func TestParseConjuction(t *testing.T) {
	for _, tt := range parseAndRHSTests {
		p := newParser(tt.in)

		got, err := p.parseAndRHS(context.TODO(), tt.lhs)

		doSticherChecking("parseAndRHS", t, tt, got, err, p)
	}
}
Example #9
func (ia *importerAcct) setup(w http.ResponseWriter, r *http.Request) {
	if err := ia.im.impl.ServeSetup(w, r, &SetupContext{
		Context:     context.TODO(),
		Host:        ia.im.host,
		AccountNode: ia.acct,
		ia:          ia,
	}); err != nil {
		log.Printf("%v", err)
	}
}
Example #10
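// RunCommand validates the gce deploy flags, builds the OAuth and instance
// configuration (prompting on stdin for an auth code), creates the GCE
// instance, and logs its IP address on success.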
func (c *gceCmd) RunCommand(args []string) error {
	if c.verbose {
		gce.Verbose = true
	}
	if c.project == "" {
		return cmdmain.UsageError("Missing --project flag.")
	}
	if (c.certFile == "") != (c.keyFile == "") {
		return cmdmain.UsageError("--cert and --key must both be given together.")
	}
	if c.certFile == "" && c.hostname == "" {
		return cmdmain.UsageError("Either --hostname, or --cert & --key must provided.")
	}
	config := gce.NewOAuthConfig(readFile(clientIdDat), readFile(clientSecretDat))
	config.RedirectURL = "urn:ietf:wg:oauth:2.0:oob"

	instConf := &gce.InstanceConf{
		Name:     c.instName,
		Project:  c.project,
		Machine:  c.machine,
		Zone:     c.zone,
		CertFile: c.certFile,
		KeyFile:  c.keyFile,
		Hostname: c.hostname,
	}
	if c.sshPub != "" {
		instConf.SSHPub = strings.TrimSpace(readFile(c.sshPub))
	}

	depl := &gce.Deployer{
		Client: oauth2.NewClient(oauth2.NoContext, oauth2.ReuseTokenSource(nil, &oauthutil.TokenSource{
			Config:    config,
			CacheFile: c.project + "-token.json",
			AuthCode: func() string {
				fmt.Println("Get auth code from:")
				fmt.Printf("%v\n", config.AuthCodeURL("my-state", oauth2.AccessTypeOffline, oauth2.ApprovalForce))
				fmt.Println("Enter auth code:")
				sc := bufio.NewScanner(os.Stdin)
				sc.Scan()
				return strings.TrimSpace(sc.Text())
			},
		})),
		Conf: instConf,
	}
	inst, err := depl.Create(context.TODO())
	if err != nil {
		return err
	}

	log.Printf("Instance is up at %s", inst.NetworkInterfaces[0].AccessConfigs[0].NatIP)
	return nil
}
Example #11
// serveImporterAcctCallback serves the OAuth callback at http://host/importer/TYPE/callback.
func (h *Host) serveImporterAcctCallback(w http.ResponseWriter, r *http.Request, imp *importer) {
	if r.Method != "GET" {
		http.Error(w, "invalid method", 400)
		return
	}
	acctRef, ok := blob.Parse(r.FormValue("acct"))
	if !ok {
		http.Error(w, "missing 'acct' blobref param", 400)
		return
	}
	ia, err := imp.account(acctRef)
	if err != nil {
		http.Error(w, "invalid 'acct' param: "+err.Error(), 400)
		return
	}
	imp.impl.ServeCallback(w, r, &SetupContext{
		Context:     context.TODO(),
		Host:        h,
		AccountNode: ia.acct,
		ia:          ia,
	})
}
Example #12
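// serveCallback handles the OAuth callback for the deploy handler: it checks
// the user cookie and xsrf token, exchanges the code for a token, serves the
// existing instance if one is found, and otherwise starts instance creation
// (or a simulated creation in debug mode) in the background while serving the
// progress page.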
func (h *DeployHandler) serveCallback(w http.ResponseWriter, r *http.Request) {
	ck, err := r.Cookie("user")
	if err != nil {
		http.Error(w,
			fmt.Sprintf("Cookie expired, or CSRF attempt. Restart from %s%s%s", h.scheme, h.host, h.prefix),
			http.StatusBadRequest)
		h.Printf("Cookie expired, or CSRF attempt on callback.")
		return
	}
	code := r.FormValue("code")
	if code == "" {
		httputil.ServeError(w, r, errors.New("No oauth code parameter in callback URL"))
		return
	}
	h.Printf("successful authentication: %v", r.URL.RawQuery)

	br, tk, err := fromState(r)
	if err != nil {
		httputil.ServeError(w, r, err)
		return
	}
	if !xsrftoken.Valid(tk, h.xsrfKey, ck.Value, br.String()) {
		httputil.ServeError(w, r, fmt.Errorf("Invalid xsrf token: %q", tk))
		return
	}

	oAuthConf := h.oAuthConfig()
	tok, err := oAuthConf.Exchange(oauth2.NoContext, code)
	if err != nil {
		httputil.ServeError(w, r, fmt.Errorf("could not obtain a token: %v", err))
		return
	}
	h.Printf("successful authorization with token: %v", tok)

	instConf, err := h.instanceConf(br)
	if err != nil {
		httputil.ServeError(w, r, err)
		return
	}

	depl := &Deployer{
		Client: oAuthConf.Client(oauth2.NoContext, tok),
		Conf:   instConf,
	}

	if found := h.serveOldInstance(w, br, depl); found {
		return
	}

	if err := h.recordState(br, &creationState{
		InstConf: br,
	}); err != nil {
		httputil.ServeError(w, r, err)
		return
	}

	if h.debug {
		// We simulate an instance creation, without actually ever doing anything on Google Cloud,
		// by sleeping for a while. Then, as we would do in the real case, we record a creation
		// state (but a made-up one). In the meantime, the progress page/animation is served as
		// usual.
		go func() {
			time.Sleep(7 * time.Second)
			if err := h.recordState(br, &creationState{
				InstConf:              br,
				InstAddr:              "fake.instance.com",
				Success:               true,
				CertFingerprintSHA1:   "XXXXXXXXXXXXXXXXXXXX",
				CertFingerprintSHA256: "YYYYYYYYYYYYYYYYYYYY",
			}); err != nil {
				h.Printf("Could not record creation state for %v: %v", br, err)
				h.recordStateErrMu.Lock()
				defer h.recordStateErrMu.Unlock()
				h.recordStateErr[br.String()] = err
			}
		}()
		h.serveProgress(w, br)
		return
	}

	go func() {
		inst, err := depl.Create(context.TODO())
		state := &creationState{
			InstConf: br,
		}
		if err != nil {
			h.Printf("could not create instance: %v", err)
			switch e := err.(type) {
			case instanceExistsError:
				state.Err = fmt.Sprintf("%v %v", e, helpDeleteInstance)
			case projectIDError:
				state.Err = fmt.Sprintf("%v", e)
			default:
				state.Err = fmt.Sprintf("%v. %v", err, fileIssue(br.String()))
			}
		} else {
			state.InstAddr = addr(inst)
			state.Success = true
			state.CertFingerprintSHA1 = depl.certFingerprints["SHA-1"]
			state.CertFingerprintSHA256 = depl.certFingerprints["SHA-256"]
		}
		if err := h.recordState(br, state); err != nil {
			h.Printf("Could not record creation state for %v: %v", br, err)
			h.recordStateErrMu.Lock()
			defer h.recordStateErrMu.Unlock()
			h.recordStateErr[br.String()] = err
		}
	}()
	h.serveProgress(w, br)
}
Example #13
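// newSyncFromConfig builds a sync handler from the JSON config, wiring the
// "from" storage to the "to" storage through the configured sorted key/value
// queue, and optionally runs (and may block startup on) a full sync plus
// background validation.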
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		from           = conf.RequiredString("from")
		to             = conf.RequiredString("to")
		fullSync       = conf.OptionalBool("fullSyncOnStart", false)
		blockFullSync  = conf.OptionalBool("blockingFullSyncOnStart", false)
		idle           = conf.OptionalBool("idle", false)
		queueConf      = conf.OptionalObject("queue")
		copierPoolSize = conf.OptionalInt("copierPoolSize", 5)
		validate       = conf.OptionalBool("validateOnStart", validateOnStartDefault)
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if idle {
		return newIdleSyncHandler(from, to), nil
	}
	if len(queueConf) == 0 {
		return nil, errors.New(`Missing required "queue" object`)
	}
	q, err := sorted.NewKeyValue(queueConf)
	if err != nil {
		return nil, err
	}

	isToIndex := false
	fromBs, err := ld.GetStorage(from)
	if err != nil {
		return nil, err
	}
	toBs, err := ld.GetStorage(to)
	if err != nil {
		return nil, err
	}
	if _, ok := fromBs.(*index.Index); !ok {
		if _, ok := toBs.(*index.Index); ok {
			isToIndex = true
		}
	}

	sh := newSyncHandler(from, to, fromBs, toBs, q)
	sh.toIndex = isToIndex
	sh.copierPoolSize = copierPoolSize
	if err := sh.readQueueToMemory(); err != nil {
		return nil, fmt.Errorf("Error reading sync queue to memory: %v", err)
	}

	if fullSync || blockFullSync {
		sh.logf("Doing full sync")
		didFullSync := make(chan bool, 1)
		go func() {
			for {
				n := sh.runSync("queue", sh.enumeratePendingBlobs)
				if n > 0 {
					sh.logf("Queue sync copied %d blobs", n)
					continue
				}
				break
			}
			n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
			sh.logf("Full sync copied %d blobs", n)
			didFullSync <- true
			sh.syncLoop()
		}()
		if blockFullSync {
			sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
			<-didFullSync
			sh.logf("Full sync complete.")
		}
	} else {
		go sh.syncLoop()
	}

	if validate {
		go sh.startFullValidation()
	}

	blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
	return sh, nil
}
Example #14
// src: non-nil source
// dest: non-nil destination
// thirdLeg: optional third-leg client. If not nil, anything on src
//     but not on dest is copied to thirdLeg instead of directly to dest
//     (sneakernet mode: copying to a portable drive and transporting
//     thirdLeg to dest).
func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) {
	srcBlobs := make(chan blob.SizedRef, 100)
	destBlobs := make(chan blob.SizedRef, 100)
	srcErr := make(chan error, 1)
	destErr := make(chan error, 1)

	ctx := context.TODO()
	defer ctx.Cancel()
	go func() {
		srcErr <- enumerateAllBlobs(ctx, src, srcBlobs)
	}()
	checkSourceError := func() {
		if err := <-srcErr; err != nil {
			retErr = fmt.Errorf("Enumerate error from source: %v", err)
		}
	}

	if c.dest == "stdout" {
		for sb := range srcBlobs {
			fmt.Printf("%s %d\n", sb.Ref, sb.Size)
		}
		checkSourceError()
		return
	}

	go func() {
		destErr <- enumerateAllBlobs(ctx, dest, destBlobs)
	}()
	checkDestError := func() {
		if err := <-destErr; err != nil {
			retErr = fmt.Errorf("Enumerate error from destination: %v", err)
		}
	}

	destNotHaveBlobs := make(chan blob.SizedRef)
	sizeMismatch := make(chan blob.Ref)
	readSrcBlobs := srcBlobs
	if c.verbose {
		readSrcBlobs = loggingBlobRefChannel(srcBlobs)
	}
	mismatches := []blob.Ref{}
	go client.ListMissingDestinationBlobs(destNotHaveBlobs, sizeMismatch, readSrcBlobs, destBlobs)

	// Handle three-legged (sneakernet) mode if thirdLeg is provided.
	checkThirdError := func() {} // default nop
	syncBlobs := destNotHaveBlobs
	firstHopDest := dest
	if thirdLeg != nil {
		thirdBlobs := make(chan blob.SizedRef, 100)
		thirdErr := make(chan error, 1)
		go func() {
			thirdErr <- enumerateAllBlobs(ctx, thirdLeg, thirdBlobs)
		}()
		checkThirdError = func() {
			if err := <-thirdErr; err != nil {
				retErr = fmt.Errorf("Enumerate error from third leg: %v", err)
			}
		}
		thirdNeedBlobs := make(chan blob.SizedRef)
		go client.ListMissingDestinationBlobs(thirdNeedBlobs, sizeMismatch, destNotHaveBlobs, thirdBlobs)
		syncBlobs = thirdNeedBlobs
		firstHopDest = thirdLeg
	}
For:
	for {
		select {
		case br := <-sizeMismatch:
			// TODO(bradfitz): check both sides and repair, carefully.  For now, fail.
			log.Printf("WARNING: blobref %v has differing sizes on source and dest", br)
			stats.ErrorCount++
			mismatches = append(mismatches, br)
		case sb, ok := <-syncBlobs:
			if !ok {
				break For
			}
			fmt.Printf("Destination needs blob: %s\n", sb)

			blobReader, size, err := src.FetchStreaming(sb.Ref)
			if err != nil {
				stats.ErrorCount++
				log.Printf("Error fetching %s: %v", sb.Ref, err)
				continue
			}
			if size != sb.Size {
				stats.ErrorCount++
				log.Printf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d",
					sb.Size, sb.Ref, size)
				continue
			}

			if _, err := blobserver.Receive(firstHopDest, sb.Ref, blobReader); err != nil {
				stats.ErrorCount++
				log.Printf("Upload of %s to destination blobserver failed: %v", sb.Ref, err)
				continue
			}
			stats.BlobsCopied++
			stats.BytesCopied += size

			if c.removeSrc {
				if err = src.RemoveBlobs([]blob.Ref{sb.Ref}); err != nil {
					stats.ErrorCount++
					log.Printf("Failed to delete %s from source: %v", sb.Ref, err)
				}
			}
		}
	}

	checkSourceError()
	checkDestError()
	checkThirdError()
	if retErr == nil && stats.ErrorCount > 0 {
		retErr = fmt.Errorf("%d errors during sync", stats.ErrorCount)
	}
	return stats, retErr
}
Example #15
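// TestParseCoreAtom runs parseCoreAtom over the parseCoreAtomTests table and
// delegates result verification to doAtomChecking.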
func TestParseCoreAtom(t *testing.T) {
	for _, tt := range parseCoreAtomTests {
		got, err := parseCoreAtom(context.TODO(), tt.in)
		doAtomChecking("parseCoreAtom", t, tt, got, err)
	}
}
Example #16
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		from          = conf.RequiredString("from")
		to            = conf.RequiredString("to")
		fullSync      = conf.OptionalBool("fullSyncOnStart", false)
		blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false)
		idle          = conf.OptionalBool("idle", false)
		queueConf     = conf.OptionalObject("queue")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if idle {
		synch, err := createIdleSyncHandler(from, to)
		if err != nil {
			return nil, err
		}
		return synch, nil
	}
	if len(queueConf) == 0 {
		return nil, errors.New(`Missing required "queue" object`)
	}
	q, err := sorted.NewKeyValue(queueConf)
	if err != nil {
		return nil, err
	}

	isToIndex := false
	fromBs, err := ld.GetStorage(from)
	if err != nil {
		return nil, err
	}
	toBs, err := ld.GetStorage(to)
	if err != nil {
		return nil, err
	}
	if _, ok := fromBs.(*index.Index); !ok {
		if _, ok := toBs.(*index.Index); ok {
			isToIndex = true
		}
	}

	sh, err := createSyncHandler(from, to, fromBs, toBs, q, isToIndex)
	if err != nil {
		return nil, err
	}

	if fullSync || blockFullSync {
		didFullSync := make(chan bool, 1)
		go func() {
			n := sh.runSync("queue", sh.enumerateQueuedBlobs)
			sh.logf("Queue sync copied %d blobs", n)
			n = sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
			sh.logf("Full sync copied %d blobs", n)
			didFullSync <- true
			sh.syncQueueLoop()
		}()
		if blockFullSync {
			sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
			<-didFullSync
			sh.logf("Full sync complete.")
		}
	} else {
		go sh.syncQueueLoop()
	}

	blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
	return sh, nil
}
Example #17
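// handleEnumerateBlobs serves a JSON enumeration of blobs, honoring the
// limit, maxwaitsec, and after form parameters and long-polling for new
// blobs until the deadline when maxwaitsec is set.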
func handleEnumerateBlobs(conn http.ResponseWriter, req *http.Request, storage blobserver.BlobEnumerator) {
	// Potential input parameters
	formValueLimit := req.FormValue("limit")
	formValueMaxWaitSec := req.FormValue("maxwaitsec")
	formValueAfter := req.FormValue("after")

	maxEnumerate := defaultMaxEnumerate
	if config, ok := storage.(blobserver.MaxEnumerateConfig); ok {
		maxEnumerate = config.MaxEnumerate() - 1 // Since we'll add one below.
	}

	limit := defaultEnumerateSize
	if formValueLimit != "" {
		n, err := strconv.ParseUint(formValueLimit, 10, 32)
		if err != nil || n > uint64(maxEnumerate) {
			limit = maxEnumerate
		} else {
			limit = int(n)
		}
	}

	waitSeconds := 0
	if formValueMaxWaitSec != "" {
		waitSeconds, _ = strconv.Atoi(formValueMaxWaitSec)
		if waitSeconds != 0 && formValueAfter != "" {
			conn.WriteHeader(http.StatusBadRequest)
			fmt.Fprint(conn, errMsgMaxWaitSecWithAfter)
			return
		}
		switch {
		case waitSeconds < 0:
			waitSeconds = 0
		case waitSeconds > 30:
			// TODO: don't hard-code 30. Push this up into a blobserver interface
			// for getting the configuration of the server (ultimately a flag in
			// the binary).
			waitSeconds = 30
		}
	}

	conn.Header().Set("Content-Type", "text/javascript; charset=utf-8")
	fmt.Fprintf(conn, "{\n  \"blobs\": [\n")

	loop := true
	needsComma := false
	deadline := time.Now().Add(time.Duration(waitSeconds) * time.Second)
	after := ""
	for loop && (waitSeconds == 0 || time.Now().Before(deadline)) {
		if waitSeconds == 0 {
			loop = false
		}

		blobch := make(chan blob.SizedRef, 100)
		resultch := make(chan error, 1)
		go func() {
			resultch <- storage.EnumerateBlobs(context.TODO(), blobch, formValueAfter, limit+1)
		}()

		endsReached := 0
		gotBlobs := 0
		for endsReached < 2 {
			select {
			case sb, ok := <-blobch:
				if !ok {
					endsReached++
					if gotBlobs <= limit {
						after = ""
					}
					continue
				}
				gotBlobs++
				loop = false
				if gotBlobs > limit {
					// We requested one more from storage than the user asked for.
					// Now we know to return a "continueAfter" response key.
					// But we don't return this blob.
					continue
				}
				blobName := sb.Ref.String()
				if needsComma {
					fmt.Fprintf(conn, ",\n")
				}
				fmt.Fprintf(conn, "    {\"blobRef\": \"%s\", \"size\": %d}",
					blobName, sb.Size)
				after = blobName
				needsComma = true
			case err := <-resultch:
				if err != nil {
					log.Printf("Error during enumerate: %v", err)
					fmt.Fprintf(conn, "{{{ SERVER ERROR }}}")
					return
				}
				endsReached++
			}
		}

		if loop {
			blobserver.WaitForBlob(storage, deadline, nil)
		}
	}
	fmt.Fprintf(conn, "\n  ]")
	if after != "" {
		fmt.Fprintf(conn, ",\n  \"continueAfter\": \"%s\"", after)
	}
	const longPollSupported = true
	if longPollSupported {
		fmt.Fprintf(conn, ",\n  \"canLongPoll\": true")
	}
	fmt.Fprintf(conn, "\n}\n")
}
Example #18
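// Query validates and plans the search query, streams candidate blobs from
// the chosen source, filters them with the constraint matcher, applies the
// Around/Limit windowing and any requested sort, and optionally describes
// the resulting blobs.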
func (h *Handler) Query(rawq *SearchQuery) (*SearchResult, error) {
	ctx := context.TODO() // TODO: set from rawq
	exprResult, err := rawq.checkValid(ctx)
	if err != nil {
		return nil, fmt.Errorf("Invalid SearchQuery: %v", err)
	}
	q := rawq.plannedQuery(exprResult)
	res := new(SearchResult)
	s := &search{
		h:   h,
		q:   q,
		res: res,
		ctx: context.TODO(),
	}
	defer s.ctx.Cancel()

	corpus := h.corpus
	var unlockOnce sync.Once
	if corpus != nil {
		corpus.RLock()
		defer unlockOnce.Do(corpus.RUnlock)
	}

	ch := make(chan camtypes.BlobMeta, buffered)
	errc := make(chan error, 1)

	cands := q.pickCandidateSource(s)
	if candSourceHook != nil {
		candSourceHook(cands.name)
	}

	sendCtx := s.ctx.New()
	defer sendCtx.Cancel()
	go func() { errc <- cands.send(sendCtx, s, ch) }()

	wantAround, foundAround := false, false
	if q.Around.Valid() {
		wantAround = true
	}
	blobMatches := q.Constraint.matcher()
	for meta := range ch {
		match, err := blobMatches(s, meta.Ref, meta)
		if err != nil {
			return nil, err
		}
		if match {
			res.Blobs = append(res.Blobs, &SearchResultBlob{
				Blob: meta.Ref,
			})
			if q.Limit <= 0 || !cands.sorted {
				continue
			}
			if !wantAround || foundAround {
				if len(res.Blobs) == q.Limit {
					sendCtx.Cancel()
					break
				}
				continue
			}
			if q.Around == meta.Ref {
				foundAround = true
				if len(res.Blobs)*2 > q.Limit {
					// If we've already collected more than half of the Limit when Around is found,
					// we ditch the surplus from the beginning of the slice of results.
					// If Limit is even, and the number of results before and after Around
					// are both greater than half the limit, then there will be one more result before
					// than after.
					discard := len(res.Blobs) - q.Limit/2 - 1
					if discard < 0 {
						discard = 0
					}
					res.Blobs = res.Blobs[discard:]
				}
				if len(res.Blobs) == q.Limit {
					sendCtx.Cancel()
					break
				}
				continue
			}
			if len(res.Blobs) == q.Limit {
				n := copy(res.Blobs, res.Blobs[len(res.Blobs)/2:])
				res.Blobs = res.Blobs[:n]
			}
		}
	}
	if err := <-errc; err != nil && err != context.ErrCanceled {
		return nil, err
	}
	if q.Limit > 0 && cands.sorted && wantAround && !foundAround {
		// results are ignored if Around was not found
		res.Blobs = nil
	}
	if !cands.sorted {
		switch q.Sort {
		case UnspecifiedSort, Unsorted:
			// Nothing to do.
		case BlobRefAsc:
			sort.Sort(sortSearchResultBlobs{res.Blobs, func(a, b *SearchResultBlob) bool {
				return a.Blob.Less(b.Blob)
			}})
		case CreatedDesc, CreatedAsc:
			if corpus == nil {
				return nil, errors.New("TODO: Sorting without a corpus unsupported")
			}
			var err error
			corpus.RLock()
			sort.Sort(sortSearchResultBlobs{res.Blobs, func(a, b *SearchResultBlob) bool {
				if err != nil {
					return false
				}
				ta, ok := corpus.PermanodeAnyTimeLocked(a.Blob)
				if !ok {
					err = fmt.Errorf("no ctime or modtime found for %v", a.Blob)
					return false
				}
				tb, ok := corpus.PermanodeAnyTimeLocked(b.Blob)
				if !ok {
					err = fmt.Errorf("no ctime or modtime found for %v", b.Blob)
					return false
				}
				if q.Sort == CreatedAsc {
					return ta.Before(tb)
				}
				return tb.Before(ta)
			}})
			corpus.RUnlock()
			if err != nil {
				return nil, err
			}
		// TODO(mpl): LastModifiedDesc, LastModifiedAsc
		default:
			return nil, errors.New("TODO: unsupported sort+query combination.")
		}
		if q.Limit > 0 && len(res.Blobs) > q.Limit {
			res.Blobs = res.Blobs[:q.Limit]
		}
	}
	if corpus != nil {
		if !wantAround {
			q.setResultContinue(corpus, res)
		}
		unlockOnce.Do(corpus.RUnlock)
	}

	if q.Describe != nil {
		q.Describe.BlobRef = blob.Ref{} // zero this out, if caller set it
		blobs := make([]blob.Ref, 0, len(res.Blobs))
		for _, srb := range res.Blobs {
			blobs = append(blobs, srb.Blob)
		}
		q.Describe.BlobRefs = blobs
		res, err := s.h.Describe(q.Describe)
		if err != nil {
			return nil, err
		}
		s.res.Describe = res
	}
	return s.res, nil
}
Example #19
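// readAllMetaBlobs enumerates every blob in the meta storage, fetches up to
// maxInFlight of them concurrently, and feeds each encrypted meta blob to
// processEncryptedMetaBlob, stopping the enumeration on the first error.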
func (s *storage) readAllMetaBlobs() error {
	type metaBlob struct {
		br  blob.Ref
		dat []byte // encrypted blob
		err error
	}
	metac := make(chan metaBlob, 16)

	const maxInFlight = 50
	var gate = make(chan bool, maxInFlight)

	var stopEnumerate = make(chan bool) // closed on error
	enumErrc := make(chan error, 1)
	go func() {
		var wg sync.WaitGroup
		enumErrc <- blobserver.EnumerateAll(context.TODO(), s.meta, func(sb blob.SizedRef) error {
			select {
			case <-stopEnumerate:
				return errors.New("enumeration stopped")
			default:
			}

			wg.Add(1)
			gate <- true
			go func() {
				defer wg.Done()
				defer func() { <-gate }()
				rc, _, err := s.meta.Fetch(sb.Ref)
				var all []byte
				if err == nil {
					all, err = ioutil.ReadAll(rc)
					rc.Close()
				}
				metac <- metaBlob{sb.Ref, all, err}
			}()
			return nil
		})
		wg.Wait()
		close(metac)
	}()

	for mi := range metac {
		err := mi.err
		if err == nil {
			err = s.processEncryptedMetaBlob(mi.br, mi.dat)
		}
		if err != nil {
			close(stopEnumerate)
			go func() {
				for range metac {
				}
			}()
			// TODO: advertise in this error message a new option or environment variable
			// to skip a certain or all meta blobs, to allow partial recovery, if some
			// are corrupt. For now, require all to be correct.
			return fmt.Errorf("Error with meta blob %v: %v", mi.br, err)
		}
	}

	return <-enumErrc
}
Example #20
func (h *DeployHandler) serveCallback(w http.ResponseWriter, r *http.Request) {
	ck, err := r.Cookie("user")
	if err != nil {
		http.Error(w,
			fmt.Sprintf("Cookie expired, or CSRF attempt. Restart from %s%s%s", h.scheme, h.host, h.prefix),
			http.StatusBadRequest)
		h.Printf("Cookie expired, or CSRF attempt on callback.")
		return
	}
	code := r.FormValue("code")
	if code == "" {
		httputil.ServeError(w, r, errors.New("No oauth code parameter in callback URL"))
		return
	}
	h.Printf("successful authentication: %v", r.URL.RawQuery)

	br, tk, err := fromState(r)
	if err != nil {
		httputil.ServeError(w, r, err)
		return
	}
	if !xsrftoken.Valid(tk, h.xsrfKey, ck.Value, br.String()) {
		httputil.ServeError(w, r, fmt.Errorf("Invalid xsrf token: %q", tk))
		return
	}

	oAuthConf := h.oAuthConfig()
	tok, err := oAuthConf.Exchange(oauth2.NoContext, code)
	if err != nil {
		httputil.ServeError(w, r, fmt.Errorf("could not obtain a token: %v", err))
		return
	}
	h.Printf("successful authorization with token: %v", tok)

	instConf, err := h.instanceConf(br)
	if err != nil {
		httputil.ServeError(w, r, err)
		return
	}

	depl := &Deployer{
		Client: oAuthConf.Client(oauth2.NoContext, tok),
		Conf:   instConf,
	}

	if found := h.serveOldInstance(w, br, depl); found {
		return
	}

	if err := h.recordState(br, &creationState{
		InstConf: br,
	}); err != nil {
		httputil.ServeError(w, r, err)
		return
	}

	go func() {
		inst, err := depl.Create(context.TODO())
		state := &creationState{
			InstConf: br,
		}
		if err != nil {
			h.Printf("could not create instance: %v", err)
			switch e := err.(type) {
			case instanceExistsError:
				state.Err = fmt.Sprintf("%v %v", e, helpDeleteInstance)
			case projectIDError:
				state.Err = fmt.Sprintf("%v", e)
			default:
				state.Err = fmt.Sprintf("%v. %v", err, fileIssue(br.String()))
			}
		} else {
			state.InstAddr = addr(inst)
			state.Success = true
			state.CertFingerprintSHA1 = depl.certFingerprints["SHA-1"]
			state.CertFingerprintSHA256 = depl.certFingerprints["SHA-256"]
		}
		if err := h.recordState(br, state); err != nil {
			h.Printf("Could not record creation state for %v: %v", br, err)
			h.recordStateErrMu.Lock()
			defer h.recordStateErrMu.Unlock()
			h.recordStateErr[br.String()] = err
		}
	}()
	h.serveProgress(w, br)
}
Example #21
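// Reindex wipes the index's sorted key/value storage, then rebuilds it by
// enumerating all blobs from the blob source and re-indexing them with a few
// concurrent workers; per-blob failures are counted and reported at the end
// rather than aborting the rebuild.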
func (x *Index) Reindex() error {
	ctx := context.TODO()

	wiper, ok := x.s.(sorted.Wiper)
	if !ok {
		return fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", x.s)
	}
	log.Printf("Wiping index storage type %T ...", x.s)
	if err := wiper.Wipe(); err != nil {
		return fmt.Errorf("error wiping index's sorted key/value type %T: %v", x.s, err)
	}
	log.Printf("Index wiped. Rebuilding...")

	reindexStart, _ := blob.Parse(os.Getenv("CAMLI_REINDEX_START"))

	err := x.s.Set(keySchemaVersion.name, fmt.Sprintf("%d", requiredSchemaVersion))
	if err != nil {
		return err
	}

	var nerrmu sync.Mutex
	nerr := 0

	blobc := make(chan blob.Ref, 32)

	enumCtx := ctx.New()
	enumErr := make(chan error, 1)
	go func() {
		defer close(blobc)
		donec := enumCtx.Done()
		var lastTick time.Time
		enumErr <- blobserver.EnumerateAll(enumCtx, x.BlobSource, func(sb blob.SizedRef) error {
			now := time.Now()
			if lastTick.Before(now.Add(-1 * time.Second)) {
				log.Printf("Reindexing at %v", sb.Ref)
				lastTick = now
			}
			if reindexStart.Valid() && sb.Ref.Less(reindexStart) {
				return nil
			}
			select {
			case <-donec:
				return context.ErrCanceled
			case blobc <- sb.Ref:
				return nil
			}
		})
	}()
	const j = 4 // arbitrary concurrency level
	var wg sync.WaitGroup
	for i := 0; i < j; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for br := range blobc {
				if err := x.reindex(br); err != nil {
					log.Printf("Error reindexing %v: %v", br, err)
					nerrmu.Lock()
					nerr++
					nerrmu.Unlock()
					// TODO: flag (or default?) to stop the EnumerateAll above once
					// there's any error with reindexing?
				}
			}
		}()
	}
	if err := <-enumErr; err != nil {
		return err
	}
	wg.Wait()

	log.Printf("Index rebuild complete.")
	nerrmu.Lock() // no need to unlock
	if nerr != 0 {
		return fmt.Errorf("%d blobs failed to re-index", nerr)
	}
	if err := x.initDeletesCache(); err != nil {
		return err
	}
	return nil
}
Example #22
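// TestDeletePermanode checks that the in-memory corpus enumeration skips
// deleted permanodes: bar is deleted before the corpus is built, baz is
// deleted and then undeleted, and the visible set is verified at each step.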
func TestDeletePermanode(t *testing.T) {
	idx := index.NewMemoryIndex()
	idxd := indextest.NewIndexDeps(idx)

	foopn := idxd.NewPlannedPermanode("foo")
	idxd.SetAttribute(foopn, "tag", "foo")
	barpn := idxd.NewPlannedPermanode("bar")
	idxd.SetAttribute(barpn, "tag", "bar")
	bazpn := idxd.NewPlannedPermanode("baz")
	idxd.SetAttribute(bazpn, "tag", "baz")
	idxd.Delete(barpn)
	c, err := idxd.Index.KeepInMemory()
	if err != nil {
		t.Fatalf("error slurping index to memory: %v", err)
	}

	// check that we initially only find permanodes foo and baz,
	// because bar is already marked as deleted.
	want := []blob.Ref{foopn, bazpn}
	ch := make(chan camtypes.BlobMeta, 10)
	var got []camtypes.BlobMeta
	errc := make(chan error, 1)
	c.RLock()
	go func() { errc <- c.EnumeratePermanodesLastModifiedLocked(context.TODO(), ch) }()
	for blobMeta := range ch {
		got = append(got, blobMeta)
	}
	err = <-errc
	c.RUnlock()
	if err != nil {
		t.Fatalf("Could not enumerate permanodes: %v", err)
	}
	if len(got) != len(want) {
		t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want))
	}
	for _, bm := range got {
		found := false
		for _, perm := range want {
			if bm.Ref == perm {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("permanode %v was not found in corpus", bm.Ref)
		}
	}

	// Now add a delete claim for permanode baz, and check that only the foo permanode remains.
	delbaz := idxd.Delete(bazpn)
	want = []blob.Ref{foopn}
	got = got[:0]
	ch = make(chan camtypes.BlobMeta, 10)
	c.RLock()
	go func() { errc <- c.EnumeratePermanodesLastModifiedLocked(context.TODO(), ch) }()
	for blobMeta := range ch {
		got = append(got, blobMeta)
	}
	err = <-errc
	c.RUnlock()
	if err != nil {
		t.Fatalf("Could not enumerate permanodes: %v", err)
	}
	if len(got) != len(want) {
		t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want))
	}
	if got[0].Ref != foopn {
		t.Fatalf("Wrong permanode found in corpus. Wanted %v, got %v", foopn, got[0].Ref)
	}

	// Undelete baz by deleting delbaz, the delete claim for it.
	idxd.Delete(delbaz)
	want = []blob.Ref{foopn, bazpn}
	got = got[:0]
	ch = make(chan camtypes.BlobMeta, 10)
	c.RLock()
	go func() { errc <- c.EnumeratePermanodesLastModifiedLocked(context.TODO(), ch) }()
	for blobMeta := range ch {
		got = append(got, blobMeta)
	}
	err = <-errc
	c.RUnlock()
	if err != nil {
		t.Fatalf("Could not enumerate permanodes: %v", err)
	}
	if len(got) != len(want) {
		t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want))
	}
	for _, bm := range got {
		found := false
		for _, perm := range want {
			if bm.Ref == perm {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("permanode %v was not found in corpus", bm.Ref)
		}
	}
}
Example #23
func (h *Handler) Query(rawq *SearchQuery) (*SearchResult, error) {
	ctx := context.TODO() // TODO: set from rawq
	exprResult, err := rawq.checkValid(ctx)
	if err != nil {
		return nil, fmt.Errorf("Invalid SearchQuery: %v", err)
	}
	q := rawq.plannedQuery(exprResult)
	res := new(SearchResult)
	s := &search{
		h:   h,
		q:   q,
		res: res,
		ctx: context.TODO(),
	}
	defer s.ctx.Cancel()

	corpus := h.corpus
	var unlockOnce sync.Once
	if corpus != nil {
		corpus.RLock()
		defer unlockOnce.Do(corpus.RUnlock)
	}

	ch := make(chan camtypes.BlobMeta, buffered)
	errc := make(chan error, 1)

	cands := q.pickCandidateSource(s)
	if candSourceHook != nil {
		candSourceHook(cands.name)
	}

	sendCtx := s.ctx.New()
	defer sendCtx.Cancel()
	go func() { errc <- cands.send(sendCtx, s, ch) }()

	blobMatches := q.Constraint.matcher()
	for meta := range ch {
		match, err := blobMatches(s, meta.Ref, meta)
		if err != nil {
			return nil, err
		}
		if match {
			res.Blobs = append(res.Blobs, &SearchResultBlob{
				Blob: meta.Ref,
			})
			if q.Limit > 0 && len(res.Blobs) == q.Limit && cands.sorted {
				sendCtx.Cancel()
				break
			}
		}
	}
	if err := <-errc; err != nil && err != context.ErrCanceled {
		return nil, err
	}
	if !cands.sorted {
		// TODO(bradfitz): sort them
		if q.Limit > 0 && len(res.Blobs) > q.Limit {
			res.Blobs = res.Blobs[:q.Limit]
		}
	}
	if corpus != nil {
		q.setResultContinue(corpus, res)
		unlockOnce.Do(corpus.RUnlock)
	}

	if q.Describe != nil {
		q.Describe.BlobRef = blob.Ref{} // zero this out, if caller set it
		blobs := make([]blob.Ref, 0, len(res.Blobs))
		for _, srb := range res.Blobs {
			blobs = append(blobs, srb.Blob)
		}
		q.Describe.BlobRefs = blobs
		res, err := s.h.Describe(q.Describe)
		if err != nil {
			return nil, err
		}
		s.res.Describe = res
	}
	return s.res, nil
}
Example #24
// src: non-nil source
// dest: non-nil destination
// thirdLeg: optional third-leg client. If not nil, anything on src
//     but not on dest is copied to thirdLeg instead of directly to dest
//     (sneakernet mode: copying to a portable drive and transporting
//     thirdLeg to dest).
func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) {
	srcBlobs := make(chan blob.SizedRef, 100)
	destBlobs := make(chan blob.SizedRef, 100)
	srcErr := make(chan error, 1)
	destErr := make(chan error, 1)

	ctx := context.TODO()
	enumCtx := ctx.New() // used for all (2 or 3) enumerates
	defer enumCtx.Cancel()
	enumerate := func(errc chan<- error, sto blobserver.Storage, blobc chan<- blob.SizedRef) {
		err := enumerateAllBlobs(enumCtx, sto, blobc)
		if err != nil {
			enumCtx.Cancel()
		}
		errc <- err
	}

	go enumerate(srcErr, src, srcBlobs)
	checkSourceError := func() {
		if err := <-srcErr; err != nil && err != context.ErrCanceled {
			retErr = fmt.Errorf("Enumerate error from source: %v", err)
		}
	}

	if c.dest == "stdout" {
		for sb := range srcBlobs {
			fmt.Fprintf(cmdmain.Stdout, "%s %d\n", sb.Ref, sb.Size)
		}
		checkSourceError()
		return
	}

	if c.wipe {
		// TODO(mpl): dest is a client. make it send a "wipe" request?
		// upon reception its server then wipes itself if it is a wiper.
		log.Print("Index wiping not yet supported.")
	}

	go enumerate(destErr, dest, destBlobs)
	checkDestError := func() {
		if err := <-destErr; err != nil && err != context.ErrCanceled {
			retErr = fmt.Errorf("Enumerate error from destination: %v", err)
		}
	}

	destNotHaveBlobs := make(chan blob.SizedRef)

	readSrcBlobs := srcBlobs
	if c.verbose {
		readSrcBlobs = loggingBlobRefChannel(srcBlobs)
	}

	mismatches := []blob.Ref{}
	onMismatch := func(br blob.Ref) {
		// TODO(bradfitz): check both sides and repair, carefully.  For now, fail.
		log.Printf("WARNING: blobref %v has differing sizes on source and dest", br)
		stats.ErrorCount++
		mismatches = append(mismatches, br)
	}

	go blobserver.ListMissingDestinationBlobs(destNotHaveBlobs, onMismatch, readSrcBlobs, destBlobs)

	// Handle three-legged (sneakernet) mode if thirdLeg is provided.
	checkThirdError := func() {} // default nop
	syncBlobs := destNotHaveBlobs
	firstHopDest := dest
	if thirdLeg != nil {
		thirdBlobs := make(chan blob.SizedRef, 100)
		thirdErr := make(chan error, 1)
		go enumerate(thirdErr, thirdLeg, thirdBlobs)
		checkThirdError = func() {
			if err := <-thirdErr; err != nil && err != context.ErrCanceled {
				retErr = fmt.Errorf("Enumerate error from third leg: %v", err)
			}
		}
		thirdNeedBlobs := make(chan blob.SizedRef)
		go blobserver.ListMissingDestinationBlobs(thirdNeedBlobs, onMismatch, destNotHaveBlobs, thirdBlobs)
		syncBlobs = thirdNeedBlobs
		firstHopDest = thirdLeg
	}

	for sb := range syncBlobs {
		fmt.Fprintf(cmdmain.Stdout, "Destination needs blob: %s\n", sb)

		blobReader, size, err := src.Fetch(sb.Ref)
		if err != nil {
			stats.ErrorCount++
			log.Printf("Error fetching %s: %v", sb.Ref, err)
			continue
		}
		if size != sb.Size {
			stats.ErrorCount++
			log.Printf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d",
				sb.Size, sb.Ref, size)
			continue
		}

		if _, err := blobserver.Receive(firstHopDest, sb.Ref, blobReader); err != nil {
			stats.ErrorCount++
			log.Printf("Upload of %s to destination blobserver failed: %v", sb.Ref, err)
			continue
		}
		stats.BlobsCopied++
		stats.BytesCopied += int64(size)

		if c.removeSrc {
			if err = src.RemoveBlobs([]blob.Ref{sb.Ref}); err != nil {
				stats.ErrorCount++
				log.Printf("Failed to delete %s from source: %v", sb.Ref, err)
			}
		}
	}

	checkSourceError()
	checkDestError()
	checkThirdError()
	if retErr == nil && stats.ErrorCount > 0 {
		retErr = fmt.Errorf("%d errors during sync", stats.ErrorCount)
	}
	return stats, retErr
}