Example #1
// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
func (d *Deployer) setFirewall(ctx *context.Context, computeService *compute.Service) error {
	defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do()
	if err != nil {
		return fmt.Errorf("error getting default network: %v", err)
	}

	needRules := map[string]compute.Firewall{
		"default-allow-http": compute.Firewall{
			Name:         "default-allow-http",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"http-server"},
			Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"80"}}},
			Network:      defaultNet.SelfLink,
		},
		"default-allow-https": compute.Firewall{
			Name:         "default-allow-https",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"https-server"},
			Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"443"}}},
			Network:      defaultNet.SelfLink,
		},
	}

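	// List the project's existing firewall rules and remove them from needRules,
	// so that only the rules still missing remain.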
	rules, err := computeService.Firewalls.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing rules: %v", err)
	}
	for _, it := range rules.Items {
		delete(needRules, it.Name)
	}
	if len(needRules) == 0 {
		return nil
	}

	if Verbose {
		log.Printf("Need to create rules: %v", needRules)
	}
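	// Create the missing rules concurrently; the first error, if any, is
	// returned below via wg.Err.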
	var wg syncutil.Group
	for name, rule := range needRules {
		if ctx.IsCanceled() {
			return context.ErrCanceled
		}
		name, rule := name, rule
		wg.Go(func() error {
			if Verbose {
				log.Printf("Creating rule %s", name)
			}
			r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do()
			if err != nil {
				return fmt.Errorf("error creating rule %s: %v", name, err)
			}
			if Verbose {
				log.Printf("Created rule %s: %+v", name, r)
			}
			return nil
		})
	}
	return wg.Err()
}
Example #2
// setBuckets defines the buckets needed by the instance and creates them.
func (d *Deployer) setBuckets(storageService *storage.Service, ctx *context.Context) error {
	projBucket := d.Conf.Project + "-camlistore"

	needBucket := map[string]bool{
		projBucket: true,
	}

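	// List the project's existing buckets and remove them from needBucket,
	// so that only the buckets still missing remain.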
	buckets, err := storageService.Buckets.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing buckets: %v", err)
	}
	for _, it := range buckets.Items {
		delete(needBucket, it.Name)
	}
	if len(needBucket) > 0 {
		if Verbose {
			log.Printf("Need to create buckets: %v", needBucket)
		}
		var waitBucket sync.WaitGroup
		var bucketErrMu sync.Mutex // guards bucketErr; the creation goroutines run concurrently
		var bucketErr error
		for name := range needBucket {
			if ctx.IsCanceled() {
				return context.ErrCanceled
			}
			name := name
			waitBucket.Add(1)
			go func() {
				defer waitBucket.Done()
				if Verbose {
					log.Printf("Creating bucket %s", name)
				}
				b, err := storageService.Buckets.Insert(d.Conf.Project, &storage.Bucket{
					Id:   name,
					Name: name,
				}).Do()
				if err != nil {
					bucketErrMu.Lock()
					if bucketErr == nil {
						bucketErr = fmt.Errorf("error creating bucket %s: %v", name, err)
					}
					bucketErrMu.Unlock()
					return
				}
				if Verbose {
					log.Printf("Created bucket %s: %+v", name, b)
				}
			}()
		}
		waitBucket.Wait()
		if bucketErr != nil {
			return bucketErr
		}
	}

	d.Conf.configDir = path.Join(projBucket, configDir)
	d.Conf.blobDir = path.Join(projBucket, "blobs")
	return nil
}
Example #3
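// importTweets fetches the user's timeline in pages of tweetRequestLimit tweets
// and imports each tweet under the top-level "tweets" node.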
func (im *imp) importTweets(ctx *context.Context) error {
	maxId := ""
	continueRequests := true

	for continueRequests {
		if ctx.IsCanceled() {
			log.Printf("Twitter importer: interrupted")
			return context.ErrCanceled
		}

		var resp []*tweetItem
		if err := im.doAPI(&resp, "statuses/user_timeline.json", "count", strconv.Itoa(tweetRequestLimit), "max_id", maxId); err != nil {
			return err
		}

		tweetsNode, err := im.getTopLevelNode("tweets", "Tweets")
		if err != nil {
			return err
		}

		itemcount := len(resp)
		log.Printf("Twitter importer: Importing %d tweets", itemcount)
		if itemcount < tweetRequestLimit {
			continueRequests = false
		} else {
			lastTweet := resp[len(resp)-1]
			maxId = lastTweet.Id
		}

		for _, tweet := range resp {
			if ctx.IsCanceled() {
				log.Printf("Twitter importer: interrupted")
				return context.ErrCanceled
			}
			err = im.importTweet(tweetsNode, tweet)
			if err != nil {
				log.Printf("Twitter importer: error importing tweet %s %v", tweet.Id, err)
				continue
			}
		}
	}

	return nil
}
Example #4
// createInstance starts the creation of the Compute Engine instance and waits for the
// result of the creation operation. It should be called after setBuckets and setupHTTPS.
func (d *Deployer) createInstance(computeService *compute.Service, ctx *context.Context) error {
	prefix := projectsAPIURL + d.Conf.Project
	machType := prefix + "/zones/" + d.Conf.Zone + "/machineTypes/" + d.Conf.Machine
	config := cloudConfig(d.Conf)
	password := d.Conf.Password
	if password == "" {
		password = d.Conf.Project
	}
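	// Describe the CoreOS instance: boot disk, HTTP/HTTPS tags, Camlistore
	// metadata (credentials, bucket paths, cloud-config user data), an external
	// NAT address, and the API scopes the instance needs.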
	instance := &compute.Instance{
		Name:        d.Conf.Name,
		Description: "Camlistore server",
		MachineType: machType,
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    d.Conf.Name + "-coreos-stateless-pd",
					SourceImage: coreosImgURL,
				},
			},
		},
		Tags: &compute.Tags{
			Items: []string{"http-server", "https-server"},
		},
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				{
					Key:   "camlistore-username",
					Value: camliUsername,
				},
				{
					Key:   "camlistore-password",
					Value: password,
				},
				{
					Key:   "camlistore-blob-dir",
					Value: "gs://" + d.Conf.blobDir,
				},
				{
					Key:   "camlistore-config-dir",
					Value: "gs://" + d.Conf.configDir,
				},
				{
					Key:   "user-data",
					Value: config,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				AccessConfigs: []*compute.AccessConfig{
					&compute.AccessConfig{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: prefix + "/global/networks/default",
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			{
				Email: "default",
				Scopes: []string{
					logging.Scope,
					compute.DevstorageFull_controlScope,
					compute.ComputeScope,
					"https://www.googleapis.com/auth/sqlservice",
					"https://www.googleapis.com/auth/sqlservice.admin",
				},
			},
		},
	}
	if d.Conf.Hostname != "" && d.Conf.Hostname != "localhost" {
		instance.Metadata.Items = append(instance.Metadata.Items, &compute.MetadataItems{
			Key:   "camlistore-hostname",
			Value: d.Conf.Hostname,
		})
	}
	const localMySQL = false // later
	if localMySQL {
		instance.Disks = append(instance.Disks, &compute.AttachedDisk{
			AutoDelete: false,
			Boot:       false,
			Type:       "PERSISTENT",
			InitializeParams: &compute.AttachedDiskInitializeParams{
				DiskName:   "camlistore-mysql-index-pd",
				DiskSizeGb: 4,
			},
		})
	}

	if Verbose {
		log.Print("Creating instance...")
	}
	op, err := computeService.Instances.Insert(d.Conf.Project, d.Conf.Zone, instance).Do()
	if err != nil {
		return fmt.Errorf("failed to create instance: %v", err)
	}
	opName := op.Name
	if Verbose {
		log.Printf("Created. Waiting on operation %v", opName)
	}
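	// Poll the zone operation every two seconds until it reports DONE or fails.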
OpLoop:
	for {
		if ctx.IsCanceled() {
			return context.ErrCanceled
		}
		time.Sleep(2 * time.Second)
		op, err := computeService.ZoneOperations.Get(d.Conf.Project, d.Conf.Zone, opName).Do()
		if err != nil {
			return fmt.Errorf("failed to get op %s: %v", opName, err)
		}
		switch op.Status {
		case "PENDING", "RUNNING":
			if Verbose {
				log.Printf("Waiting on operation %v", opName)
			}
			continue
		case "DONE":
			if op.Error != nil {
				for _, operr := range op.Error.Errors {
					log.Printf("Error: %+v", operr)
				}
				return fmt.Errorf("failed to start.")
			}
			if Verbose {
				log.Printf("Success. %+v", op)
			}
			break OpLoop
		default:
			return fmt.Errorf("unknown status %q: %+v", op.Status, op)
		}
	}
	return nil
}
Example #5
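// Run is the Picasa importer's entry point: it iterates over the account's
// images with iterItems, skipping those the filter finds already imported, and
// downloads the rest with a pool of parallelWorkers workers, recording each one
// under its album node.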
func (im *imp) Run(ctx *context.Context) (err error) {
	log.Printf("Running picasa importer.")
	defer func() {
		log.Printf("picasa importer returned: %v", err)
	}()

	im.Lock()
	client := &http.Client{Transport: im.transport}
	im.Unlock()

	root, err := im.getRootNode()
	if err != nil {
		return err
	}
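	// itemch carries images discovered by iterItems, errch collects errors from
	// the iterator and the workers, and tbd queues the images handed to the
	// download workers.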
	itemch := make(chan imageFile)
	errch := make(chan error, parallelWorkers)
	tbd := make(chan imageFile)

	// For caching album name -> imported Object, to skip lookup by path
	// (Attr) as much as possible.
	var albumCacheMu sync.Mutex
	albumCache := make(map[string]*importer.Object)

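	// getParentObj returns the permanode for the named album, creating it under
	// the root node and setting its title and tag attributes when it isn't in
	// the cache yet.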
	getParentObj := func(name, title string) *importer.Object {
		albumCacheMu.Lock()
		defer albumCacheMu.Unlock()
		parent, ok := albumCache[name]
		if ok {
			return parent
		}

		parent, err := im.getChildByPath(name) // use a local err; getParentObj runs from several goroutines
		if err != nil {
			log.Printf("getParentObj(%s): %v", name, err)
		}
		if parent == nil {
			parent, err = root.ChildPathObject(name)
			if err != nil {
				log.Printf("error creating ChildPathObject(%s): %v", name, err)
				errch <- err
				parent = root
			}
		}
		albumCache[name] = parent
		if err = parent.SetAttrs("title", title, "tag", name); err != nil {
			errch <- err
		}
		return parent
	}

	var workers sync.WaitGroup
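	// Each worker downloads the images queued on tbd, writes them as file
	// schemas, and records each one as a child (camliPath) of its album's
	// permanode with album and tag attributes.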
	worker := func() {
		for img := range tbd {
			parent := getParentObj(img.albumName, img.albumTitle)

			fn := img.albumName + "/" + img.fileName
			log.Printf("importing %s", fn)
			fileRef, err := schema.WriteFileFromReader(im.host.Target(), fn, img.r)
			img.r.Close()
			if err != nil {
				// FIXME(tgulacsi): cannot download movies
				log.Printf("error downloading %s: %v", img.fileName, err)
				continue
			}
			// parent will have an attr camliPath:img.fileName set to this permanode
			obj, err := parent.ChildPathObject(img.fileName)
			if err != nil {
				errch <- err
			}

			if err = obj.SetAttrs(
				"camliContent", fileRef.String(),
				"album", img.albumTitle,
				"tag", img.albumName,
			); err != nil {
				errch <- err
			}
		}
		workers.Done()
	}

	workers.Add(parallelWorkers)
	for i := 0; i < parallelWorkers; i++ {
		go worker()
	}

	// decide whether we should import this image
	filter := func(img imageFile) (bool, error) {
		intrErr := func(e error) error {
			if e != nil {
				return e
			}
			if ctx.IsCanceled() {
				return context.ErrCanceled
			}
			return nil
		}
		parent := getParentObj(img.albumName, img.albumTitle)
		if parent != nil {
			pn := parent.Attr("camliPath:" + img.fileName)
			if pn != "" {
				ref, ok := blob.Parse(pn)
				if !ok {
					return true, fmt.Errorf("cannot parse %s as blobRef", pn)
				}
				obj, err := im.host.ObjectFromRef(ref)
				if err != nil {
					return false, err
				}
				if obj != nil {
					log.Printf("%s/%s already imported as %s.",
						img.albumName, img.fileName, obj.PermanodeRef())
					return false, intrErr(nil)
				}
			}
		}
		return true, intrErr(nil)
	}

	go iterItems(itemch, errch, filter, client, "default")
	for {
		select {
		case err = <-errch:
			close(tbd)
			if err == context.ErrCanceled {
				log.Printf("Picasa importer has been interrupted.")
			} else {
				log.Printf("Picasa importer error: %v", err)
				workers.Wait()
			}
			return err
		case <-ctx.Done():
			log.Printf("Picasa importer has been interrupted.")
			close(tbd)
			return context.ErrCanceled
		case img := <-itemch:
			tbd <- img
		}
	}
	close(tbd)
	workers.Wait()
	return nil
}