Code Example #1
// NewConnSrc returns a source of new connections based on Lookups in the
// provided mount directory. If there isn't a directory located at tmpdir,
// one is created. The second return parameter can be used to shut down and
// release any resources. As a result of this shutdown, or during any other
// fatal error, the returned chan will be closed.
//
// The connset parameter is optional.
func NewConnSrc(mountdir, tmpdir string, connset *proxy.ConnSet) (<-chan proxy.Conn, io.Closer, error) {
	if err := os.MkdirAll(tmpdir, 0777); err != nil {
		return nil, nil, err
	}

	// Unmount any stale mount left at mountdir. The error is deliberately
	// ignored: it is too verbose to be useful to print out.
	_ = fuse.Unmount(mountdir)
	log.Printf("Mounting %v...", mountdir)
	c, err := fuse.Mount(mountdir, fuse.AllowOther())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot mount %q: %v", mountdir, err)
	}
	log.Printf("Mounted %v", mountdir)

	if connset == nil {
		// Make a dummy one.
		connset = proxy.NewConnSet()
	}
	conns := make(chan proxy.Conn, 1)
	root := &fsRoot{
		tmpDir:  tmpdir,
		linkDir: mountdir,
		dst:     conns,
		links:   make(map[string]symlink),
		closers: []io.Closer{c},
		connset: connset,
	}

	server := fs.New(c, &fs.Config{
		// Flip the constant below to true for verbose FUSE debug logging.
		Debug: func(msg interface{}) {
			if false {
				log.Print(msg)
			}
		},
	})

	go func() {
		if err := server.Serve(root); err != nil {
			log.Printf("serve %q exited due to error: %v", mountdir, err)
		}
		// The server exited, but we don't know whether the exit was graceful
		// (via root.Close) or forced by an external unmount. Closing the root
		// ensures the 'dst' chan is closed correctly, signifying that no new
		// connections are possible.
		if err := root.Close(); err != nil {
			log.Printf("root.Close() error: %v", err)
		}
		log.Printf("FUSE exited")
	}()

	return conns, root, nil
}
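For context, here is a minimal sketch of how a caller might consume the channel and Closer returned by NewConnSrc. The import path follows the upstream cloudsql-proxy layout, and the mount directory, temp directory, and per-connection handling are placeholders, not values from the examples above.

package main

import (
	"log"

	"github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/fuse"
)

func main() {
	// Passing nil for connset is allowed; NewConnSrc makes a dummy one.
	conns, closer, err := fuse.NewConnSrc("/cloudsql", "/tmp/cloudsql-tmp", nil)
	if err != nil {
		log.Fatalf("could not mount FUSE directory: %v", err)
	}
	defer closer.Close()

	// The channel is closed on shutdown or any fatal error, so ranging
	// over it exits cleanly when the source goes away.
	for conn := range conns {
		_ = conn // hand off to per-connection proxying logic here
	}
}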
Code Example #2
func main() {
	flag.Parse()

	if *version {
		fmt.Println("Cloud SQL Proxy:", versionString)
		return
	}

	instList := stringList(*instances)
	projList := stringList(*projects)
	// TODO: it'd be really great to consolidate flag verification in one place.
	if len(instList) == 0 && *instanceSrc == "" && len(projList) == 0 && !*useFuse {
		projList = gcloudProject()
	}

	onGCE := onGCE()
	if err := checkFlags(onGCE); err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	client, err := authenticatedClient(ctx)
	if err != nil {
		log.Fatal(err)
	}

	ins, err := listInstances(ctx, client, projList)
	if err != nil {
		log.Fatal(err)
	}
	instList = append(instList, ins...)

	cfgs, err := CreateInstanceConfigs(*dir, *useFuse, instList, *instanceSrc)
	if err != nil {
		log.Fatal(err)
	}

	// All active connections are stored in this variable.
	connset := proxy.NewConnSet()

	// Initialize a source of new connections to Cloud SQL instances.
	var connSrc <-chan proxy.Conn
	if *useFuse {
		c, fuse, err := fuse.NewConnSrc(*dir, *fuseTmp, connset)
		if err != nil {
			log.Fatalf("Could not start fuse directory at %q: %v", *dir, err)
		}
		connSrc = c
		defer fuse.Close()
	} else {
		updates := make(chan string)
		if *instanceSrc != "" {
			go func() {
				for {
					// Subscribe blocks, invoking the callback on each change
					// to the metadata value, and returns only on error.
					err := metadata.Subscribe(*instanceSrc, func(v string, ok bool) error {
						if ok {
							updates <- v
						}
						return nil
					})
					if err != nil {
						log.Print(err)
					}
					time.Sleep(5 * time.Second)
				}
			}()
		}

		c, err := WatchInstances(*dir, cfgs, updates)
		if err != nil {
			log.Fatal(err)
		}
		connSrc = c
	}

	log.Print("Ready for new connections")

	(&proxy.Client{
		Port:  port,
		Certs: certs.NewCertSource(host, client, *checkRegion),
		Conns: connset,
	}).Run(connSrc)
}
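One helper worth noting: stringList isn't defined in this snippet. Based on the inline equivalent in Code Example #3 (strings.Split plus an empty-string check), it presumably looks something like the sketch below, which uses the standard "strings" package; the actual helper may differ.

// stringList splits a comma-separated flag value into its parts,
// mapping the empty string to nil (a sketch, not the proxy's own code).
func stringList(s string) []string {
	if s == "" {
		return nil
	}
	return strings.Split(s, ",")
}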
Code Example #3
func main() {
	flag.Parse()

	instances := strings.Split(*instances, ",")
	if len(instances) == 1 && instances[0] == "" {
		instances = nil
	}
	if err := Check(*dir, *useFuse, instances, *instanceSrc); err != nil {
		log.Fatal(err)
	}

	// All active connections are stored in this variable.
	connset := proxy.NewConnSet()

	// Initialize a source of new connections to Cloud SQL instances.
	var connSrc <-chan proxy.Conn
	if *useFuse {
		c, fuse, err := fuse.NewConnSrc(*dir, *fuseTmp, connset)
		if err != nil {
			log.Fatalf("Could not start fuse directory at %q: %v", *dir, err)
		}
		connSrc = c
		defer fuse.Close()
	} else {
		updates := make(chan string)
		if *instanceSrc != "" {
			go func() {
				for {
					// Subscribe blocks, invoking the callback on each change
					// to the metadata value, and returns only on error.
					err := metadata.Subscribe(*instanceSrc, func(v string, ok bool) error {
						if ok {
							updates <- v
						}
						return nil
					})
					if err != nil {
						log.Print(err)
					}
					time.Sleep(5 * time.Second)
				}
			}()
		}

		c, err := WatchInstances(*dir, instances, updates)
		if err != nil {
			log.Fatal(err)
		}
		connSrc = c
	}

	// Use the environment variable only if the flag hasn't been set.
	if *tokenFile == "" {
		*tokenFile = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
	}

	var client *http.Client
	if file := *tokenFile; file != "" {
		all, err := ioutil.ReadFile(file)
		if err != nil {
			log.Fatalf("invalid json file %q: %v", file, err)
		}
		cfg, err := goauth.JWTConfigFromJSON(all, sqlScope)
		if err != nil {
			log.Fatalf("invalid json file %q: %v", file, err)
		}
		client = auth.NewClientFrom(cfg.TokenSource(context.Background()))
	} else if *token != "" || onGCE() {
		// Passing token == "" causes the GCE metadata server to be used.
		client = auth.NewAuthenticatedClient(*token)
	} else {
		log.Fatal("No authentication method available! When not running on Google Compute Engine, provide the -credential_file flag.")
	}

	log.Print("Socket prefix: " + *dir)

	(&proxy.Client{
		Port:  *port,
		Certs: certs.NewCertSource(*host, client, *checkRegion),
		Conns: connset,
	}).Run(connSrc)
}
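The client selection above falls through from an explicit JSON key file (the -credential_file flag or GOOGLE_APPLICATION_CREDENTIALS) to an explicit token or the GCE metadata service, and otherwise exits. The auth.NewClientFrom helper isn't shown here; a rough equivalent using the standard golang.org/x/oauth2 package might look like the following sketch, offered as an illustration rather than the proxy's actual implementation.

import (
	"net/http"

	"golang.org/x/oauth2"
)

// newClientFrom wraps a token source in an authenticated *http.Client,
// roughly what auth.NewClientFrom is expected to do.
func newClientFrom(src oauth2.TokenSource) *http.Client {
	return oauth2.NewClient(oauth2.NoContext, src)
}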