Example No. 1
func (delegate *delegate) Initialize(context application.Context) {
	roaming.SetArgs(context)
	ctx, shutdown := v23.Init()
	delegate.ctx = ctx
	delegate.shutdown = shutdown
	ctx.Infof("delegate.Initialize...")
}
Example No. 2
func onStart(glctx gl.Context, u *uistate.UIState) {
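	// Point the Vanadium runtime at the on-device credentials and route all vlog output to stderr.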
	flag.Set("v23.credentials", "/sdcard/credentials")
	vlog.Log.Configure(vlog.OverridePriorConfiguration(true), vlog.LogToStderr(true))
	vlog.Log.Configure(vlog.OverridePriorConfiguration(true), vlog.Level(0))
	ctx, shutdown := v23.Init()
	u.Shutdown = shutdown
	u.Ctx = ctx
	u.Service = syncbase.NewService(util.MountPoint + "/croupier/" + util.SBName)
	namespace := v23.GetNamespace(u.Ctx)
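	// The "..." blessing pattern matches every principal, so these permissions open all tags to everyone.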
	allAccess := access.AccessList{In: []security.BlessingPattern{"..."}}
	permissions := access.Permissions{
		"Admin":   allAccess,
		"Write":   allAccess,
		"Read":    allAccess,
		"Resolve": allAccess,
		"Debug":   allAccess,
	}
	namespace.SetPermissions(u.Ctx, util.MountPoint, permissions, "")
	namespace.SetPermissions(u.Ctx, util.MountPoint+"/croupier", permissions, "")
	u.Service.SetPermissions(u.Ctx, permissions, "")
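	// Set up the sprite engine, textures, and initial game table for rendering.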
	u.Images = glutil.NewImages(glctx)
	fps = debug.NewFPS(u.Images)
	u.Eng = glsprite.Engine(u.Images)
	u.Texs = texture.LoadTextures(u.Eng)
	u.CurTable = table.InitializeGame(u.NumPlayers, u.Texs)
	sound.InitPlayers(u)
	sync.CreateTables(u)
	// Create watch stream to update game state based on Syncbase updates
	go sync.UpdateSettings(u)
}
Example No. 3
func (d *delegate) Initialize(mctx application.Context) {
	// TODO(bjornick): Calling init multiple times in the same process
	// will be bad.  For now, this is ok because this is the only
	// vanadium service that will be used in the demos and each go library
	// will be in its own process.
	roaming.SetArgs(mctx)
	d.ctx, d.shutdown = v23.Init()

	if *flagTestMode {
		// Inject a mock plugin.
		df, _ := idiscovery.NewFactory(d.ctx, mock.New())
		fdiscovery.InjectFactory(df)

		// Start a mounttable and set the namespace roots.
		//
		// Note that we need to listen on a local IP address in order to
		// accept connections within a GCE instance.
		d.ctx = v23.WithListenSpec(d.ctx, rpc.ListenSpec{Addrs: rpc.ListenAddrs{{Protocol: "tcp", Address: "127.0.0.1:0"}}})
		name, _, err := mounttablelib.StartServers(d.ctx, v23.GetListenSpec(d.ctx), "", "", "", "", "mounttable")
		if err != nil {
			panic(err)
		}
		ns := v23.GetNamespace(d.ctx)
		ns.SetRoots(name)
	}
}
Example No. 4
func (nm *V23Manager) getReadyToRun(ch chan bool) {
	defer nm.mu.Unlock()
	if nm.chatty {
		log.Printf("Calling v23.Init")
	}
	nm.ctx, nm.shutdown = v23.Init()
	if nm.shutdown == nil {
		log.Panic("shutdown nil")
	}
	if nm.chatty {
		log.Printf("Setting root to %v", nm.namespaceRoot)
	}
	v23.GetNamespace(nm.ctx).SetRoots(nm.namespaceRoot)

	nm.initialPlayerNumbers = nm.playerNumbers()
	if nm.chatty {
		log.Printf("Found %d players.", len(nm.initialPlayerNumbers))
	}
	sort.Ints(nm.initialPlayerNumbers)
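	// New players take an id one higher than the current maximum; the game master uses a fixed id.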
	myId := 1
	if len(nm.initialPlayerNumbers) > 0 {
		myId = nm.initialPlayerNumbers[len(nm.initialPlayerNumbers)-1] + 1
	}

	if nm.isGameMaster {
		myId = 999
	}

	nm.relay = relay.MakeRelay()
	nm.myself = model.NewPlayer(myId)
	if nm.isGameMaster {
		if nm.chatty {
			log.Printf("I am game master.")
		}
		nm.isReady = true
		ch <- true
		return
	}
	if nm.chatty {
		log.Printf("I am player %v\n", nm.myself)
	}

	myName := nm.serverName(nm.Me().Id())
	if nm.chatty {
		log.Printf("Calling myself %s\n", myName)
	}
	ctx, s, err := v23.WithNewServer(nm.ctx, myName, ifc.GameServiceServer(nm.relay), MakeAuthorizer())
	if err != nil {
		log.Panic("Error creating server:", err)
		ch <- false
		return
	}
	saveEndpointToFile(s)
	nm.ctx = ctx
	nm.isReady = true
	ch <- true
}
Example No. 5
func main() {
	var shutdown func()
	ctx, shutdown = v23.Init()
	defer shutdown()

	// TODO(bprosnitz) Do we need to worry about a race between Exists() and Create()?
	app = syncbase.NewService(*serviceName).App(*appName)
	exists, err := app.Exists(ctx)
	if err != nil {
		log.Fatalf("error in app.Exists(): %v", exists)
	}
	if !exists {
		if err := app.Create(ctx, nil); err != nil {
			log.Fatalf("error in app.Create(): %v", err)
		}
	}

	http.HandleFunc("/", handler)
	http.ListenAndServe(*addr, nil)
}
Example No. 6
func (delegate *delegate) Initialize(context application.Context) {
	// Start up v23 whenever a v23proxy is launched.
	// This is done regardless of whether we are initializing this v23proxy for use
	// as a client or as a server.
	roaming.SetArgs(context)
	ctx, shutdown := v23.Init()
	delegate.ctx = ctx
	delegate.shutdown = shutdown
	ctx.Infof("delegate.Initialize...")

	// TODO(alexfandrianto): Does Mojo stop us from creating too many v23proxies?
	// Is it 1 per shell? Ideally, each device will only serve 1 of these v23proxies,
	// but it is not problematic to have extras.
	_, s, err := v23.WithNewDispatchingServer(ctx, "", &dispatcher{
		appctx: context,
	})
	if err != nil {
		ctx.Fatal("Error serving service: ", err)
	}
	delegate.v23Server = s
	fmt.Println("Listening at:", s.Status().Endpoints[0].Name())
}
Example No. 7
//export swift_io_v_impl_google_rt_VRuntimeImpl_nativeInit
func swift_io_v_impl_google_rt_VRuntimeImpl_nativeInit() C.GoContextHandle {
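	// Initialize the Vanadium runtime and stash the shutdown func on the context so the Swift side can retrieve it later.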
	ctx, shutdownFunc := v23.Init()
	ctx = context.WithValue(ctx, shutdownKey{}, shutdownFunc)
	return C.GoContextHandle(scontext.SwiftContext(ctx))
}
Example No. 8
func main() {
	ctx, shutdown := v23.Init()
	defer shutdown()
	fmt.Println(vango.AllFunc(ctx, os.Stdout))
}
Example No. 9
func (nm *networkManager) run(ready chan<- interface{}, newLeftScreen, newRightScreen chan<- chan<- *spec.Triangle, newInvite chan<- Invitation) {
	defer close(nm.myScreen)
	defer close(newLeftScreen)
	defer close(newRightScreen)
	notifyReady := func(result interface{}) {
		ready <- result
		close(ready)
		ready = nil
	}
	ctx, shutdown := v23.Init()
	defer shutdown()
	ctx, server, err := v23.WithNewServer(ctx, "", spec.ScreenServer(nm), security.AllowEveryone())
	if err != nil {
		notifyReady(err)
		return
	}
	disc, err := v23.NewDiscovery(ctx)
	if err != nil {
		notifyReady(err)
		return
	}
	// Select a color based on some unique identifier of the process, the PublicKey serves as one.
	notifyReady(selectColor(v23.GetPrincipal(ctx).PublicKey()))
	var (
		left     = remoteScreen{myScreen: nm.myScreen, notify: newLeftScreen}
		right    = remoteScreen{myScreen: nm.myScreen, notify: newRightScreen}
		accepted = make(chan string) // Names of remote screens that accepted an invitation
		seek     = make(chan bool)   // Send false to stop seeking invitations from others, true otherwise

		pendingInviterName        string
		pendingInviteUserResponse <-chan error
		pendingInviteRPCResponse  chan<- error
	)
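	// Start listening for invitations from peers and, in parallel, invite peers discovered nearby.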
	seekInvites(ctx, disc, server, seek)
	go sendInvites(ctx, disc, accepted)
	for {
		select {
		case invitation := <-nm.inviteRPCs:
			if left.Active() {
				invitation.Response <- fmt.Errorf("thanks for the invite but I'm already engaged with a previous invitation")
				break
			}
			// Defer the response to the user interface.
			ch := make(chan error)
			pendingInviterName = invitation.Name
			pendingInviteRPCResponse = invitation.Response
			pendingInviteUserResponse = ch
			invitation.Response = ch
			newInvite <- invitation
		case err := <-pendingInviteUserResponse:
			pendingInviteRPCResponse <- err
			if err == nil {
				ctx.Infof("Activating left screen %q", pendingInviterName)
				left.Activate(ctx, pendingInviterName)
				seek <- false
			}
			pendingInviterName = ""
			pendingInviteUserResponse = nil
			pendingInviteRPCResponse = nil
		case <-left.Lost():
			ctx.Infof("Deactivating left screen")
			left.Deactivate()
			seek <- true
		case invitee := <-accepted:
			ctx.Infof("Activating right screen %q", invitee)
			right.Activate(ctx, invitee)
		case <-right.Lost():
			ctx.Infof("Deactivating right screen")
			right.Deactivate()
			go sendInvites(ctx, disc, accepted)
		case <-ctx.Done():
			return
		}
	}
}
Example No. 10
func main() {
	ctx, shutdown := v23.Init()
	defer shutdown()

	// Run server
	if *to == "" {
		// We are not the caller, so make the RPC available for the
		// caller to call in on.
		bsrv := ifc.BridgeServer(makeImpl())
		_, server, err := v23.WithNewServer(ctx, *serviceName, bsrv,
			security.DefaultAuthorizer())

		if err != nil {
			ctx.Error("Error serving service: ", err)
			return
		}

		endpoint := server.Status().Endpoints[0]
		fmt.Printf("Listening at: %v\n", endpoint)

		// Wait forever.
		<-signals.ShutdownOnSignals(ctx)
	} else if *to != "" && *from != "" {
		// pipe mode
		ifct := ifcTopics(*topics)
		tmout := options.ChannelTimeout(*timeout)

		leftc := ifc.BridgeClient(*to)
		rightc := ifc.BridgeClient(*from)

		leftcc, err := leftc.Link(ctx, ifct, tmout)
		if err != nil {
			ctx.Error(err)
			return
		}

		rightcc, err := rightc.Link(ctx, ifct, tmout)
		if err != nil {
			ctx.Error(err)
			return
		}

		errCh := make(chan error, 2)

		wg := &sync.WaitGroup{}
		wg.Add(2)
		go linkToLink(leftcc.RecvStream(), rightcc.SendStream(), errCh, wg)
		go linkToLink(rightcc.RecvStream(), leftcc.SendStream(), errCh, wg)
		wg.Wait()
		select {
		case err := <-errCh:
			log.Print("pipe error: ", err)
		default:
			// don't block on channel read
		}
	} else {
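		// MQTT mode: relay messages between the local MQTT connection and the remote Bridge service.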
		cc, mu, err := mqttConnect()
		if err != nil {
			ctx.Error("mqtt connect: ", err)
			return
		}

		bc := ifc.BridgeClient(*to)

		ifct := ifcTopics(*topics)
		bcc, err := bc.Link(ctx, ifct, options.ChannelTimeout(*timeout))
		if err != nil {
			ctx.Error(err)
			return
		}

		done := make(chan error, 2)
		go func() {
			done <- transmitter(ifct, bcc.SendStream(), cc, mu)
			println("send done")
		}()
		go func() {
			done <- receiver(bcc.RecvStream(), cc, mu)
			println("recv done")
		}()
		err = <-done
		log.Print("Stopped with error ", err)

		// Stop sender by closing cc.Incoming
		cc.Disconnect()
	}
}