// nativeCreateAuthorizer creates a Go security.Authorizer of the requested
// kind and wraps it in a Java object that holds a reference to it.
//
//export Java_io_v_v23_security_VSecurity_nativeCreateAuthorizer
func Java_io_v_v23_security_VSecurity_nativeCreateAuthorizer(jenv *C.JNIEnv, jVSecurityClass C.jclass, kind C.jint, jKey C.jobject) C.jobject {
	env := jutil.Env(uintptr(unsafe.Pointer(jenv)))
	var auth security.Authorizer
	switch kind {
	case 0:
		auth = security.AllowEveryone()
	case 1:
		auth = security.EndpointAuthorizer()
	case 2:
		auth = security.DefaultAuthorizer()
	case 3:
		// A public-key authorizer needs the Java key converted to its
		// Go counterpart.
		key, err := GoPublicKey(env, jutil.Object(uintptr(unsafe.Pointer(jKey))))
		if err != nil {
			jutil.JThrowV(env, err)
			return nil
		}
		auth = security.PublicKeyAuthorizer(key)
	default:
		// Unknown kind: return null to the Java caller.
		return nil
	}
	ref := jutil.GoNewRef(&auth) // Un-refed when the Java PermissionsAuthorizer is finalized.
	jAuthorizer, err := jutil.NewObject(env, jutil.Class(uintptr(unsafe.Pointer(jPermissionsAuthorizerClass))), []jutil.Sign{jutil.LongSign}, int64(ref))
	if err != nil {
		jutil.GoDecRef(ref)
		jutil.JThrowV(env, err)
		return nil
	}
	return C.jobject(unsafe.Pointer(jAuthorizer))
}
func startLockServer(ctx *context.T, configDir string) (func(), error) {
	blessings, _ := v23.GetPrincipal(ctx).BlessingStore().Default()
	lockNhSuffix := fmt.Sprint(blessings)
	// Start a local mounttable where the lock server will be mounted,
	// and make this mounttable visible in the local neighborhood.
	mtName, stopMT, err := locklib.StartMounttable(ctx, configDir, locklib.LockNhPrefix+lockNhSuffix)
	if err != nil {
		return nil, err
	}
	ctx, _, err = v23.WithNewNamespace(ctx, mtName)
	if err != nil {
		stopMT()
		return nil, err
	}
	ctx, cancel := context.WithCancel(ctx)
	_, server, err := v23.WithNewServer(ctx, lockObjectName(ctx), newLock(), security.DefaultAuthorizer())
	if err != nil {
		stopMT()
		return nil, err
	}
	// The returned function tears everything down in reverse order:
	// cancel the server's context, wait for the server to drain, then
	// stop the mounttable.
	stopLock := func() {
		cancel()
		vlog.Infof("Stopping lock server...")
		<-server.Closed()
		vlog.Infof("Stopped lock server")
		stopMT()
	}
	vlog.Infof("Started lock server")
	vlog.Infof("ENDPOINT: %v", server.Status().Endpoints[0].Name())
	return stopLock, nil
}
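// A minimal sketch of how startLockServer might be driven from a command's
// entry point. The exampleLockMain name and the config directory value are
// hypothetical; only v23.Init, signals.ShutdownOnSignals, and vlog are taken
// from the surrounding code.
func exampleLockMain() {
	ctx, shutdown := v23.Init()
	defer shutdown()

	// Hypothetical config directory; a real command would take a flag.
	stop, err := startLockServer(ctx, "/tmp/lock-config")
	if err != nil {
		vlog.Fatalf("startLockServer failed: %v", err)
	}
	defer stop()

	// Block until interrupted, then let the deferred cleanup run.
	<-signals.ShutdownOnSignals(ctx)
}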
func main() {
	ctx, shutdown := v23.Init()
	defer shutdown()

	if *to == "" {
		// Server mode: we are not the caller, so make the RPC
		// available for the caller to call in on.
		bsrv := ifc.BridgeServer(makeImpl())
		_, server, err := v23.WithNewServer(ctx, *serviceName, bsrv, security.DefaultAuthorizer())
		if err != nil {
			ctx.Error("Error serving service: ", err)
			return
		}
		endpoint := server.Status().Endpoints[0]
		fmt.Printf("Listening at: %v\n", endpoint)
		// Wait forever.
		<-signals.ShutdownOnSignals(ctx)
	} else if *from != "" {
		// Pipe mode: link two bridge servers and shuttle messages
		// between them in both directions. (*to is known to be
		// non-empty here.)
		ifct := ifcTopics(*topics)
		tmout := options.ChannelTimeout(*timeout)
		leftc := ifc.BridgeClient(*to)
		rightc := ifc.BridgeClient(*from)
		leftcc, err := leftc.Link(ctx, ifct, tmout)
		if err != nil {
			ctx.Error(err)
			return
		}
		rightcc, err := rightc.Link(ctx, ifct, tmout)
		if err != nil {
			ctx.Error(err)
			return
		}
		errCh := make(chan error, 2)
		wg := &sync.WaitGroup{}
		wg.Add(2)
		go linkToLink(leftcc.RecvStream(), rightcc.SendStream(), errCh, wg)
		go linkToLink(rightcc.RecvStream(), leftcc.SendStream(), errCh, wg)
		wg.Wait()
		select {
		case err := <-errCh:
			log.Print("pipe error: ", err)
		default:
			// Don't block when no error was reported.
		}
	} else {
		// MQTT mode: bridge a single server to an MQTT broker.
		cc, mu, err := mqttConnect()
		if err != nil {
			ctx.Error("mqtt connect: ", err)
			return
		}
		bc := ifc.BridgeClient(*to)
		ifct := ifcTopics(*topics)
		bcc, err := bc.Link(ctx, ifct, options.ChannelTimeout(*timeout))
		if err != nil {
			ctx.Error(err)
			return
		}
		done := make(chan error, 2)
		go func() {
			done <- transmitter(ifct, bcc.SendStream(), cc, mu)
			println("send done")
		}()
		go func() {
			done <- receiver(bcc.RecvStream(), cc, mu)
			println("recv done")
		}()
		err = <-done
		log.Print("Stopped with error ", err)
		// Stop the sender by closing cc.Incoming.
		cc.Disconnect()
	}
}
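// A hedged sketch of the linkToLink helper called above, which is not defined
// in this excerpt. The Msg alias and the recvStream/sendStream interfaces are
// assumptions that mirror the usual Vanadium client stream shape
// (Advance/Value/Err on the receiving side, Send on the sending side); the
// real generated types are specific to the ifc.Bridge interface.
type Msg = interface{}

type recvStream interface {
	Advance() bool
	Value() Msg
	Err() error
}

type sendStream interface {
	Send(m Msg) error
}

// linkToLink copies messages from in to out until the input stream is
// exhausted or a send fails, reports the first error on errCh, and signals
// wg when it returns. errCh is buffered in main, so sends never block.
func linkToLink(in recvStream, out sendStream, errCh chan error, wg *sync.WaitGroup) {
	defer wg.Done()
	for in.Advance() {
		if err := out.Send(in.Value()); err != nil {
			errCh <- err
			return
		}
	}
	if err := in.Err(); err != nil {
		errCh <- err
	}
}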