// WithMetadataForwardTLSInfo reads the certificate from the context and
// returns a context where ForwardCert is set based on the original
// certificate.
func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) {
	md, ok := metadata.FromContext(ctx)
	if !ok {
		md = metadata.MD{}
	}

	ous := []string{}
	org := ""
	cn := ""

	certSubj, err := certSubjectFromContext(ctx)
	if err == nil {
		cn = certSubj.CommonName
		ous = certSubj.OrganizationalUnit
		if len(certSubj.Organization) > 0 {
			org = certSubj.Organization[0]
		}
	}

	// If there's no TLS cert, forward with blank TLS metadata.
	// Note that the presence of this blank metadata is extremely
	// important. Without it, it would look like the manager is making
	// the request directly.
	md[certForwardedKey] = []string{"true"}
	md[certCNKey] = []string{cn}
	md[certOrgKey] = []string{org}
	md[certOUKey] = ous
	peer, ok := peer.FromContext(ctx)
	if ok {
		md[remoteAddrKey] = []string{peer.Addr.String()}
	}

	return metadata.NewContext(ctx, md), nil
}
// RevokeAddrFS ...
func (s *FileSystemAPIServer) RevokeAddrFS(ctx context.Context, r *pb.RevokeAddrFSRequest) (*pb.RevokeAddrFSResponse, error) {
	var err error
	srcAddr := ""

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// Validate Token
	_, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s REVOKE FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	// REVOKE a file system entry for the addr
	// delete /fs/FSID/addr  addr  AddrRef
	pKey := fmt.Sprintf("/fs/%s/addr", r.FSid)
	pKeyA, pKeyB := murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB := murmur3.Sum128([]byte(r.Addr))
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())
	_, err = s.gstore.Delete(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro)
	if store.IsNotFound(err) {
		log.Printf("%s REVOKE FAILED %s %s\n", srcAddr, r.FSid, r.Addr)
		return nil, errf(codes.NotFound, "%v", "Not Found")
	}

	// Log the successful operation and return the FSID whose addr was revoked.
	log.Printf("%s REVOKE SUCCESS %s %s\n", srcAddr, r.FSid, r.Addr)
	return &pb.RevokeAddrFSResponse{Data: r.FSid}, nil
}
// ListFS ...
func (s *FileSystemAPIServer) ListFS(ctx context.Context, r *fb.ListFSRequest) (*fb.ListFSResponse, error) {
	var status string
	var acctData AcctPayLoad
	var err error
	var data string

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		fmt.Println(pr.Addr)
	}

	// get acct data
	acctData, err = s.getAcct("/acct", r.Acctnum)
	if err != nil {
		log.Printf("Error %v on lookup for account %s", err, r.Acctnum)
		return nil, err
	}

	// validate token
	if acctData.Token != r.Token {
		return nil, errf(codes.PermissionDenied, "%s", "Invalid Token")
	}

	// Set up the account read for the list of file systems
	parentKey := fmt.Sprintf("/acct/%s/fs", r.Acctnum)
	data, err = s.fsws.readGroupGStore(parentKey)
	if err != nil {
		return nil, errf(codes.Internal, "%v", err)
	}

	// Prep things to return
	status = "OK"
	return &fb.ListFSResponse{Payload: data, Status: status}, nil
}
// RevokeAddrFS ...
func (s *FileSystemAPIServer) RevokeAddrFS(ctx context.Context, r *pb.RevokeAddrFSRequest) (*pb.RevokeAddrFSResponse, error) {
	var err error
	var acctID string
	var value []byte
	var fsRef FileSysRef
	srcAddr := ""

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// Validate Token
	acctID, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s REVOKE FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	// Validate that the Token/Account owns this file system.
	// Read the FileSysRef entry to determine whether it exists.
	pKey := "/fs"
	pKeyA, pKeyB := murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB := murmur3.Sum128([]byte(r.FSid))
	_, value, err = s.gstore.Read(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, nil)
	if store.IsNotFound(err) {
		log.Printf("%s REVOKE FAILED %s NOTFOUND", srcAddr, r.FSid)
		return nil, errf(codes.NotFound, "%v", "Not Found")
	}
	if err != nil {
		log.Printf("%s REVOKE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}
	err = json.Unmarshal(value, &fsRef)
	if err != nil {
		log.Printf("%s REVOKE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}
	if fsRef.AcctID != acctID {
		log.Printf("%s REVOKE FAILED %s ACCOUNT MISMATCH", srcAddr, r.FSid)
		return nil, errf(codes.FailedPrecondition, "%v", "Account Mismatch")
	}

	// REVOKE a file system entry for the addr
	// delete /fs/FSID/addr  addr  AddrRef
	pKey = fmt.Sprintf("/fs/%s/addr", r.FSid)
	pKeyA, pKeyB = murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB = murmur3.Sum128([]byte(r.Addr))
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())
	_, err = s.gstore.Delete(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro)
	if store.IsNotFound(err) {
		log.Printf("%s REVOKE FAILED %s %s\n", srcAddr, r.FSid, r.Addr)
		return nil, errf(codes.NotFound, "%v", "Not Found")
	}

	// Log the successful operation and return the FSID whose addr was revoked.
	log.Printf("%s REVOKE SUCCESS %s %s\n", srcAddr, r.FSid, r.Addr)
	return &pb.RevokeAddrFSResponse{Data: r.FSid}, nil
}
// ProcessRaftMessage calls 'Step', which advances the
// raft state machine with the provided message on the
// receiving node.
func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) {
	if msg == nil || msg.Message == nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "no message provided")
	}

	// Don't process the message if it comes from
	// a node in the removed set.
	if n.cluster.IsIDRemoved(msg.Message.From) {
		return nil, ErrMemberRemoved
	}

	var sourceHost string
	peer, ok := peer.FromContext(ctx)
	if ok {
		sourceHost, _, _ = net.SplitHostPort(peer.Addr.String())
	}

	n.cluster.ReportActive(msg.Message.From, sourceHost)

	// Reject vote requests from unreachable peers.
	if msg.Message.Type == raftpb.MsgVote {
		member := n.cluster.GetMember(msg.Message.From)
		if member == nil || member.Conn == nil {
			n.Config.Logger.Errorf("received vote request from unknown member %x", msg.Message.From)
			return nil, ErrMemberUnknown
		}

		healthCtx, cancel := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
		defer cancel()

		if err := member.HealthCheck(healthCtx); err != nil {
			n.Config.Logger.Warningf("member %x which sent vote request failed health check: %v", msg.Message.From, err)
			return nil, errors.Wrap(err, "member unreachable")
		}
	}

	if msg.Message.Type == raftpb.MsgProp {
		// We don't accept forwarded proposals. Our
		// current architecture depends on only the leader
		// making proposals, so in-flight proposals can be
		// guaranteed not to conflict.
		return nil, grpc.Errorf(codes.InvalidArgument, "proposals not accepted")
	}

	// Can't stop the raft node while an async RPC is in progress.
	n.stopMu.RLock()
	defer n.stopMu.RUnlock()

	if !n.IsMember() {
		return nil, ErrNoRaftMember
	}

	if err := n.Step(n.Ctx, *msg.Message); err != nil {
		return nil, err
	}

	return &api.ProcessRaftMessageResponse{}, nil
}
// Batch implements the roachpb.KVServer interface.
func (s *DBServer) Batch(
	ctx context.Context, args *roachpb.BatchRequest,
) (br *roachpb.BatchResponse, err error) {
	// TODO(marc,bdarnell): this code is duplicated in server/node.go,
	// which should be fixed.
	defer func() {
		// We always return errors via BatchResponse.Error so structure is
		// preserved; plain errors are presumed to be from the RPC
		// framework and not from cockroach.
		if err != nil {
			if br == nil {
				br = &roachpb.BatchResponse{}
			}
			if br.Error != nil {
				panic(fmt.Sprintf(
					"attempting to return both a plain error (%s) and roachpb.Error (%s)", err, br.Error))
			}
			br.Error = roachpb.NewError(err)
			err = nil
		}
	}()

	// TODO(marc): grpc's authentication model (which gives credential access in
	// the request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, errors.Errorf("user %s is not allowed", certUser)
			}
		}
	}

	if err = verifyRequest(args); err != nil {
		return br, err
	}

	err = s.stopper.RunTask(func() {
		var pErr *roachpb.Error
		// TODO(wiz): This is required to be a different context from the one
		// provided by grpc since it has to last for the entire transaction and not
		// just this one RPC call. See comment for (*TxnCoordSender).heartbeatLoop.
		br, pErr = s.sender.Send(context.TODO(), *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
		}
		br.Error = pErr
	})
	return br, err
}
// DeleteFS ...
func (s *FileSystemAPIServer) DeleteFS(ctx context.Context, r *fb.DeleteFSRequest) (*fb.DeleteFSResponse, error) {
	var status string
	var result string
	var dataS string
	var dataB []byte
	var acctData AcctPayLoad
	var fsdata FileSysPayLoad
	var err error

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		fmt.Println(pr.Addr)
	}

	// get acct data
	acctData, err = s.getAcct("/acct", r.Acctnum)
	if err != nil {
		log.Printf("Error %v on lookup for account %s", err, r.Acctnum)
		return nil, err
	}

	// validate token
	if acctData.Token != r.Token {
		return nil, errf(codes.PermissionDenied, "%s", "Invalid Token")
	}

	// Set up keys and get the file system data
	parentKey := fmt.Sprintf("/acct/%s/fs", r.Acctnum)
	childKey := r.FSid
	dataS, err = s.fsws.getGStore(parentKey, childKey)
	if err != nil {
		return nil, errf(codes.Internal, "%v", err)
	}
	err = json.Unmarshal([]byte(dataS), &fsdata)
	if err != nil {
		return nil, errf(codes.Internal, "%v", err)
	}

	// only active file systems can be marked as deleted
	if fsdata.Status != "active" || fsdata.DeleteDate != 0 {
		return nil, errf(codes.InvalidArgument, "%s", "File System is not active")
	}

	// mark the file system deleted
	fsdata.Status = "deleted"
	fsdata.DeleteDate = time.Now().Unix()
	dataB, err = json.Marshal(fsdata)
	if err != nil {
		return nil, errf(codes.Internal, "%v", err)
	}

	// write updated information into the group store
	_, err = s.fsws.writeGStore(parentKey, childKey, dataB)
	if err != nil {
		return nil, errf(codes.Internal, "%v", err)
	}

	// Prep things to return
	status = "OK"
	result = fmt.Sprintf("filesystem %s in account %s was deleted", r.FSid, r.Acctnum)
	return &fb.DeleteFSResponse{Payload: result, Status: status}, nil
}
func extractRemoteAddress(stream stream) string {
	var remoteAddress string
	p, ok := peer.FromContext(stream.Context())
	if ok {
		if address := p.Addr; address != nil {
			remoteAddress = address.String()
		}
	}
	return remoteAddress
}
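// What follows is a minimal usage sketch, not part of the original sources: a
// grpc.UnaryServerInterceptor applying the same peer.FromContext pattern to
// log the caller's address for every RPC. The interceptor name is
// hypothetical; it would be wired in with
// grpc.NewServer(grpc.UnaryInterceptor(peerLoggingInterceptor)).
func peerLoggingInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	addr := "unknown"
	if p, ok := peer.FromContext(ctx); ok && p.Addr != nil {
		addr = p.Addr.String()
	}
	log.Printf("rpc %s from %s", info.FullMethod, addr)
	return handler(ctx, req)
}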
// GrantAddrFS ...
func (s *FileSystemAPIServer) GrantAddrFS(ctx context.Context, r *fb.GrantAddrFSRequest) (*fb.GrantAddrFSResponse, error) {
	var status string
	var err error
	var acctData AcctPayLoad
	var fsData FileSysPayLoad
	var addrData AddrPayLoad
	var dataB []byte

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		fmt.Println(pr.Addr)
	}

	// get acct data
	acctData, err = s.getAcct("/acct", r.Acctnum)
	if err != nil {
		log.Printf("Error %v on lookup for account %s", err, r.Acctnum)
		return nil, err
	}

	// validate token
	if acctData.Token != r.Token {
		return nil, errf(codes.PermissionDenied, "%s", "Invalid Token")
	}

	// get FS data
	fs := fmt.Sprintf("/acct/%s/fs", r.Acctnum)
	fsData, err = s.getFS(fs, r.FSid)
	if err != nil {
		log.Printf("Error %v on lookup for file system %s", err, r.FSid)
		return nil, err
	}
	if fsData.Status == "active" {
		log.Println("FileSystem is active")
	}

	// write out the ip address
	parentKey := fmt.Sprintf("/fs/%s/addr", r.FSid)
	childKey := r.Addr
	parentKeyA, parentKeyB := murmur3.Sum128([]byte(parentKey))
	childKeyA, childKeyB := murmur3.Sum128([]byte(childKey))
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())
	addrData.Addr = r.Addr
	dataB, err = json.Marshal(addrData)
	if err != nil {
		log.Printf("Marshal Error: %v\n", err)
		return nil, errf(codes.Internal, "%v", err)
	}
	_, err = s.fsws.gstore.Write(context.Background(), parentKeyA, parentKeyB, childKeyA, childKeyB, timestampMicro, dataB)
	if err != nil {
		log.Printf("Write Error: %v", err)
		return nil, errf(codes.Internal, "%v", err)
	}

	// Build the status message to return
	status = fmt.Sprintf("addr %s for filesystem %s with account id %s was granted", r.Addr, r.FSid, r.Acctnum)
	return &fb.GrantAddrFSResponse{Status: status}, nil
}
func tlsConnStateFromContext(ctx context.Context) (*tls.ConnectionState, error) {
	peer, ok := peer.FromContext(ctx)
	if !ok {
		return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
	}
	tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: peer did not present a valid peer certificate")
	}
	return &tlsInfo.State, nil
}
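// A follow-on sketch, not from the original sources: one way a helper like the
// certSubjectFromContext referenced elsewhere in this section could pull the
// verified client certificate's subject out of the returned
// *tls.ConnectionState. The function name is hypothetical; it assumes
// crypto/x509/pkix is imported.
func subjectFromConnState(state *tls.ConnectionState) (pkix.Name, error) {
	if len(state.VerifiedChains) > 0 && len(state.VerifiedChains[0]) > 0 {
		// VerifiedChains[0][0] is the leaf certificate the peer presented.
		return state.VerifiedChains[0][0].Subject, nil
	}
	return pkix.Name{}, errors.New("no verified client certificate")
}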
// RemoteNode returns the node ID and role from the client's TLS certificate.
// If the RPC was forwarded, the original client's ID and role is returned, as
// well as the forwarder's ID. This function does not do authorization checks -
// it only looks up the node ID.
func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) {
	// If we have a value on the context that marks this as a local
	// request, we return the node info from the context.
	localNodeInfo := ctx.Value(LocalRequestKey)
	if localNodeInfo != nil {
		nodeInfo, ok := localNodeInfo.(RemoteNodeInfo)
		if ok {
			return nodeInfo, nil
		}
	}

	certSubj, err := certSubjectFromContext(ctx)
	if err != nil {
		return RemoteNodeInfo{}, err
	}

	org := ""
	if len(certSubj.Organization) > 0 {
		org = certSubj.Organization[0]
	}

	peer, ok := peer.FromContext(ctx)
	if !ok {
		return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
	}

	directInfo := RemoteNodeInfo{
		Roles:        certSubj.OrganizationalUnit,
		NodeID:       certSubj.CommonName,
		Organization: org,
		RemoteAddr:   peer.Addr.String(),
	}

	if isForwardedRequest(ctx) {
		remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx)
		if len(ous) == 0 || cn == "" || org == "" {
			return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
		}
		return RemoteNodeInfo{
			Roles:        ous,
			NodeID:       cn,
			Organization: org,
			ForwardedBy:  &directInfo,
			RemoteAddr:   remoteAddr,
		}, nil
	}

	return directInfo, nil
}
// ShowFS ...
func (s *FileSystemAPIServer) ShowFS(ctx context.Context, r *fb.ShowFSRequest) (*fb.ShowFSResponse, error) {
	var status string
	var acctData AcctPayLoad
	var fsData FileSysPayLoad
	var fsDataB []byte
	var err error

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		fmt.Println(pr.Addr)
	}

	// get acct data
	acctData, err = s.getAcct("/acct", r.Acctnum)
	if err != nil {
		log.Printf("Error %v on lookup for account %s", err, r.Acctnum)
		return nil, err
	}

	// validate token
	if acctData.Token != r.Token {
		return nil, errf(codes.PermissionDenied, "%s", "Invalid Token")
	}

	pKey := fmt.Sprintf("/acct/%s/fs", r.Acctnum)
	pKeyA, pKeyB := murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB := murmur3.Sum128([]byte(r.FSid))
	_, fsDataB, err = s.fsws.gstore.Read(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, nil)
	if store.IsNotFound(err) {
		return nil, errf(codes.NotFound, "%v", "File System Not Found")
	}
	if err != nil {
		return nil, errf(codes.Internal, "%s", err)
	}
	err = json.Unmarshal(fsDataB, &fsData)
	if err != nil {
		return nil, errf(codes.Internal, "%s", err)
	}

	// Get the list of Addrs
	fsData.Addr, err = s.addrList(fsData.ID)
	if err != nil {
		return nil, errf(codes.Internal, "%s", err)
	}
	fsDataB, err = json.Marshal(&fsData)
	if err != nil {
		return nil, errf(codes.Internal, "%s", err)
	}

	// Prep things to return
	status = "OK"
	return &fb.ShowFSResponse{Payload: string(fsDataB), Status: status}, nil
}
// ExtractTLSUnique extracts the TLS-Unique from the stream
func ExtractTLSUnique(ctx context.Context) []byte {
	pr, extracted := peer.FromContext(ctx)
	if !extracted {
		return nil
	}
	authInfo := pr.AuthInfo
	if authInfo == nil {
		return nil
	}
	tlsInfo, isTLSConn := authInfo.(credentials.TLSInfo)
	if !isTLSConn {
		return nil
	}
	return tlsInfo.State.TLSUnique
}
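// A minimal usage sketch (assumed, not in the original sources): the
// tls-unique value (the RFC 5929 channel binding) can be compared against
// bytes the client bound into an application-level token, tying that token to
// this exact TLS connection. Assumes the standard bytes package is imported.
func bindingMatches(ctx context.Context, clientReported []byte) bool {
	unique := ExtractTLSUnique(ctx)
	return unique != nil && bytes.Equal(unique, clientReported)
}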
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromContext(ctx)
	if ok {
		if err := grpc.SendHeader(ctx, md); err != nil {
			return nil, fmt.Errorf("grpc.SendHeader(%v, %v) = %v, want %v", ctx, md, err, nil)
		}
		grpc.SetTrailer(ctx, md)
	}
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, fmt.Errorf("failed to get peer from ctx")
	}
	if pr.Addr == net.Addr(nil) {
		return nil, fmt.Errorf("failed to get peer address")
	}
	if s.security != "" {
		// Check Auth info
		var authType, serverName string
		switch info := pr.AuthInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
			serverName = info.State.ServerName
		default:
			return nil, fmt.Errorf("Unknown AuthInfo type")
		}
		if authType != s.security {
			return nil, fmt.Errorf("Wrong auth type: got %q, want %q", authType, s.security)
		}
		if serverName != "x.test.youtube.com" {
			return nil, fmt.Errorf("Unknown server name %q", serverName)
		}
	}

	// Simulate some service delay.
	time.Sleep(time.Second)

	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
	if err != nil {
		return nil, err
	}
	return &testpb.SimpleResponse{
		Payload: payload,
	}, nil
}
// peerFromContext extracts the host from the context and the port from the
// local address, and returns an address built from the extracted host:port.
func peerFromContext(ctx context.Context, laddr string) (string, error) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "", errors.New("peer information in context does not exist")
	}
	host, _, err := net.SplitHostPort(p.Addr.String())
	if err != nil {
		return "", err
	}
	_, port, err := net.SplitHostPort(laddr)
	if err != nil {
		return "", err
	}
	return net.JoinHostPort(host, port), nil
}
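// A usage sketch under assumed values: with a peer address of
// "10.0.0.5:53012" in ctx and a local listen address of ":8443",
// peerFromContext returns "10.0.0.5:8443" - the caller's host joined with
// this server's port, useful for dialing the peer back on a known port.
// The function name is hypothetical.
func examplePeerFromContext(ctx context.Context) {
	addr, err := peerFromContext(ctx, ":8443")
	if err != nil {
		log.Printf("peerFromContext failed: %v", err)
		return
	}
	log.Printf("peer reachable at %s", addr)
}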
// GrantAddrFS ...
func (s *FileSystemAPIServer) GrantAddrFS(ctx context.Context, r *pb.GrantAddrFSRequest) (*pb.GrantAddrFSResponse, error) {
	var err error
	var addrData AddrRef
	var addrByte []byte
	srcAddr := ""

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// validate token
	_, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s GRANT FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	// GRANT a file system entry for the addr
	// write /fs/FSID/addr  addr  AddrRef
	pKey := fmt.Sprintf("/fs/%s/addr", r.FSid)
	pKeyA, pKeyB := murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB := murmur3.Sum128([]byte(r.Addr))
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())
	addrData.Addr = r.Addr
	addrData.FSID = r.FSid
	addrByte, err = json.Marshal(addrData)
	if err != nil {
		log.Printf("%s GRANT FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}
	_, err = s.gstore.Write(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro, addrByte)
	if err != nil {
		log.Printf("%s GRANT FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}

	// Log the successful operation and return the FSID the addr was granted to.
	log.Printf("%s GRANT SUCCESS %s %s\n", srcAddr, r.FSid, r.Addr)
	return &pb.GrantAddrFSResponse{Data: r.FSid}, nil
}
func (s *apiServer) validateIP(ctx context.Context) error {
	if s.comms == nil {
		// TODO: Fix abstraction so that we don't have to do this for tests
		// Assume that it is a unit test
		return nil
	}
	p, ok := peer.FromContext(ctx)
	if !ok {
		return errors.New("Couldn't get client IP")
	}
	ip, _, err := net.SplitHostPort(p.Addr.String())
	if err != nil {
		return err
	}
	// Check the error before using the returned UUID.
	fsidUUID, err := GetFsId(ctx)
	if err != nil {
		return err
	}
	fsid := fsidUUID.String()
	// First check the cache
	ips, ok := s.validIPs[fsid]
	if !ok {
		ips = make(map[string]bool)
		s.validIPs[fsid] = ips
	}
	valid, ok := ips[ip]
	if ok && valid {
		return nil
	}
	_, err = s.comms.ReadGroupItem(ctx, []byte(fmt.Sprintf("/fs/%s/addr", fsid)), []byte(ip))
	if store.IsNotFound(err) {
		log.Println("Invalid IP: ", ip)
		// No access
		return ErrUnauthorized
	}
	if err != nil {
		return err
	}
	// Cache the valid ip
	s.validIPs[fsid][ip] = true
	return nil
}
func GetPeerInfo(s grpc.Stream) PeerInfo {
	var pi PeerInfo
	ctx := s.Context()
	trs, ok := transport.StreamFromContext(ctx)
	if ok {
		pi.addr = trs.ServerTransport().RemoteAddr().String()
	}
	// Guard against a missing peer or AuthInfo to avoid a nil dereference.
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return pi
	}
	switch creds := p.AuthInfo.(type) {
	case credentials.TLSInfo:
		state := creds.State
		if len(state.PeerCertificates) > 0 {
			pi.cert = state.PeerCertificates[0]
		}
	}
	return pi
}
func (s *server) Send(ctx context.Context, in *pb.Request) (*pb.Reply, error) {
	// Guard against a missing peer to avoid a nil dereference.
	remote := "unknown"
	if pr, ok := peer.FromContext(ctx); ok {
		remote = pr.Addr.String()
	}
	re := &pb.Reply{}
	logData := []string{
		fmt.Sprintf("logid=%s", in.Logid),
		fmt.Sprintf("remote=%s", remote),
		fmt.Sprintf("topic=%s", in.Topic),
		fmt.Sprintf("key=%s", string(in.Key)),
		fmt.Sprintf("value_len=%d", len(in.Value)),
	}
	defer func() {
		logData = append(logData, fmt.Sprintf("errno=%d", re.Errno))
		logData = append(logData, fmt.Sprintf("offset=%d", re.Offset))
		log.Println(logData)
	}()

	producer, err := s.getProducer(in.Topic)
	if err != nil {
		re.Errno = 502404
		re.Error = "get producer failed: " + err.Error()
		return re, nil
	}

	log.Println("input:", in, in.Partition)
	message := &sarama.ProducerMessage{
		Topic: in.Topic,
		Key:   sarama.ByteEncoder(in.Key),
		Value: sarama.ByteEncoder(in.Value),
	}
	partition, offset, err := producer.SendMessage(message)
	if err != nil {
		re.Errno = 500
		re.Error = err.Error()
	} else {
		re.Offset = offset
		re.Partition = partition
	}
	return re, nil
}
// Batch implements the roachpb.KVServer interface.
func (s *DBServer) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
	// TODO(marc): this code is duplicated in server/node.go, which should be
	// fixed. Also, grpc's authentication model (which gives credential access in
	// the request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, util.Errorf("user %s is not allowed", certUser)
			}
		}
	}
	var br *roachpb.BatchResponse
	var err error
	f := func() {
		if err = verifyRequest(args); err != nil {
			return
		}
		var pErr *roachpb.Error
		br, pErr = s.sender.Send(context.TODO(), *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
		}
		br.Error = pErr
	}
	if !s.stopper.RunTask(f) {
		err = util.Errorf("node stopped")
	}
	return br, err
}
// UpdateFS ...
func (s *FileSystemAPIServer) UpdateFS(ctx context.Context, r *pb.UpdateFSRequest) (*pb.UpdateFSResponse, error) {
	var err error
	srcAddr := ""

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// validate Token
	_, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s UPDATE FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	// Log the operation and return the not-implemented message.
	log.Printf("%s UPDATE NOTIMPLEMENTED %s\n", srcAddr, r.FSid)
	return &pb.UpdateFSResponse{Data: "UPDATE operation is not supported in EA"}, nil
}
// DeleteFS ...
func (s *FileSystemAPIServer) DeleteFS(ctx context.Context, r *pb.DeleteFSRequest) (*pb.DeleteFSResponse, error) {
	var err error
	srcAddr := ""

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// validate Token
	_, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s DELETE FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	// Log the operation and return the not-supported message.
	log.Printf("%s DELETE NOTIMPLEMENTED %s\n", srcAddr, r.FSid)
	return &pb.DeleteFSResponse{Data: "Delete Operation not supported at this time"}, nil
}
// immediateCallerID tries to extract the common name of the certificate
// that was used to connect to vtgate. If it fails for any reason,
// it will return "". That immediate caller id is then inserted
// into a Context, and will be used when talking to vttablet.
// vttablet in turn can use table ACLs to validate access is authorized.
func immediateCallerID(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return ""
	}
	if p.AuthInfo == nil {
		return ""
	}
	tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return ""
	}
	if len(tlsInfo.State.VerifiedChains) < 1 {
		return ""
	}
	if len(tlsInfo.State.VerifiedChains[0]) < 1 {
		return ""
	}
	cert := tlsInfo.State.VerifiedChains[0][0]
	return cert.Subject.CommonName
}
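// None of the AuthInfo-based helpers in this section can observe
// credentials.TLSInfo unless the gRPC server was started with TLS transport
// credentials. A minimal setup sketch (assumed, not from any of the sources
// above); the function name, certificate paths, and CA pool wiring are
// placeholders.
func newTLSServer(caPool *x509.CertPool) (*grpc.Server, error) {
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		return nil, err
	}
	creds := credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		// A ClientCAs pool plus a verifying ClientAuth mode is what populates
		// tlsInfo.State.VerifiedChains for helpers like immediateCallerID.
		ClientCAs:  caPool,
		ClientAuth: tls.VerifyClientCertIfGiven,
	})
	return grpc.NewServer(grpc.Creds(creds)), nil
}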
func authenticateVerifier(ctx context.Context) (uint64, error) {
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return 0, fmt.Errorf("failed to authenticate verifier: peer.FromContext returned false")
	}
	// Use a checked type assertion so a non-TLS connection fails with an
	// error instead of a panic.
	tlsInfo, ok := pr.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return 0, fmt.Errorf("failed to authenticate verifier: connection is not TLS")
	}
	certChains := tlsInfo.State.VerifiedChains
	if len(certChains) != 1 {
		return 0, fmt.Errorf("failed to authenticate verifier: expected exactly one valid certificate chain")
	}
	chain := certChains[0]
	leaf := chain[0]
	verifierIDString := leaf.Subject.CommonName
	if !strings.HasPrefix(verifierIDString, verifierCommonNamePrefix) {
		return 0, fmt.Errorf("failed to authenticate verifier: invalid common name: missing prefix %q (got %q)", verifierCommonNamePrefix, verifierIDString)
	}
	verifierID, err := strconv.ParseUint(verifierIDString[len(verifierCommonNamePrefix):], 16, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to authenticate verifier: invalid common name: id not an integer: %s", err)
	}
	return verifierID, nil
}
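// Worked example (the prefix value is assumed purely for illustration, since
// verifierCommonNamePrefix is defined elsewhere): if the prefix were
// "verifier-" and the leaf certificate's CN were "verifier-1f", the hex parse
// strconv.ParseUint("1f", 16, 64) would yield verifier ID 31.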
func (r *RpcCacheServer) authenticateClient(certs map[string]*x509.Certificate, ctx context.Context) error {
	if len(certs) == 0 {
		return nil // Open to anyone.
	}
	p, ok := peer.FromContext(ctx)
	if !ok {
		return fmt.Errorf("Missing client certificate")
	}
	info, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return fmt.Errorf("Could not extract auth info")
	}
	if len(info.State.PeerCertificates) == 0 {
		return fmt.Errorf("No peer certificate available")
	}
	cert := info.State.PeerCertificates[0]
	if okCert := certs[string(cert.RawSubject)]; okCert != nil && okCert.Equal(cert) {
		return nil
	}
	return fmt.Errorf("Invalid or unknown certificate")
}
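// A sketch, under assumptions, of how a certs map keyed by raw subject (the
// shape authenticateClient expects) might be built from a PEM bundle of
// allowed client certificates. The loader name and input layout are
// hypothetical; it uses only the standard encoding/pem and crypto/x509 APIs.
func loadAllowedCerts(pemBytes []byte) (map[string]*x509.Certificate, error) {
	certs := map[string]*x509.Certificate{}
	for {
		block, rest := pem.Decode(pemBytes)
		if block == nil {
			break // no more PEM blocks
		}
		pemBytes = rest
		if block.Type != "CERTIFICATE" {
			continue // skip keys or other block types
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		certs[string(cert.RawSubject)] = cert
	}
	return certs, nil
}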
// RemoteNode returns the node ID and role from the client's TLS certificate.
// If the RPC was forwarded, the original client's ID and role is returned, as
// well as the forwarder's ID. This function does not do authorization checks -
// it only looks up the node ID.
func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) {
	certSubj, err := certSubjectFromContext(ctx)
	if err != nil {
		return RemoteNodeInfo{}, err
	}

	org := ""
	if len(certSubj.Organization) > 0 {
		org = certSubj.Organization[0]
	}

	peer, ok := peer.FromContext(ctx)
	if !ok {
		return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
	}

	directInfo := RemoteNodeInfo{
		Roles:        certSubj.OrganizationalUnit,
		NodeID:       certSubj.CommonName,
		Organization: org,
		RemoteAddr:   peer.Addr.String(),
	}

	if isForwardedRequest(ctx) {
		remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx)
		if len(ous) == 0 || cn == "" || org == "" {
			return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
		}
		return RemoteNodeInfo{
			Roles:        ous,
			NodeID:       cn,
			Organization: org,
			ForwardedBy:  &directInfo,
			RemoteAddr:   remoteAddr,
		}, nil
	}

	return directInfo, nil
}
func (o *OortFS) validateIP(ctx context.Context) (bool, error) {
	// TODO: Add caching of validation
	p, ok := peer.FromContext(ctx)
	if !ok {
		return false, errors.New("Couldn't get client IP")
	}
	if p.Addr.String() == "internal" {
		// This is an internal call, so we can skip validation.
		return true, nil
	}
	ip, _, err := net.SplitHostPort(p.Addr.String())
	if err != nil {
		return false, err
	}
	// First check the cache
	valid, ok := o.validIps[ip]
	if ok && valid {
		return true, nil
	}
	fsid, err := GetFsId(ctx)
	if err != nil {
		return false, err
	}
	_, err = o.comms.ReadGroupItem(ctx, []byte(fmt.Sprintf("/fs/%s/addr", fsid.String())), []byte(ip))
	if store.IsNotFound(err) {
		log.Println("Invalid IP: ", ip)
		// No access
		return false, nil
	}
	if err != nil {
		return false, err
	}
	// Cache the valid ip
	o.validIps[ip] = true
	return true, nil
}
// RevokeAddrFS ...
func (s *FileSystemAPIServer) RevokeAddrFS(ctx context.Context, r *fb.RevokeAddrFSRequest) (*fb.RevokeAddrFSResponse, error) {
	var status string
	var err error
	var acctData AcctPayLoad

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		fmt.Println(pr.Addr)
	}

	// get acct data
	acctData, err = s.getAcct("/acct", r.Acctnum)
	if err != nil {
		log.Printf("Error %v on lookup for account %s", err, r.Acctnum)
		return nil, errf(codes.NotFound, "%v", err)
	}

	// validate token
	if acctData.Token != r.Token {
		return nil, errf(codes.PermissionDenied, "%s", "Invalid Token")
	}

	parentKey := fmt.Sprintf("/fs/%s/addr", r.FSid)
	childKey := r.Addr
	parentKeyA, parentKeyB := murmur3.Sum128([]byte(parentKey))
	childKeyA, childKeyB := murmur3.Sum128([]byte(childKey))
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())

	// Delete addr
	_, err = s.fsws.gstore.Delete(context.Background(), parentKeyA, parentKeyB, childKeyA, childKeyB, timestampMicro)
	if store.IsNotFound(err) {
		log.Printf("/fs/%s/addr/%s did not exist to delete", r.FSid, r.Addr)
		return nil, errf(codes.NotFound, "%s", "Addr not found")
	} else if err != nil {
		return nil, errf(codes.Internal, "%s", err)
	}

	// Build the status message to return
	status = fmt.Sprintf("addr %s for filesystem %s with account id %s was revoked", r.Addr, r.FSid, r.Acctnum)
	return &fb.RevokeAddrFSResponse{Status: status}, nil
}
// Batch implements the roachpb.KVServer interface.
func (n *Node) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
	// TODO(marc): this code is duplicated in kv/db.go, which should be fixed.
	// Also, grpc's authentication model (which gives credential access in the
	// request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, util.Errorf("user %s is not allowed", certUser)
			}
		}
	}

	var br *roachpb.BatchResponse

	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here

	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}

	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, args.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			sp.LogEvent("delegating to snowball tracing")
			sp.Finish()
			if sp, err = tracing.JoinOrNewSnowball(opName, args.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		traceCtx := opentracing.ContextWithSpan(n.context(ctx), sp)

		tStart := timeutil.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(traceCtx, *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			log.Trace(traceCtx, fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(timeutil.Since(tStart), pErr)
		br.Error = pErr
	}

	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
// CreateFS ...
func (s *FileSystemAPIServer) CreateFS(ctx context.Context, r *pb.CreateFSRequest) (*pb.CreateFSResponse, error) {
	var err error
	var acctID string
	srcAddr := ""
	var fsRef FileSysRef
	var fsRefByte []byte
	var fsSysAttr FileSysAttr
	var fsSysAttrByte []byte

	// Get incoming ip
	pr, ok := peer.FromContext(ctx)
	if ok {
		srcAddr = pr.Addr.String()
	}

	// Validate Token
	acctID, err = s.validateToken(r.Token)
	if err != nil {
		log.Printf("%s CREATE FAILED %s\n", srcAddr, "PermissionDenied")
		return nil, errf(codes.PermissionDenied, "%v", "Invalid Token")
	}

	fsID := uuid.NewV4().String()
	timestampMicro := brimtime.TimeToUnixMicro(time.Now())

	// Write file system reference entries.
	// write /fs  FSID  FileSysRef
	pKey := "/fs"
	pKeyA, pKeyB := murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB := murmur3.Sum128([]byte(fsID))
	fsRef.AcctID = acctID
	fsRef.FSID = fsID
	fsRefByte, err = json.Marshal(fsRef)
	if err != nil {
		log.Printf("%s CREATE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}
	_, err = s.gstore.Write(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro, fsRefByte)
	if err != nil {
		log.Printf("%s CREATE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}

	// write /acct/acctID  FSID  FileSysRef
	pKey = fmt.Sprintf("/acct/%s", acctID)
	pKeyA, pKeyB = murmur3.Sum128([]byte(pKey))
	_, err = s.gstore.Write(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro, fsRefByte)
	if err != nil {
		log.Printf("%s CREATE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}

	// Write file system attributes.
	// write /fs/FSID  name  FileSysAttr
	pKey = fmt.Sprintf("/fs/%s", fsID)
	pKeyA, pKeyB = murmur3.Sum128([]byte(pKey))
	cKeyA, cKeyB = murmur3.Sum128([]byte("name"))
	fsSysAttr.Attr = "name"
	fsSysAttr.Value = r.FSName
	fsSysAttr.FSID = fsID
	fsSysAttrByte, err = json.Marshal(fsSysAttr)
	if err != nil {
		log.Printf("%s CREATE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}
	_, err = s.gstore.Write(context.Background(), pKeyA, pKeyB, cKeyA, cKeyB, timestampMicro, fsSysAttrByte)
	if err != nil {
		log.Printf("%s CREATE FAILED %v\n", srcAddr, err)
		return nil, errf(codes.Internal, "%v", err)
	}

	// Log the operation and return the new file system's UUID.
	log.Printf("%s CREATE SUCCESS %s\n", srcAddr, fsID)
	return &pb.CreateFSResponse{Data: fsID}, nil
}