func TestPrivateKeyManagerHealthyFail(t *testing.T) {
	keyFixture := generatePrivateKeyStatic(t, 1)
	tests := []*privateKeyManager{
		// keySet nil
		&privateKeyManager{
			keySet: nil,
			clock:  clockwork.NewRealClock(),
		},
		// zero keys
		&privateKeyManager{
			keySet: &PrivateKeySet{
				keys:      []*PrivateKey{},
				expiresAt: time.Now().Add(time.Minute),
			},
			clock: clockwork.NewRealClock(),
		},
		// key set expired
		&privateKeyManager{
			keySet: &PrivateKeySet{
				keys:      []*PrivateKey{keyFixture},
				expiresAt: time.Now().Add(-1 * time.Minute),
			},
			clock: clockwork.NewRealClock(),
		},
	}

	for i, tt := range tests {
		if err := tt.Healthy(); err == nil {
			t.Errorf("case %d: nil error", i)
		}
	}
}
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
	return &ProviderConfigSyncer{
		from:  from,
		to:    to,
		clock: clockwork.NewRealClock(),
	}
}
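// A minimal sketch of the injection pattern the constructor above enables:
// same-package tests can build the struct directly and substitute
// clockwork.NewFakeClock() for the default real clock. The field names come
// from the constructor; the helper name is hypothetical.
func newTestProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter, fc clockwork.FakeClock) *ProviderConfigSyncer {
	return &ProviderConfigSyncer{
		from:  from,
		to:    to,
		clock: fc, // FakeClock satisfies clockwork.Clock
	}
}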
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
		return
	}

	w.Header().Set("X-Etcd-Cluster-ID", h.clusterInfo.ID().String())

	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
	defer cancel()

	rr, err := parseKeyRequest(r, clockwork.NewRealClock())
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := h.server.Do(ctx, rr)
	if err != nil {
		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
		writeError(w, err)
		return
	}

	switch {
	case resp.Event != nil:
		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
			// Should never be reached
			log.Printf("error writing event: %v", err)
		}
	case resp.Watcher != nil:
		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
		defer cancel()
		handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer)
	default:
		writeError(w, errors.New("received response with no Event/Watcher!"))
	}
}
func NewClientCredsTokenManager(client *Client, issuer string) *ClientCredsTokenManager {
	return &ClientCredsTokenManager{
		client: client,
		issuer: issuer,
		clock:  clockwork.NewRealClock(),
	}
}
func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
	u, err := url.Parse(durl)
	if err != nil {
		return nil, err
	}
	token := u.Path
	u.Path = ""
	pf, err := newProxyFunc(dproxyurl)
	if err != nil {
		return nil, err
	}

	// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
	tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
	if err != nil {
		return nil, err
	}
	tr.Proxy = pf
	cfg := client.Config{
		Transport: tr,
		Endpoints: []string{u.String()},
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	dc := client.NewKeysAPIWithPrefix(c, "")
	return &discovery{
		cluster: token,
		c:       dc,
		id:      id,
		url:     u,
		clock:   clockwork.NewRealClock(),
	}, nil
}
func NewGlobalContext() *GlobalContext { return &GlobalContext{ Log: logger.New("keybase", ErrorWriter()), ProofCheckerFactory: defaultProofCheckerFactory, Clock: clockwork.NewRealClock(), } }
func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
	return &KeySetSyncer{
		readable: r,
		writable: w,
		clock:    clockwork.NewRealClock(),
	}
}
func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
	return &httpProviderConfigGetter{
		hc:        hc,
		issuerURL: issuerURL,
		clock:     clockwork.NewRealClock(),
	}
}
// NewUDPHook returns a logrus-compatible hook that sends data to a UDP socket
func NewUDPHook(opts ...UDPOptionSetter) (*UDPHook, error) {
	f := &UDPHook{}
	for _, o := range opts {
		o(f)
	}
	if f.Clock == nil {
		f.Clock = clockwork.NewRealClock()
	}
	if f.clientNet == "" {
		f.clientNet = UDPDefaultNet
	}
	if f.clientAddr == "" {
		f.clientAddr = UDPDefaultAddr
	}
	addr, err := net.ResolveUDPAddr(f.clientNet, f.clientAddr)
	if err != nil {
		return nil, Wrap(err)
	}
	conn, err := net.ListenPacket("udp", ":0")
	if err != nil {
		return nil, Wrap(err)
	}
	f.addr = addr
	f.conn = conn.(*net.UDPConn)
	return f, nil
}
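// A hedged sketch of a functional option for the hook above. Since options are
// applied as o(f), UDPOptionSetter is assumed to be func(*UDPHook); the option
// name UDPWithClock is hypothetical and lets callers (or tests) override the
// real-clock default before it is applied.
func UDPWithClock(c clockwork.Clock) UDPOptionSetter {
	return func(f *UDPHook) {
		// Set the clock explicitly so NewUDPHook skips its NewRealClock default.
		f.Clock = c
	}
}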
func TestNewPasswordReset(t *testing.T) {
	clock = clockwork.NewFakeClock()
	defer func() {
		clock = clockwork.NewRealClock()
	}()

	now := clock.Now()

	issuer, _ := url.Parse("http://example.com")
	clientID := "myclient"
	usr := User{ID: "123456", Email: "*****@*****.**"}
	callback := "http://client.example.com/callback"
	expires := time.Hour * 3
	password := Password("passy")

	tests := []struct {
		user     User
		password Password
		issuer   url.URL
		clientID string
		callback string
		expires  time.Duration
		want     jose.Claims
	}{
		{
			issuer:   *issuer,
			clientID: clientID,
			user:     usr,
			callback: callback,
			expires:  expires,
			password: password,
			want: map[string]interface{}{
				"iss": issuer.String(),
				"aud": clientID,
				ClaimPasswordResetCallback: callback,
				ClaimPasswordResetPassword: string(password),
				"exp": float64(now.Add(expires).Unix()),
				"sub": usr.ID,
				"iat": float64(now.Unix()),
			},
		},
	}

	for i, tt := range tests {
		cbURL, err := url.Parse(tt.callback)
		if err != nil {
			t.Fatalf("case %d: non-nil err: %q", i, err)
		}
		ev := NewPasswordReset(tt.user, tt.password, tt.issuer, tt.clientID, *cbURL, tt.expires)

		if diff := pretty.Compare(tt.want, ev.claims); diff != "" {
			t.Errorf("case %d: Compare(want, got): %v", i, diff)
		}

		if diff := pretty.Compare(ev.Password(), password); diff != "" {
			t.Errorf("case %d: Compare(want, got): %v", i, diff)
		}
	}
}
func (g *GlobalContext) Clock() clockwork.Clock {
	g.clockMu.Lock()
	defer g.clockMu.Unlock()
	if g.clock == nil {
		g.clock = clockwork.NewRealClock()
	}
	return g.clock
}
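// A sketch of a matching setter under the same clockMu/clock fields shown
// above; SetClock is hypothetical here. Installing a fake clock before any
// caller reaches Clock() prevents the lazy real-clock default from winning.
func (g *GlobalContext) SetClock(c clockwork.Clock) {
	g.clockMu.Lock()
	defer g.clockMu.Unlock()
	g.clock = c
}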
// NewPeriodicReconciler creates a PeriodicReconciler that will run recFunc at least every
// interval, or in response to anything emitted from EventStream.Next()
func NewPeriodicReconciler(interval time.Duration, recFunc func(), eStream EventStream) PeriodicReconciler {
	return &reconciler{
		ival:    interval,
		rFunc:   recFunc,
		eStream: eStream,
		clock:   clockwork.NewRealClock(),
	}
}
func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic {
	return &Periodic{
		clock:        clockwork.NewRealClock(),
		periodInHour: h,
		rg:           rg,
		c:            c,
	}
}
func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
	return JWTVerifier{
		issuer:   issuer,
		clientID: clientID,
		syncFunc: syncFunc,
		keysFunc: keysFunc,
		clock:    clockwork.NewRealClock(),
	}
}
func NewSessionManager(sRepo session.SessionRepo, skRepo session.SessionKeyRepo) *SessionManager {
	return &SessionManager{
		GenerateCode:   DefaultGenerateCode,
		Clock:          clockwork.NewRealClock(),
		ValidityWindow: session.DefaultSessionValidityWindow,
		sessions:       sRepo,
		keys:           skRepo,
	}
}
func NewGlobalContext() *GlobalContext {
	log := logger.New("keybase")
	return &GlobalContext{
		Log:                 log,
		VDL:                 NewVDebugLog(log),
		ProofCheckerFactory: defaultProofCheckerFactory,
		Clock:               clockwork.NewRealClock(),
	}
}
func NewPatcher(encoder runtime.Encoder, decoder runtime.Decoder, mapping *meta.RESTMapping, helper *resource.Helper) *patcher {
	return &patcher{
		encoder: encoder,
		decoder: decoder,
		mapping: mapping,
		helper:  helper,
		backOff: clockwork.NewRealClock(),
	}
}
func NewWatcher(url string) *VagrantShareRemoteWatcher {
	return &VagrantShareRemoteWatcher{
		url:             url,
		Updated:         make(chan *VagrantBoxDescripter),
		OnError:         make(chan error),
		periodInSeconds: time.Second * 60,
		jsonGetter:      jsonGetter,
		clock:           clockwork.NewRealClock(),
	}
}
func NewManager(userRepo UserRepo, pwRepo PasswordInfoRepo, txnFactory repo.TransactionFactory, options ManagerOptions) *Manager {
	return &Manager{
		Clock:           clockwork.NewRealClock(),
		userRepo:        userRepo,
		pwRepo:          pwRepo,
		begin:           txnFactory,
		userIDGenerator: DefaultUserIDGenerator,
	}
}
func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
	return &PrivateKeyRotator{
		repo:        repo,
		ttl:         ttl,
		keep:        2,
		generateKey: GeneratePrivateKey,
		clock:       clockwork.NewRealClock(),
	}
}
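// A minimal test sketch of why these constructors default to a real clock but
// keep the field overridable: a same-package test swaps in a fake clock and
// drives time deterministically with Advance. The nil repo is a placeholder;
// rotator internals are not exercised, only the clock swap.
func TestPrivateKeyRotatorFakeClock(t *testing.T) {
	fc := clockwork.NewFakeClock()
	r := NewPrivateKeyRotator(nil, time.Hour)
	r.clock = fc

	before := r.clock.Now()
	fc.Advance(2 * time.Hour) // time moves only when the test says so
	if got := r.clock.Now().Sub(before); got != 2*time.Hour {
		t.Fatalf("expected clock to advance 2h, got %v", got)
	}
}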
func NewUserManager(userRepo user.UserRepo, pwRepo user.PasswordInfoRepo, connCfgRepo connector.ConnectorConfigRepo, txnFactory repo.TransactionFactory, options ManagerOptions) *UserManager {
	return &UserManager{
		Clock:           clockwork.NewRealClock(),
		userRepo:        userRepo,
		pwRepo:          pwRepo,
		connCfgRepo:     connCfgRepo,
		begin:           txnFactory,
		userIDGenerator: user.DefaultUserIDGenerator,
	}
}
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
	sec := auth.NewStore(server, timeout)

	kh := &keysHandler{
		sec:                   sec,
		server:                server,
		cluster:               server.Cluster(),
		timer:                 server,
		timeout:               timeout,
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		sec:                   sec,
		server:                server,
		cluster:               server.Cluster(),
		timeout:               timeout,
		clock:                 clockwork.NewRealClock(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	dmh := &deprecatedMachinesHandler{
		cluster: server.Cluster(),
	}

	sech := &authHandler{
		sec:                   sec,
		cluster:               server.Cluster(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)

	return requestLogger(mux)
}
func NewUnitStatePublisher(reg registry.Registry, mach machine.Machine, ttl time.Duration) *UnitStatePublisher {
	return &UnitStatePublisher{
		mach:            mach,
		ttl:             ttl,
		publisher:       newPublisher(reg, ttl),
		cache:           make(map[string]*unit.UnitState),
		cacheMutex:      sync.RWMutex{},
		toPublish:       make(chan string),
		toPublishStates: make(map[string]*unit.UnitState),
		toPublishMutex:  sync.RWMutex{},
		clock:           clockwork.NewRealClock(),
	}
}
func (g *gregorHandler) resetGregorClient() (err error) {
	defer g.G().Trace("gregorHandler#resetGregorClient", func() error { return err })()
	of := gregor1.ObjFactory{}
	sm := grstorage.NewMemEngine(of, clockwork.NewRealClock())

	var guid gregor.UID
	var gdid gregor.DeviceID
	var b []byte

	uid := g.G().Env.GetUID()
	if !uid.Exists() {
		err = errors.New("no UID; probably not logged in")
		return err
	}
	if b = uid.ToBytes(); b == nil {
		err = errors.New("can't convert UID to byte array")
		return err
	}
	if guid, err = of.MakeUID(b); err != nil {
		return err
	}

	did := g.G().Env.GetDeviceID()
	if !did.Exists() {
		err = errors.New("no device ID; probably not logged in")
		return err
	}
	if b, err = hex.DecodeString(did.String()); err != nil {
		return err
	}
	if gdid, err = of.MakeDeviceID(b); err != nil {
		return err
	}

	// Create client object
	gcli := grclient.NewClient(guid, gdid, sm, newLocalDB(g.G()), g.G().Env.GetGregorSaveInterval(), g.G().Log)

	// Bring up local state
	g.Debug("restoring state from leveldb")
	if err = gcli.Restore(); err != nil {
		// If this fails, we'll keep trying since the server can bail us out
		g.Debug("restore local state failed: %s", err)
	}

	g.gregorCli = gcli
	return nil
}
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) ctx, cancel := context.WithTimeout(context.Background(), h.timeout) defer cancel() clock := clockwork.NewRealClock() startTime := clock.Now() rr, err := parseKeyRequest(r, clock) if err != nil { writeKeyError(w, err) return } // The path must be valid at this point (we've parsed the request successfully). if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive) { writeKeyNoAuth(w) return } if !rr.Wait { reportRequestReceived(rr) } resp, err := h.server.Do(ctx, rr) if err != nil { err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix) writeKeyError(w, err) reportRequestFailed(rr, err) return } switch { case resp.Event != nil: if err := writeKeyEvent(w, resp.Event, h.timer); err != nil { // Should never be reached plog.Errorf("error writing event (%v)", err) } reportRequestCompleted(rr, resp, startTime) case resp.Watcher != nil: ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout) defer cancel() handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer) default: writeKeyError(w, errors.New("received response with no Event/Watcher!")) } }
func TestHandleKeysFuncMethodNotAllowed(t *testing.T) {
	for _, m := range []string{"POST", "PUT", "DELETE"} {
		hdlr := handleKeysFunc(nil, clockwork.NewRealClock())
		req, err := http.NewRequest(m, "http://example.com", nil)
		if err != nil {
			t.Errorf("case %s: unable to create HTTP request: %v", m, err)
			continue
		}

		w := httptest.NewRecorder()
		hdlr.ServeHTTP(w, req)

		want := http.StatusMethodNotAllowed
		got := w.Code
		if want != got {
			t.Errorf("case %s: expected HTTP %d, got %d", m, want, got)
		}
	}
}
func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
	options := &DrainOptions{factory: f, out: out, errOut: errOut, backOff: clockwork.NewRealClock()}

	cmd := &cobra.Command{
		Use:     "drain NODE",
		Short:   "Drain node in preparation for maintenance",
		Long:    drain_long,
		Example: drain_example,
		Run: func(cmd *cobra.Command, args []string) {
			cmdutil.CheckErr(options.SetupDrain(cmd, args))
			cmdutil.CheckErr(options.RunDrain())
		},
	}
	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.")
	cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.")
	cmd.Flags().BoolVar(&options.DeleteLocalData, "delete-local-data", false, "Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).")
	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
	cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
	return cmd
}
func TestBasicUsage(t *testing.T) {
	var wg sync.WaitGroup

	watcher := NewWatcher("test-url-does-not-exist")
	MockifyWatcher(watcher, &MockJsonAccessor{
		eras: []MockJsonEra{
			MockJsonEra{errorToReturn: errors.New("!")}},
	}, clockwork.NewRealClock())

	ctx, cancel := context.WithCancel(context.Background())

	wg.Add(1) // register before spawning the goroutine that calls Done
	go func() {
		select {
		case <-watcher.Updated:
			cancel()
			wg.Done()
		case <-watcher.OnError:
			cancel()
			wg.Done()
		}
	}()

	watcher.Watch(ctx)
	wg.Wait()

	if ctx.Err() != context.Canceled {
		t.Error("Should have been cancelled!")
	}
}
func NewGarbageCollector(dbm *gorp.DbMap, ival time.Duration) *GarbageCollector {
	sRepo := NewSessionRepo(dbm)
	skRepo := NewSessionKeyRepo(dbm)

	purgers := []namedPurger{
		namedPurger{
			name:   "session",
			purger: sRepo,
		},
		namedPurger{
			name:   "session_key",
			purger: skRepo,
		},
	}

	gc := GarbageCollector{
		purgers:  purgers,
		interval: ival,
		clock:    clockwork.NewRealClock(),
	}

	return &gc
}
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer) http.Handler {
	kh := &keysHandler{
		server:      server,
		clusterInfo: server.Cluster,
		timer:       server,
		timeout:     defaultServerTimeout,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		server:      server,
		clusterInfo: server.Cluster,
		clock:       clockwork.NewRealClock(),
	}

	dmh := &deprecatedMachinesHandler{
		clusterInfo: server.Cluster,
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, serveVersion)
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(statsPath, serveStats)
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	return mux
}